diff --git a/.github/actions/common_setup/action.yml b/.github/actions/common_setup/action.yml index b02413adc44..e492fa97816 100644 --- a/.github/actions/common_setup/action.yml +++ b/.github/actions/common_setup/action.yml @@ -18,9 +18,6 @@ runs: echo "Setup the common ENV variables" cat >> "$GITHUB_ENV" << 'EOF' TEMP_PATH=${{runner.temp}}/${{inputs.job_type}} - REPO_COPY=${{runner.temp}}/${{inputs.job_type}}/git-repo-copy - IMAGES_PATH=${{runner.temp}}/images_path - REPORTS_PATH=${{runner.temp}}/reports_dir EOF if [ -z "${{env.GITHUB_JOB_OVERRIDDEN}}" ] && [ "true" == "${{inputs.nested_job}}" ]; then echo "The GITHUB_JOB_OVERRIDDEN ENV is unset, and must be set for the nested jobs" @@ -30,6 +27,4 @@ runs: shell: bash run: | # to remove every leftovers - sudo rm -fr "$TEMP_PATH" - mkdir -p "$REPO_COPY" - cp -a "$GITHUB_WORKSPACE"/. "$REPO_COPY"/ + sudo rm -fr "$TEMP_PATH" && mkdir -p "$TEMP_PATH" diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index f6af4778cf1..ef554a1b0ff 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -10,27 +10,21 @@ on: # yamllint disable-line rule:truthy branches: - 'backport/**' jobs: - CheckLabels: + RunConfig: runs-on: [self-hosted, style-checker] - # Run the first check always, even if the CI is cancelled - if: ${{ always() }} + outputs: + data: ${{ steps.runconfig.outputs.CI_DATA }} steps: - name: Check out repository code uses: ClickHouse/checkout@v1 with: - clear-repository: true + clear-repository: true # to ensure correct digests + fetch-depth: 0 # to get version + filter: tree:0 - name: Labels check run: | cd "$GITHUB_WORKSPACE/tests/ci" python3 run_check.py - PythonUnitTests: - runs-on: [self-hosted, style-checker] - needs: CheckLabels - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - name: Python unit tests run: | cd "$GITHUB_WORKSPACE/tests/ci" @@ -40,273 +34,237 @@ jobs: echo "Testing $dir" python3 -m unittest discover -s "$dir" -p 'test_*.py' done - DockerHubPushAarch64: - runs-on: [self-hosted, style-checker-aarch64] - needs: CheckLabels - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Images check + - name: PrepareRunConfig + id: runconfig run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_images_check.py --suffix aarch64 - - name: Upload images files to artifacts - uses: actions/upload-artifact@v3 - with: - name: changed_images_aarch64 - path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json - DockerHubPushAmd64: - runs-on: [self-hosted, style-checker] - needs: CheckLabels - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Images check + echo "::group::configure CI run" + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json + echo "::endgroup::" + + echo "::group::CI run configure results" + python3 -m json.tool ${{ runner.temp }}/ci_run_data.json + echo "::endgroup::" + + { + echo 'CI_DATA<> "$GITHUB_OUTPUT" + - name: Re-create GH statuses for skipped jobs if any run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_images_check.py --suffix amd64 - - name: Upload images files to artifacts - uses: actions/upload-artifact@v3 - with: - name: changed_images_amd64 - path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json - DockerHubPush: - needs: [DockerHubPushAmd64, 
DockerHubPushAarch64, PythonUnitTests] - runs-on: [self-hosted, style-checker] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags - filter: tree:0 - - name: Download changed aarch64 images - uses: actions/download-artifact@v3 - with: - name: changed_images_aarch64 - path: ${{ runner.temp }} - - name: Download changed amd64 images - uses: actions/download-artifact@v3 - with: - name: changed_images_amd64 - path: ${{ runner.temp }} - - name: Images check - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 - - name: Upload images files to artifacts - uses: actions/upload-artifact@v3 - with: - name: changed_images - path: ${{ runner.temp }}/changed_images.json + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses + BuildDockers: + needs: [RunConfig] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_docker.yml + with: + data: ${{ needs.RunConfig.outputs.data }} CompatibilityCheckX86: - needs: [BuilderDebRelease] + needs: [RunConfig, BuilderDebRelease] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_test.yml with: - test_name: Compatibility check X86 + test_name: Compatibility check (amd64) runner_type: style-checker + data: ${{ needs.RunConfig.outputs.data }} run_command: | - cd "$REPO_COPY/tests/ci" python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions CompatibilityCheckAarch64: - needs: [BuilderDebAarch64] + needs: [RunConfig, BuilderDebAarch64] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_test.yml with: - test_name: Compatibility check X86 + test_name: Compatibility check (aarch64) runner_type: style-checker + data: ${{ needs.RunConfig.outputs.data }} run_command: | - cd "$REPO_COPY/tests/ci" python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc ######################################################################################### #################################### ORDINARY BUILDS #################################### ######################################################################################### BuilderDebRelease: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_release checkout_depth: 0 + data: ${{ needs.RunConfig.outputs.data }} BuilderDebAarch64: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_aarch64 checkout_depth: 0 + data: ${{ needs.RunConfig.outputs.data }} BuilderDebAsan: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_asan + data: ${{ needs.RunConfig.outputs.data }} BuilderDebTsan: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_tsan + data: ${{ needs.RunConfig.outputs.data }} BuilderDebDebug: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: 
package_debug + data: ${{ needs.RunConfig.outputs.data }} BuilderBinDarwin: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: binary_darwin + data: ${{ needs.RunConfig.outputs.data }} checkout_depth: 0 BuilderBinDarwinAarch64: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: binary_darwin_aarch64 + data: ${{ needs.RunConfig.outputs.data }} checkout_depth: 0 ############################################################################################ ##################################### Docker images ####################################### ############################################################################################ DockerServerImages: - needs: - - BuilderDebRelease - - BuilderDebAarch64 - runs-on: [self-hosted, style-checker] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself - filter: tree:0 - - name: Check docker clickhouse/clickhouse-server building - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_server.py --release-type head --no-push \ - --image-repo clickhouse/clickhouse-server --image-path docker/server - python3 docker_server.py --release-type head --no-push \ - --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Docker server and keeper images + runner_type: style-checker + data: ${{ needs.RunConfig.outputs.data }} + checkout_depth: 0 # It MUST BE THE SAME for all dependencies and the job itself + run_command: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 docker_server.py --release-type head --no-push \ + --image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse + python3 docker_server.py --release-type head --no-push \ + --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse ############################################################################################ ##################################### BUILD REPORTER ####################################### ############################################################################################ BuilderReport: - if: ${{ success() || failure() }} + # run report check for failed builds to indicate the CI error + if: ${{ !cancelled() }} needs: - - BuilderDebRelease + - RunConfig - BuilderDebAarch64 - BuilderDebAsan - - BuilderDebTsan - BuilderDebDebug + - BuilderDebRelease + - BuilderDebTsan uses: ./.github/workflows/reusable_test.yml with: test_name: ClickHouse build check runner_type: style-checker + data: ${{ needs.RunConfig.outputs.data }} additional_envs: | NEEDS_DATA<> "$GITHUB_OUTPUT" + - name: Re-create GH statuses for skipped jobs if any run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_images_check.py --suffix amd64 - - name: Upload images files to artifacts - uses: actions/upload-artifact@v3 - with: - name: changed_images_amd64 - path: ${{ runner.temp 
}}/docker_images_check/changed_images_amd64.json - DockerHubPush: - needs: [DockerHubPushAmd64, DockerHubPushAarch64, PythonUnitTests] - runs-on: [self-hosted, style-checker] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags - filter: tree:0 - - name: Download changed aarch64 images - uses: actions/download-artifact@v3 - with: - name: changed_images_aarch64 - path: ${{ runner.temp }} - - name: Download changed amd64 images - uses: actions/download-artifact@v3 - with: - name: changed_images_amd64 - path: ${{ runner.temp }} - - name: Images check - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 - - name: Upload images files to artifacts - uses: actions/upload-artifact@v3 - with: - name: changed_images - path: ${{ runner.temp }}/changed_images.json + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses + BuildDockers: + needs: [RunConfig] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_docker.yml + with: + data: ${{ needs.RunConfig.outputs.data }} + set_latest: true StyleCheck: - needs: DockerHubPush - if: ${{ success() || failure() }} + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_test.yml with: test_name: Style check runner_type: style-checker + data: ${{ needs.RunConfig.outputs.data }} run_command: | - cd "$REPO_COPY/tests/ci" python3 style_check.py --no-push CompatibilityCheckX86: - needs: [BuilderDebRelease] + needs: [RunConfig, BuilderDebRelease] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_test.yml with: - test_name: Compatibility check X86 + test_name: Compatibility check (amd64) runner_type: style-checker + data: ${{ needs.RunConfig.outputs.data }} run_command: | - cd "$REPO_COPY/tests/ci" python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions CompatibilityCheckAarch64: - needs: [BuilderDebAarch64] + needs: [RunConfig, BuilderDebAarch64] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_test.yml with: - test_name: Compatibility check X86 + test_name: Compatibility check (aarch64) runner_type: style-checker + data: ${{ needs.RunConfig.outputs.data }} run_command: | - cd "$REPO_COPY/tests/ci" python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc ######################################################################################### #################################### ORDINARY BUILDS #################################### ######################################################################################### +# TODO: never skip builds! 
BuilderDebRelease: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: - checkout_depth: 0 build_name: package_release + checkout_depth: 0 + data: ${{ needs.RunConfig.outputs.data }} BuilderDebAarch64: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: - checkout_depth: 0 build_name: package_aarch64 + checkout_depth: 0 + data: ${{ needs.RunConfig.outputs.data }} BuilderBinRelease: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: - checkout_depth: 0 build_name: binary_release + checkout_depth: 0 # otherwise we will have no info about contributors + data: ${{ needs.RunConfig.outputs.data }} BuilderDebAsan: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_asan + data: ${{ needs.RunConfig.outputs.data }} BuilderDebUBsan: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_ubsan + data: ${{ needs.RunConfig.outputs.data }} BuilderDebTsan: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_tsan + data: ${{ needs.RunConfig.outputs.data }} BuilderDebMsan: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_msan + data: ${{ needs.RunConfig.outputs.data }} BuilderDebDebug: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_debug + data: ${{ needs.RunConfig.outputs.data }} ########################################################################################## ##################################### SPECIAL BUILDS ##################################### ########################################################################################## BuilderBinClangTidy: - needs: [DockerHubPush] + needs: [RunConfig, BuilderDebRelease] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: binary_tidy + data: ${{ needs.RunConfig.outputs.data }} BuilderBinDarwin: - needs: [DockerHubPush] + needs: [RunConfig, BuilderDebRelease] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: binary_darwin + data: ${{ needs.RunConfig.outputs.data }} checkout_depth: 0 BuilderBinAarch64: - needs: [DockerHubPush] + needs: [RunConfig, BuilderDebRelease] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: binary_aarch64 + data: ${{ needs.RunConfig.outputs.data }} checkout_depth: 0 BuilderBinFreeBSD: - needs: [DockerHubPush] + needs: [RunConfig, BuilderDebRelease] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: binary_freebsd + data: ${{ needs.RunConfig.outputs.data }} checkout_depth: 0 BuilderBinDarwinAarch64: - needs: [DockerHubPush] + needs: [RunConfig, BuilderDebRelease] + if: ${{ !failure() && !cancelled() }} uses: 
./.github/workflows/reusable_build.yml with: build_name: binary_darwin_aarch64 + data: ${{ needs.RunConfig.outputs.data }} checkout_depth: 0 BuilderBinPPC64: - needs: [DockerHubPush] + needs: [RunConfig, BuilderDebRelease] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: binary_ppc64le + data: ${{ needs.RunConfig.outputs.data }} checkout_depth: 0 BuilderBinAmd64Compat: - needs: [DockerHubPush] + needs: [RunConfig, BuilderDebRelease] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: binary_amd64_compat + data: ${{ needs.RunConfig.outputs.data }} checkout_depth: 0 BuilderBinAmd64Musl: - needs: [DockerHubPush] + needs: [RunConfig, BuilderDebRelease] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: binary_amd64_musl + data: ${{ needs.RunConfig.outputs.data }} checkout_depth: 0 BuilderBinAarch64V80Compat: - needs: [DockerHubPush] + needs: [RunConfig, BuilderDebRelease] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: binary_aarch64_v80compat + data: ${{ needs.RunConfig.outputs.data }} checkout_depth: 0 BuilderBinRISCV64: - needs: [DockerHubPush] + needs: [RunConfig, BuilderDebRelease] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: binary_riscv64 + data: ${{ needs.RunConfig.outputs.data }} checkout_depth: 0 BuilderBinS390X: - needs: [DockerHubPush] + needs: [RunConfig, BuilderDebRelease] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: binary_s390x + data: ${{ needs.RunConfig.outputs.data }} checkout_depth: 0 ############################################################################################ ##################################### Docker images ####################################### ############################################################################################ DockerServerImages: - needs: - - BuilderDebRelease - - BuilderDebAarch64 - runs-on: [self-hosted, style-checker] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself - filter: tree:0 - - name: Check docker clickhouse/clickhouse-server building - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_server.py --release-type head \ - --image-repo clickhouse/clickhouse-server --image-path docker/server - python3 docker_server.py --release-type head \ - --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Docker server and keeper images + runner_type: style-checker + data: ${{ needs.RunConfig.outputs.data }} + # FIXME: avoid using 0 checkout + checkout_depth: 0 # It MUST BE THE SAME for all dependencies and the job itself + run_command: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 docker_server.py --release-type head \ + --image-repo clickhouse/clickhouse-server --image-path docker/server + python3 docker_server.py --release-type head \ + --image-repo clickhouse/clickhouse-keeper 
--image-path docker/keeper ############################################################################################ ##################################### BUILD REPORTER ####################################### ############################################################################################ BuilderReport: - if: ${{ success() || failure() }} + # run report check for failed builds to indicate the CI error + if: ${{ !cancelled() }} needs: + - RunConfig - BuilderBinRelease - BuilderDebAarch64 - BuilderDebAsan @@ -275,16 +278,18 @@ jobs: with: test_name: ClickHouse build check runner_type: style-checker + data: ${{ needs.RunConfig.outputs.data }} additional_envs: | NEEDS_DATA<> "$GITHUB_OUTPUT" + BuildDockers: + needs: [RunConfig] + uses: ./.github/workflows/reusable_docker.yml + with: + data: "${{ needs.RunConfig.outputs.data }}" + set_latest: true SonarCloud: runs-on: [self-hosted, builder] env: diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index a6631a93766..bd2b2b60904 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -13,37 +13,25 @@ on: # yamllint disable-line rule:truthy - opened branches: - master - paths-ignore: - - '**.md' - - 'docker/docs/**' - - 'docs/**' - - 'utils/check-style/aspell-ignore/**' - - 'tests/ci/docs_check.py' - - '.github/workflows/docs_check.yml' ########################################################################################## ##################################### SMALL CHECKS ####################################### ########################################################################################## jobs: - CheckLabels: + RunConfig: runs-on: [self-hosted, style-checker] - # Run the first check always, even if the CI is cancelled - if: ${{ always() }} + outputs: + data: ${{ steps.runconfig.outputs.CI_DATA }} steps: - name: Check out repository code uses: ClickHouse/checkout@v1 with: - clear-repository: true + clear-repository: true # to ensure correct digests + fetch-depth: 0 # to get version + filter: tree:0 - name: Labels check run: | cd "$GITHUB_WORKSPACE/tests/ci" python3 run_check.py - PythonUnitTests: - runs-on: [self-hosted, style-checker] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - name: Python unit tests run: | cd "$GITHUB_WORKSPACE/tests/ci" @@ -53,249 +41,260 @@ jobs: echo "Testing $dir" python3 -m unittest discover -s "$dir" -p 'test_*.py' done - DockerHubPushAarch64: - needs: CheckLabels - runs-on: [self-hosted, style-checker-aarch64] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Images check + - name: PrepareRunConfig + id: runconfig run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_images_check.py --suffix aarch64 - - name: Upload images files to artifacts - uses: actions/upload-artifact@v3 - with: - name: changed_images_aarch64 - path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json - DockerHubPushAmd64: - needs: CheckLabels - runs-on: [self-hosted, style-checker] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Images check + echo "::group::configure CI run" + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json + echo "::endgroup::" + + echo "::group::CI run configure results" + python3 -m json.tool ${{ runner.temp }}/ci_run_data.json + echo "::endgroup::" + + { 
+ echo 'CI_DATA<> "$GITHUB_OUTPUT" + - name: Re-create GH statuses for skipped jobs if any run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_images_check.py --suffix amd64 - - name: Upload images files to artifacts - uses: actions/upload-artifact@v3 - with: - name: changed_images_amd64 - path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json - DockerHubPush: - needs: [DockerHubPushAmd64, DockerHubPushAarch64, PythonUnitTests] - runs-on: [self-hosted, style-checker] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags - filter: tree:0 - - name: Download changed aarch64 images - uses: actions/download-artifact@v3 - with: - name: changed_images_aarch64 - path: ${{ runner.temp }} - - name: Download changed amd64 images - uses: actions/download-artifact@v3 - with: - name: changed_images_amd64 - path: ${{ runner.temp }} - - name: Images check + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses + - name: Style check early + # hack to run style check before the docker build job if possible (style-check image not changed) + if: contains(fromJson(steps.runconfig.outputs.CI_DATA).jobs_data.jobs_to_do, 'Style check early') run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 - - name: Upload images files to artifacts - uses: actions/upload-artifact@v3 - with: - name: changed_images - path: ${{ runner.temp }}/changed_images.json + DOCKER_TAG=$(echo '${{ toJson(fromJson(steps.runconfig.outputs.CI_DATA).docker_data.images) }}' | tr -d '\n') + export DOCKER_TAG=$DOCKER_TAG + python3 ./tests/ci/style_check.py --no-push + BuildDockers: + needs: [RunConfig] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_docker.yml + with: + data: ${{ needs.RunConfig.outputs.data }} StyleCheck: - needs: DockerHubPush - # We need additional `&& ! cancelled()` to have the job being able to cancel - if: ${{ success() || failure() || ( always() && ! 
cancelled() ) }} + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_test.yml with: test_name: Style check runner_type: style-checker run_command: | - cd "$REPO_COPY/tests/ci" python3 style_check.py + data: ${{ needs.RunConfig.outputs.data }} secrets: secret_envs: | ROBOT_CLICKHOUSE_SSH_KEY<> "$GITHUB_OUTPUT" + - name: Re-create GH statuses for skipped jobs if any + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses + BuildDockers: + needs: [RunConfig] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_docker.yml + with: + data: ${{ needs.RunConfig.outputs.data }} CompatibilityCheckX86: - needs: [BuilderDebRelease] + needs: [RunConfig, BuilderDebRelease] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_test.yml with: - test_name: Compatibility check X86 + test_name: Compatibility check (amd64) runner_type: style-checker + data: ${{ needs.RunConfig.outputs.data }} run_command: | - cd "$REPO_COPY/tests/ci" python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions CompatibilityCheckAarch64: - needs: [BuilderDebAarch64] + needs: [RunConfig, BuilderDebAarch64] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_test.yml with: - test_name: Compatibility check X86 + test_name: Compatibility check (aarch64) runner_type: style-checker + data: ${{ needs.RunConfig.outputs.data }} run_command: | - cd "$REPO_COPY/tests/ci" python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc ######################################################################################### #################################### ORDINARY BUILDS #################################### ######################################################################################### BuilderDebRelease: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_release checkout_depth: 0 + data: ${{ needs.RunConfig.outputs.data }} BuilderDebAarch64: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_aarch64 checkout_depth: 0 + data: ${{ needs.RunConfig.outputs.data }} BuilderDebAsan: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_asan + data: ${{ needs.RunConfig.outputs.data }} BuilderDebUBsan: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_ubsan + data: ${{ needs.RunConfig.outputs.data }} BuilderDebTsan: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_tsan + data: ${{ needs.RunConfig.outputs.data }} BuilderDebMsan: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: package_msan + data: ${{ needs.RunConfig.outputs.data }} BuilderDebDebug: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: 
./.github/workflows/reusable_build.yml with: build_name: package_debug + data: ${{ needs.RunConfig.outputs.data }} BuilderBinDarwin: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: binary_darwin checkout_depth: 0 + data: ${{ needs.RunConfig.outputs.data }} BuilderBinDarwinAarch64: - needs: [DockerHubPush] + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() }} uses: ./.github/workflows/reusable_build.yml with: build_name: binary_darwin_aarch64 checkout_depth: 0 + data: ${{ needs.RunConfig.outputs.data }} ############################################################################################ ##################################### Docker images ####################################### ############################################################################################ DockerServerImages: - needs: - - BuilderDebRelease - - BuilderDebAarch64 - runs-on: [self-hosted, style-checker] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself - filter: tree:0 - - name: Check docker clickhouse/clickhouse-server building - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_server.py --release-type head --no-push \ - --image-repo clickhouse/clickhouse-server --image-path docker/server - python3 docker_server.py --release-type head --no-push \ - --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Docker server and keeper images + runner_type: style-checker + data: ${{ needs.RunConfig.outputs.data }} + checkout_depth: 0 + run_command: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 docker_server.py --release-type head --no-push \ + --image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse + python3 docker_server.py --release-type head --no-push \ + --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse ############################################################################################ ##################################### BUILD REPORTER ####################################### ############################################################################################ BuilderReport: - if: ${{ success() || failure() }} + # run report check for failed builds to indicate the CI error + if: ${{ !cancelled() }} needs: + - RunConfig - BuilderDebRelease - BuilderDebAarch64 - BuilderDebAsan @@ -189,30 +187,38 @@ jobs: with: test_name: ClickHouse build check runner_type: style-checker + data: ${{ needs.RunConfig.outputs.data }} additional_envs: | NEEDS_DATA<> "$GITHUB_ENV" << 'EOF' ${{inputs.additional_envs}} + DOCKER_TAG<> "$GITHUB_ENV" - name: Apply sparse checkout for contrib # in order to check that it doesn't break build @@ -60,20 +69,20 @@ jobs: uses: ./.github/actions/common_setup with: job_type: build_check - - name: Download changed images - uses: actions/download-artifact@v3 - with: - name: changed_images - path: ${{ env.IMAGES_PATH }} + - name: Pre + run: | + python3 
"$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.build_name}}' - name: Build run: | - cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" - - name: Upload build URLs to artifacts - if: ${{ success() || failure() }} - uses: actions/upload-artifact@v3 - with: - name: ${{ env.BUILD_URLS }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + python3 "$GITHUB_WORKSPACE/tests/ci/build_check.py" "$BUILD_NAME" + - name: Post + # it still be build report to upload for failed build job + if: always() + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.build_name}}' + - name: Mark as done + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.build_name}}' - name: Clean if: always() uses: ./.github/actions/clean diff --git a/.github/workflows/reusable_docker.yml b/.github/workflows/reusable_docker.yml new file mode 100644 index 00000000000..08a5740e7e0 --- /dev/null +++ b/.github/workflows/reusable_docker.yml @@ -0,0 +1,68 @@ +name: Build docker images +'on': + workflow_call: + inputs: + data: + description: json with ci data from todo job + required: true + type: string + set_latest: + description: set latest tag for resulting multiarch manifest + required: false + type: boolean + default: false +jobs: + DockerBuildAarch64: + runs-on: [self-hosted, style-checker-aarch64] + if: | + !failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_aarch64) != '[]' + steps: + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + ref: ${{ fromJson(inputs.data).git_ref }} + - name: Build images + run: | + python3 "${GITHUB_WORKSPACE}/tests/ci/docker_images_check.py" \ + --suffix aarch64 \ + --image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \ + --missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_aarch64) }}' + DockerBuildAmd64: + runs-on: [self-hosted, style-checker] + if: | + !failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_amd64) != '[]' + steps: + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + ref: ${{ fromJson(inputs.data).git_ref }} + - name: Build images + run: | + python3 "${GITHUB_WORKSPACE}/tests/ci/docker_images_check.py" \ + --suffix amd64 \ + --image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \ + --missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_amd64) }}' + DockerMultiArchManifest: + needs: [DockerBuildAmd64, DockerBuildAarch64] + runs-on: [self-hosted, style-checker] + if: | + !failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_multi) != '[]' + steps: + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + ref: ${{ fromJson(inputs.data).git_ref }} + - name: Build images + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + if [ "${{ inputs.set_latest }}" == "true" ]; then + echo "latest tag will be set for resulting manifests" + python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \ + --image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \ + --missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}' \ + --set-latest + else + python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \ + --image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \ + --missing-images '${{ 
toJson(fromJson(inputs.data).docker_data.missing_multi) }}' + fi diff --git a/.github/workflows/reusable_simple_job.yml b/.github/workflows/reusable_simple_job.yml new file mode 100644 index 00000000000..ea196a32664 --- /dev/null +++ b/.github/workflows/reusable_simple_job.yml @@ -0,0 +1,90 @@ +### For the pure soul wishes to move it to another place +# https://github.com/orgs/community/discussions/9050 + +name: Simple job +'on': + workflow_call: + inputs: + test_name: + description: the value of test type from tests/ci/ci_config.py, ends up as $CHECK_NAME ENV + required: true + type: string + runner_type: + description: the label of runner to use + required: true + type: string + run_command: + description: the command to launch the check + default: "" + required: false + type: string + checkout_depth: + description: the value of the git shallow checkout + required: false + type: number + default: 1 + submodules: + description: if the submodules should be checked out + required: false + type: boolean + default: false + additional_envs: + description: additional ENV variables to setup the job + type: string + working-directory: + description: sets custom working directory + type: string + default: "" + git_ref: + description: commit to use, merge commit for pr or head + required: false + type: string + default: ${{ github.event.after }} # no merge commit + secrets: + secret_envs: + description: if given, it's passed to the environments + required: false + + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + CHECK_NAME: ${{inputs.test_name}} + +jobs: + Test: + runs-on: [self-hosted, '${{inputs.runner_type}}'] + name: ${{inputs.test_name}} + env: + GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}} + steps: + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + clear-repository: true + ref: ${{ inputs.git_ref }} + submodules: ${{inputs.submodules}} + fetch-depth: ${{inputs.checkout_depth}} + filter: tree:0 + - name: Set build envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + CHECK_NAME=${{ inputs.test_name }} + ${{inputs.additional_envs}} + ${{secrets.secret_envs}} + EOF + - name: Common setup + uses: ./.github/actions/common_setup + with: + job_type: test + - name: Run + run: | + if [ -n '${{ inputs.working-directory }}' ]; then + cd "${{ inputs.working-directory }}" + else + cd "$GITHUB_WORKSPACE/tests/ci" + fi + ${{ inputs.run_command }} + - name: Clean + if: always() + uses: ./.github/actions/clean diff --git a/.github/workflows/reusable_test.yml b/.github/workflows/reusable_test.yml index e82d2d51596..09177ad887a 100644 --- a/.github/workflows/reusable_test.yml +++ b/.github/workflows/reusable_test.yml @@ -14,13 +14,10 @@ name: Testing workflow required: true type: string run_command: - description: the command to launch the check. 
Usually starts with `cd '$REPO_COPY/tests/ci'` - required: true + description: the command to launch the check + default: "" + required: false type: string - batches: - description: how many batches for the test will be launched - default: 1 - type: number checkout_depth: description: the value of the git shallow checkout required: false @@ -34,80 +31,89 @@ name: Testing workflow additional_envs: description: additional ENV variables to setup the job type: string + data: + description: ci data + type: string + required: true + working-directory: + description: sets custom working directory + type: string + default: "" secrets: secret_envs: description: if given, it's passed to the environments required: false + env: # Force the stdout and stderr streams to be unbuffered PYTHONUNBUFFERED: 1 CHECK_NAME: ${{inputs.test_name}} jobs: - PrepareStrategy: - # batches < 1 is misconfiguration, - # and we need this step only for batches > 1 - if: ${{ inputs.batches > 1 }} - runs-on: [self-hosted, style-checker-aarch64] - outputs: - batches: ${{steps.batches.outputs.batches}} - steps: - - name: Calculate batches - id: batches - run: | - batches_output=$(python3 -c 'import json; print(json.dumps(list(range(${{inputs.batches}}))))') - echo "batches=${batches_output}" >> "$GITHUB_OUTPUT" Test: - # If PrepareStrategy is skipped for batches == 1, - # we still need to launch the test. - # `! failure()` is mandatory here to launch on skipped Job - # `&& !cancelled()` to allow the be cancelable - if: ${{ ( !failure() && !cancelled() ) && inputs.batches > 0 }} - # Do not add `-0` to the end, if there's only one batch - name: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }} - env: - GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }} runs-on: [self-hosted, '${{inputs.runner_type}}'] - needs: [PrepareStrategy] + if: ${{ !failure() && !cancelled() && contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.test_name) }} + name: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }} + env: + GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }} strategy: fail-fast: false # we always wait for entire matrix matrix: - # if PrepareStrategy does not have batches, we use 0 - batch: ${{ needs.PrepareStrategy.outputs.batches - && fromJson(needs.PrepareStrategy.outputs.batches) - || fromJson('[0]')}} + batch: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].batches }} steps: - name: Check out repository code uses: ClickHouse/checkout@v1 with: clear-repository: true + ref: ${{ fromJson(inputs.data).git_ref }} submodules: ${{inputs.submodules}} fetch-depth: ${{inputs.checkout_depth}} filter: tree:0 - name: Set build envs run: | cat >> "$GITHUB_ENV" << 'EOF' + CHECK_NAME=${{ inputs.test_name }} ${{inputs.additional_envs}} ${{secrets.secret_envs}} + DOCKER_TAG< 1}} + if: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 }} run: | cat >> "$GITHUB_ENV" << 'EOF' RUN_BY_HASH_NUM=${{matrix.batch}} - RUN_BY_HASH_TOTAL=${{inputs.batches}} + RUN_BY_HASH_TOTAL=${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches }} EOF - - name: Run test - run: ${{inputs.run_command}} + - name: Pre run + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} 
--pre --job-name '${{inputs.test_name}}' + - name: Run + run: | + if [ -n "${{ inputs.working-directory }}" ]; then + cd "${{ inputs.working-directory }}" + else + cd "$GITHUB_WORKSPACE/tests/ci" + fi + if [ -n "$(echo '${{ inputs.run_command }}' | tr -d '\n')" ]; then + echo "Running command from workflow input" + ${{ inputs.run_command }} + else + echo "Running command from job config" + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --run --job-name '${{inputs.test_name}}' + fi + - name: Post run + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.test_name}}' + - name: Mark as done + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.test_name}}' --batch ${{matrix.batch}} - name: Clean if: always() uses: ./.github/actions/clean diff --git a/.gitmessage b/.gitmessage new file mode 100644 index 00000000000..f4a25a837bc --- /dev/null +++ b/.gitmessage @@ -0,0 +1,10 @@ + + +## To avoid merge commit in CI run (add a leading space to apply): +#no-merge-commit + +## Running specified job (add a leading space to apply): +#job_ +#job_stateless_tests_release +#job_package_debug +#job_integration_tests_asan diff --git a/.gitmodules b/.gitmodules index 4b86f0468f1..53ef899dd99 100644 --- a/.gitmodules +++ b/.gitmodules @@ -354,6 +354,9 @@ [submodule "contrib/aklomp-base64"] path = contrib/aklomp-base64 url = https://github.com/aklomp/base64.git +[submodule "contrib/pocketfft"] + path = contrib/pocketfft + url = https://github.com/mreineck/pocketfft.git [submodule "contrib/sqids-cpp"] path = contrib/sqids-cpp url = https://github.com/sqids/sqids-cpp.git diff --git a/CHANGELOG.md b/CHANGELOG.md index ca5c7a5eaf1..283000f1804 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ ### Table of Contents +**[ClickHouse release v23.12, 2023-12-28](#2312)**
**[ClickHouse release v23.11, 2023-12-06](#2311)**
**[ClickHouse release v23.10, 2023-11-02](#2310)**
**[ClickHouse release v23.9, 2023-09-28](#239)**
@@ -14,6 +15,146 @@ # 2023 Changelog +### ClickHouse release 23.12, 2023-12-28 + +#### Backward Incompatible Change +* Fix check for non-deterministic functions in TTL expressions. Previously, you could create a TTL expression with non-deterministic functions in some cases, which could lead to undefined behavior later. This fixes [#37250](https://github.com/ClickHouse/ClickHouse/issues/37250). Disallow TTL expressions that don't depend on any columns of a table by default. It can be allowed back by `SET allow_suspicious_ttl_expressions = 1` or `SET compatibility = '23.11'`. Closes [#37286](https://github.com/ClickHouse/ClickHouse/issues/37286). [#51858](https://github.com/ClickHouse/ClickHouse/pull/51858) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* The MergeTree setting `clean_deleted_rows` is deprecated, it has no effect anymore. The `CLEANUP` keyword for the `OPTIMIZE` is not allowed by default (it can be unlocked with the `allow_experimental_replacing_merge_with_cleanup` setting). [#58267](https://github.com/ClickHouse/ClickHouse/pull/58267) ([Alexander Tokmakov](https://github.com/tavplubix)). This fixes [#57930](https://github.com/ClickHouse/ClickHouse/issues/57930). This closes [#54988](https://github.com/ClickHouse/ClickHouse/issues/54988). This closes [#54570](https://github.com/ClickHouse/ClickHouse/issues/54570). This closes [#50346](https://github.com/ClickHouse/ClickHouse/issues/50346). This closes [#47579](https://github.com/ClickHouse/ClickHouse/issues/47579). The feature has to be removed because it is not good. We have to remove it as quickly as possible, because there is no other option. [#57932](https://github.com/ClickHouse/ClickHouse/pull/57932) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### New Feature +* Implement Refreshable Materialized Views, requested in [#33919](https://github.com/ClickHouse/ClickHouse/issues/57995). [#56946](https://github.com/ClickHouse/ClickHouse/pull/56946) ([Michael Kolupaev](https://github.com/al13n321), [Michael Guzov](https://github.com/koloshmet)). +* Introduce `PASTE JOIN`, which allows users to join tables without `ON` clause simply by row numbers. Example: `SELECT * FROM (SELECT number AS a FROM numbers(2)) AS t1 PASTE JOIN (SELECT number AS a FROM numbers(2) ORDER BY a DESC) AS t2`. [#57995](https://github.com/ClickHouse/ClickHouse/pull/57995) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* The `ORDER BY` clause now supports specifying `ALL`, meaning that ClickHouse sorts by all columns in the `SELECT` clause. Example: `SELECT col1, col2 FROM tab WHERE [...] ORDER BY ALL`. [#57875](https://github.com/ClickHouse/ClickHouse/pull/57875) ([zhongyuankai](https://github.com/zhongyuankai)). +* Added a new mutation command `ALTER TABLE APPLY DELETED MASK`, which allows to enforce applying of mask written by lightweight delete and to remove rows marked as deleted from disk. [#57433](https://github.com/ClickHouse/ClickHouse/pull/57433) ([Anton Popov](https://github.com/CurtizJ)). +* A handler `/binary` opens a visual viewer of symbols inside the ClickHouse binary. [#58211](https://github.com/ClickHouse/ClickHouse/pull/58211) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Added a new SQL function `sqid` to generate Sqids (https://sqids.org/), example: `SELECT sqid(125, 126)`. [#57512](https://github.com/ClickHouse/ClickHouse/pull/57512) ([Robert Schulze](https://github.com/rschu1ze)). +* Add a new function `seriesPeriodDetectFFT` to detect series period using FFT. 
[#57574](https://github.com/ClickHouse/ClickHouse/pull/57574) ([Bhavna Jindal](https://github.com/bhavnajindal)). +* Add an HTTP endpoint for checking if Keeper is ready to accept traffic. [#55876](https://github.com/ClickHouse/ClickHouse/pull/55876) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Add 'union' mode for schema inference. In this mode the resulting table schema is the union of all files schemas (so schema is inferred from each file). The mode of schema inference is controlled by a setting `schema_inference_mode` with two possible values - `default` and `union`. Closes [#55428](https://github.com/ClickHouse/ClickHouse/issues/55428). [#55892](https://github.com/ClickHouse/ClickHouse/pull/55892) ([Kruglov Pavel](https://github.com/Avogar)). +* Add new setting `input_format_csv_try_infer_numbers_from_strings` that allows to infer numbers from strings in CSV format. Closes [#56455](https://github.com/ClickHouse/ClickHouse/issues/56455). [#56859](https://github.com/ClickHouse/ClickHouse/pull/56859) ([Kruglov Pavel](https://github.com/Avogar)). +* When the number of databases or tables exceeds a configurable threshold, show a warning to the user. [#57375](https://github.com/ClickHouse/ClickHouse/pull/57375) ([凌涛](https://github.com/lingtaolf)). +* Dictionary with `HASHED_ARRAY` (and `COMPLEX_KEY_HASHED_ARRAY`) layout supports `SHARDS` similarly to `HASHED`. [#57544](https://github.com/ClickHouse/ClickHouse/pull/57544) ([vdimir](https://github.com/vdimir)). +* Add asynchronous metrics for total primary key bytes and total allocated primary key bytes in memory. [#57551](https://github.com/ClickHouse/ClickHouse/pull/57551) ([Bharat Nallan](https://github.com/bharatnc)). +* Add `SHA512_256` function. [#57645](https://github.com/ClickHouse/ClickHouse/pull/57645) ([Bharat Nallan](https://github.com/bharatnc)). +* Add `FORMAT_BYTES` as an alias for `formatReadableSize`. [#57592](https://github.com/ClickHouse/ClickHouse/pull/57592) ([Bharat Nallan](https://github.com/bharatnc)). +* Allow passing optional session token to the `s3` table function. [#57850](https://github.com/ClickHouse/ClickHouse/pull/57850) ([Shani Elharrar](https://github.com/shanielh)). +* Introduce a new setting `http_make_head_request`. If it is turned off, the URL table engine will not do a HEAD request to determine the file size. This is needed to support inefficient, misconfigured, or not capable HTTP servers. [#54602](https://github.com/ClickHouse/ClickHouse/pull/54602) ([Fionera](https://github.com/fionera)). +* It is now possible to refer to ALIAS column in index (non-primary-key) definitions (issue [#55650](https://github.com/ClickHouse/ClickHouse/issues/55650)). Example: `CREATE TABLE tab(col UInt32, col_alias ALIAS col + 1, INDEX idx (col_alias) TYPE minmax) ENGINE = MergeTree ORDER BY col;`. [#57546](https://github.com/ClickHouse/ClickHouse/pull/57546) ([Robert Schulze](https://github.com/rschu1ze)). +* Added a new setting `readonly` which can be used to specify an S3 disk is read only. It can be useful to create a table on a disk of `s3_plain` type, while having read only access to the underlying S3 bucket. [#57977](https://github.com/ClickHouse/ClickHouse/pull/57977) ([Pengyuan Bian](https://github.com/bianpengyuan)). +* The primary key analysis in MergeTree tables will now be applied to predicates that include the virtual column `_part_offset` (optionally with `_part`). This feature can serve as a special kind of a secondary index. 
[#58224](https://github.com/ClickHouse/ClickHouse/pull/58224) ([Amos Bird](https://github.com/amosbird)).
+
+#### Performance Improvement
+* Extract non-intersecting parts ranges from a MergeTree table during FINAL processing. That way we can avoid additional FINAL logic for these non-intersecting parts ranges. In cases when the amount of duplicate values with the same primary key is low, performance will be almost the same as without FINAL. Improve reading performance for MergeTree FINAL when the `do_not_merge_across_partitions_select_final` setting is set. [#58120](https://github.com/ClickHouse/ClickHouse/pull/58120) ([Maksim Kita](https://github.com/kitaisreal)).
+* Made copying between s3 disks use a server-side s3 copy instead of copying through the buffer. Improves `BACKUP/RESTORE` operations and the `clickhouse-disks copy` command. [#56744](https://github.com/ClickHouse/ClickHouse/pull/56744) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
+* Hash JOIN respects the setting `max_joined_block_size_rows` and does not produce large blocks for `ALL JOIN`. [#56996](https://github.com/ClickHouse/ClickHouse/pull/56996) ([vdimir](https://github.com/vdimir)).
+* Release memory for aggregation earlier. This may avoid unnecessary external aggregation. [#57691](https://github.com/ClickHouse/ClickHouse/pull/57691) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Improve performance of string serialization. [#57717](https://github.com/ClickHouse/ClickHouse/pull/57717) ([Maksim Kita](https://github.com/kitaisreal)).
+* Support trivial count optimization for `Merge`-engine tables. [#57867](https://github.com/ClickHouse/ClickHouse/pull/57867) ([skyoct](https://github.com/skyoct)).
+* Optimized aggregation in some cases. [#57872](https://github.com/ClickHouse/ClickHouse/pull/57872) ([Anton Popov](https://github.com/CurtizJ)).
+* The `hasAny` function can now take advantage of the full-text skipping indices. [#57878](https://github.com/ClickHouse/ClickHouse/pull/57878) ([Jpnock](https://github.com/Jpnock)).
+* Function `if(cond, then, else)` (and its alias `cond ? then : else`) was optimized to use branch-free evaluation. [#57885](https://github.com/ClickHouse/ClickHouse/pull/57885) ([zhanglistar](https://github.com/zhanglistar)).
+* MergeTree automatically derives the `do_not_merge_across_partitions_select_final` setting if the partition key expression contains only columns from the primary key expression. [#58218](https://github.com/ClickHouse/ClickHouse/pull/58218) ([Maksim Kita](https://github.com/kitaisreal)).
+* Speed up `MIN` and `MAX` for native types. [#58231](https://github.com/ClickHouse/ClickHouse/pull/58231) ([Raúl Marín](https://github.com/Algunenano)).
+* Implement the `SLRU` cache policy for the filesystem cache. [#57076](https://github.com/ClickHouse/ClickHouse/pull/57076) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* The limit for the number of connections per endpoint for background fetches was raised from `15` to the value of the `background_fetches_pool_size` setting. The MergeTree-level setting `replicated_max_parallel_fetches_for_host` became obsolete. The MergeTree-level settings `replicated_fetches_http_connection_timeout`, `replicated_fetches_http_send_timeout` and `replicated_fetches_http_receive_timeout` are moved to the Server level. The setting `keep_alive_timeout` is added to the list of Server-level settings. [#57523](https://github.com/ClickHouse/ClickHouse/pull/57523) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Make querying `system.filesystem_cache` not memory intensive. [#57687](https://github.com/ClickHouse/ClickHouse/pull/57687) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Reduce memory usage on string deserialization. [#57787](https://github.com/ClickHouse/ClickHouse/pull/57787) ([Maksim Kita](https://github.com/kitaisreal)).
+* More efficient constructor for Enum - it makes sense when Enum has a boatload of values. [#57887](https://github.com/ClickHouse/ClickHouse/pull/57887) ([Duc Canh Le](https://github.com/canhld94)).
+* An improvement for reading from the filesystem cache: always use the `pread` method. [#57970](https://github.com/ClickHouse/ClickHouse/pull/57970) ([Nikita Taranov](https://github.com/nickitat)).
+* Add optimization for AND notEquals chain in logical expression optimizer. This optimization is only available with the experimental Analyzer enabled. [#58214](https://github.com/ClickHouse/ClickHouse/pull/58214) ([Kevin Mingtarja](https://github.com/kevinmingtarja)).
+
+#### Improvement
+* Support for soft memory limit in Keeper. It will refuse requests if the memory usage is close to the maximum. [#57271](https://github.com/ClickHouse/ClickHouse/pull/57271) ([Han Fei](https://github.com/hanfei1991)). [#57699](https://github.com/ClickHouse/ClickHouse/pull/57699) ([Han Fei](https://github.com/hanfei1991)).
+* Make inserts into distributed tables handle updated cluster configuration properly. When the list of cluster nodes is dynamically updated, the Directory Monitor of the distributed table will update it. [#42826](https://github.com/ClickHouse/ClickHouse/pull/42826) ([zhongyuankai](https://github.com/zhongyuankai)).
+* Do not allow creating a replicated table with inconsistent merge parameters. [#56833](https://github.com/ClickHouse/ClickHouse/pull/56833) ([Duc Canh Le](https://github.com/canhld94)).
+* Show uncompressed size in `system.tables`. [#56618](https://github.com/ClickHouse/ClickHouse/issues/56618). [#57186](https://github.com/ClickHouse/ClickHouse/pull/57186) ([Chen Lixiang](https://github.com/chenlx0)).
+* Add `skip_unavailable_shards` as a setting for `Distributed` tables that is similar to the corresponding query-level setting. Closes [#43666](https://github.com/ClickHouse/ClickHouse/issues/43666). [#57218](https://github.com/ClickHouse/ClickHouse/pull/57218) ([Gagan Goel](https://github.com/tntnatbry)).
+* The function `substring` (aliases: `substr`, `mid`) can now be used with `Enum` types. Previously, the first function argument had to be a value of type `String` or `FixedString`. This improves compatibility with 3rd party tools such as Tableau via the MySQL interface. [#57277](https://github.com/ClickHouse/ClickHouse/pull/57277) ([Serge Klochkov](https://github.com/slvrtrn)).
+* Function `format` now supports arbitrary argument types (instead of only `String` and `FixedString` arguments). This is important to calculate `SELECT format('The {0} to all questions is {1}', 'answer', 42)`. [#57549](https://github.com/ClickHouse/ClickHouse/pull/57549) ([Robert Schulze](https://github.com/rschu1ze)).
+* Allow using the `date_trunc` function with a case-insensitive first argument. Both cases are now supported: `SELECT date_trunc('day', now())` and `SELECT date_trunc('DAY', now())`. [#57624](https://github.com/ClickHouse/ClickHouse/pull/57624) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
+* Better hints when a table doesn't exist. [#57342](https://github.com/ClickHouse/ClickHouse/pull/57342) ([Bharat Nallan](https://github.com/bharatnc)).
+* Allow overwriting the `max_partition_size_to_drop` and `max_table_size_to_drop` server settings at query time. [#57452](https://github.com/ClickHouse/ClickHouse/pull/57452) ([Jordi Villar](https://github.com/jrdi)). +* Slightly better inference of unnamed tuples in JSON formats. [#57751](https://github.com/ClickHouse/ClickHouse/pull/57751) ([Kruglov Pavel](https://github.com/Avogar)). +* Add support for the read-only flag when connecting to Keeper (fixes [#53749](https://github.com/ClickHouse/ClickHouse/issues/53749)). [#57479](https://github.com/ClickHouse/ClickHouse/pull/57479) ([Mikhail Koviazin](https://github.com/mkmkme)). +* Fix possible distributed sends getting stuck due to "No such file or directory" (while recovering a batch from disk). Fix possible issues with `error_count` from `system.distribution_queue` (in case of `distributed_directory_monitor_max_sleep_time_ms` >5min). Introduce a profile event to track async INSERT failures - `DistributedAsyncInsertionFailures`. [#57480](https://github.com/ClickHouse/ClickHouse/pull/57480) ([Azat Khuzhin](https://github.com/azat)). +* Support PostgreSQL generated columns and default column values in `MaterializedPostgreSQL` (experimental feature). Closes [#40449](https://github.com/ClickHouse/ClickHouse/issues/40449). [#57568](https://github.com/ClickHouse/ClickHouse/pull/57568) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Allow applying some filesystem cache config settings changes without a server restart. [#57578](https://github.com/ClickHouse/ClickHouse/pull/57578) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Properly handle PostgreSQL table structures with empty arrays. [#57618](https://github.com/ClickHouse/ClickHouse/pull/57618) ([Mike Kot](https://github.com/myrrc)). +* Expose the total number of errors that occurred since the last server restart as a `ClickHouseErrorMetric_ALL` metric. [#57627](https://github.com/ClickHouse/ClickHouse/pull/57627) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Allow nodes in the configuration file with a `from_env`/`from_zk` reference and a non-empty element with replace=1. [#57628](https://github.com/ClickHouse/ClickHouse/pull/57628) ([Azat Khuzhin](https://github.com/azat)). +* Add a table function `fuzzJSON`, which allows generating a lot of malformed JSON for fuzzing. [#57646](https://github.com/ClickHouse/ClickHouse/pull/57646) ([Julia Kartseva](https://github.com/jkartseva)). +* Allow IPv6 to UInt128 conversion and binary arithmetic. [#57707](https://github.com/ClickHouse/ClickHouse/pull/57707) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Add a setting for the `async inserts deduplication cache` - how long we wait for a cache update. Deprecate the setting `async_block_ids_cache_min_update_interval_ms`. Now the cache is updated only in case of conflicts. [#57743](https://github.com/ClickHouse/ClickHouse/pull/57743) ([alesapin](https://github.com/alesapin)). +* The `sleep()` function can now be cancelled with `KILL QUERY`. [#57746](https://github.com/ClickHouse/ClickHouse/pull/57746) ([Vitaly Baranov](https://github.com/vitlibar)). +* Forbid `CREATE TABLE ... AS SELECT` queries for `Replicated` table engines in the experimental `Replicated` database because they are not supported. Reference [#35408](https://github.com/ClickHouse/ClickHouse/issues/35408). [#57796](https://github.com/ClickHouse/ClickHouse/pull/57796) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix and improve transforming queries for external databases, to recursively obtain all compatible predicates. 
[#57888](https://github.com/ClickHouse/ClickHouse/pull/57888) ([flynn](https://github.com/ucasfl)). +* Support dynamic reloading of the filesystem cache size. Closes [#57866](https://github.com/ClickHouse/ClickHouse/issues/57866). [#57897](https://github.com/ClickHouse/ClickHouse/pull/57897) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Correctly support `system.stack_trace` for threads with blocked SIGRTMIN (these threads can exist in low-quality external libraries such as Apache rdkafka). [#57907](https://github.com/ClickHouse/ClickHouse/pull/57907) ([Azat Khuzhin](https://github.com/azat)). Also, send the signal to the threads only if it is not blocked, to avoid waiting `storage_system_stack_trace_pipe_read_timeout_ms` when it does not make any sense. [#58136](https://github.com/ClickHouse/ClickHouse/pull/58136) ([Azat Khuzhin](https://github.com/azat)). +* Tolerate Keeper failures in the quorum inserts' check. [#57986](https://github.com/ClickHouse/ClickHouse/pull/57986) ([Raúl Marín](https://github.com/Algunenano)). +* Add max/peak RSS (`MemoryResidentMax`) into system.asynchronous_metrics. [#58095](https://github.com/ClickHouse/ClickHouse/pull/58095) ([Azat Khuzhin](https://github.com/azat)). +* Allow users to use s3-style links (`https://` and `s3://`) without mentioning the region if it's not the default one. Also find the correct region if the user mentioned the wrong one. [#58148](https://github.com/ClickHouse/ClickHouse/pull/58148) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* `clickhouse-format --obfuscate` will know about Settings, MergeTreeSettings, and time zones and keep their names unchanged. [#58179](https://github.com/ClickHouse/ClickHouse/pull/58179) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Added an explicit `finalize()` function in `ZipArchiveWriter`. Simplify overly complicated code in `ZipArchiveWriter`. This fixes [#58074](https://github.com/ClickHouse/ClickHouse/issues/58074). [#58202](https://github.com/ClickHouse/ClickHouse/pull/58202) ([Vitaly Baranov](https://github.com/vitlibar)). +* Make caches with the same path use the same cache objects. This behaviour existed before, but was broken in 23.4. If such caches with the same path have different sets of cache settings, an exception will be thrown, as this is not allowed. [#58264](https://github.com/ClickHouse/ClickHouse/pull/58264) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Parallel replicas (experimental feature): friendly settings [#57542](https://github.com/ClickHouse/ClickHouse/pull/57542) ([Igor Nikonov](https://github.com/devcrafter)). +* Parallel replicas (experimental feature): announcement response handling improvement [#57749](https://github.com/ClickHouse/ClickHouse/pull/57749) ([Igor Nikonov](https://github.com/devcrafter)). +* Parallel replicas (experimental feature): give more respect to `min_number_of_marks` in `ParallelReplicasReadingCoordinator` [#57763](https://github.com/ClickHouse/ClickHouse/pull/57763) ([Nikita Taranov](https://github.com/nickitat)). +* Parallel replicas (experimental feature): disable parallel replicas with IN (subquery) [#58133](https://github.com/ClickHouse/ClickHouse/pull/58133) ([Igor Nikonov](https://github.com/devcrafter)). +* Parallel replicas (experimental feature): add profile event 'ParallelReplicasUsedCount' [#58173](https://github.com/ClickHouse/ClickHouse/pull/58173) ([Igor Nikonov](https://github.com/devcrafter)). +* Non-POST requests such as HEAD will be readonly, similar to GET. 
[#58060](https://github.com/ClickHouse/ClickHouse/pull/58060) ([San](https://github.com/santrancisco)). +* Add `bytes_uncompressed` column to `system.part_log` [#58167](https://github.com/ClickHouse/ClickHouse/pull/58167) ([Jordi Villar](https://github.com/jrdi)). +* Add base backup name to `system.backups` and `system.backup_log` tables [#58178](https://github.com/ClickHouse/ClickHouse/pull/58178) ([Pradeep Chhetri](https://github.com/chhetripradeep)). +* Add support for specifying query parameters in the command line in clickhouse-local [#58210](https://github.com/ClickHouse/ClickHouse/pull/58210) ([Pradeep Chhetri](https://github.com/chhetripradeep)). + +#### Build/Testing/Packaging Improvement +* Randomize more settings [#39663](https://github.com/ClickHouse/ClickHouse/pull/39663) ([Anton Popov](https://github.com/CurtizJ)). +* Randomize disabled optimizations in CI [#57315](https://github.com/ClickHouse/ClickHouse/pull/57315) ([Raúl Marín](https://github.com/Algunenano)). +* Allow usage of Azure-related table engines/functions on macOS. [#51866](https://github.com/ClickHouse/ClickHouse/pull/51866) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* ClickHouse Fast Test now uses Musl instead of GLibc. [#57711](https://github.com/ClickHouse/ClickHouse/pull/57711) ([Alexey Milovidov](https://github.com/alexey-milovidov)). The fully-static Musl build is available to download from the CI. +* Run ClickBench for every commit. This closes [#57708](https://github.com/ClickHouse/ClickHouse/issues/57708). [#57712](https://github.com/ClickHouse/ClickHouse/pull/57712) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove the usage of a harmful C/POSIX `select` function from external libraries. [#57467](https://github.com/ClickHouse/ClickHouse/pull/57467) ([Igor Nikonov](https://github.com/devcrafter)). +* Settings only available in ClickHouse Cloud will be also present in the open-source ClickHouse build for convenience. [#57638](https://github.com/ClickHouse/ClickHouse/pull/57638) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Fixed a possibility of sorting order breakage in TTL GROUP BY [#49103](https://github.com/ClickHouse/ClickHouse/pull/49103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix: split `lttb` bucket strategy, first bucket and last bucket should only contain single point [#57003](https://github.com/ClickHouse/ClickHouse/pull/57003) ([FFish](https://github.com/wxybear)). +* Fix possible deadlock in the `Template` format during sync after error [#57004](https://github.com/ClickHouse/ClickHouse/pull/57004) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix early stop while parsing a file with skipping lots of errors [#57006](https://github.com/ClickHouse/ClickHouse/pull/57006) ([Kruglov Pavel](https://github.com/Avogar)). +* Prevent dictionary's ACL bypass via the `dictionary` table function [#57362](https://github.com/ClickHouse/ClickHouse/pull/57362) ([Salvatore Mesoraca](https://github.com/aiven-sal)). +* Fix another case of a "non-ready set" error found by Fuzzer. [#57423](https://github.com/ClickHouse/ClickHouse/pull/57423) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix several issues regarding PostgreSQL `array_ndims` usage. [#57436](https://github.com/ClickHouse/ClickHouse/pull/57436) ([Ryan Jacobs](https://github.com/ryanmjacobs)). 
+* Fix RWLock inconsistency after write lock timeout [#57454](https://github.com/ClickHouse/ClickHouse/pull/57454) ([Vitaly Baranov](https://github.com/vitlibar)). Fix RWLock inconsistency after write lock timeout (again) [#57733](https://github.com/ClickHouse/ClickHouse/pull/57733) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix: don't exclude ephemeral column when building the pushing-to-view chain [#57461](https://github.com/ClickHouse/ClickHouse/pull/57461) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* MaterializedPostgreSQL (experimental feature): fix issue [#41922](https://github.com/ClickHouse/ClickHouse/issues/41922), add test for [#41923](https://github.com/ClickHouse/ClickHouse/issues/41923) [#57515](https://github.com/ClickHouse/ClickHouse/pull/57515) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Ignore ON CLUSTER clause in grant/revoke queries for management of replicated access entities. [#57538](https://github.com/ClickHouse/ClickHouse/pull/57538) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* Fix crash in clickhouse-local [#57553](https://github.com/ClickHouse/ClickHouse/pull/57553) ([Nikolay Degterinsky](https://github.com/evillique)). +* A fix for Hash JOIN. [#57564](https://github.com/ClickHouse/ClickHouse/pull/57564) ([vdimir](https://github.com/vdimir)). +* Fix possible error in PostgreSQL source [#57567](https://github.com/ClickHouse/ClickHouse/pull/57567) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix type correction in Hash JOIN for nested LowCardinality. [#57614](https://github.com/ClickHouse/ClickHouse/pull/57614) ([vdimir](https://github.com/vdimir)). +* Avoid hangs of `system.stack_trace` by correctly prohibiting parallel reading from it. [#57641](https://github.com/ClickHouse/ClickHouse/pull/57641) ([Azat Khuzhin](https://github.com/azat)). +* Fix an error for aggregation of sparse columns with `any(...) RESPECT NULL` [#57710](https://github.com/ClickHouse/ClickHouse/pull/57710) ([Azat Khuzhin](https://github.com/azat)). +* Fix unary operators parsing [#57713](https://github.com/ClickHouse/ClickHouse/pull/57713) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix dependency loading for the experimental table engine `MaterializedPostgreSQL`. [#57754](https://github.com/ClickHouse/ClickHouse/pull/57754) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix retries for disconnected nodes for BACKUP/RESTORE ON CLUSTER [#57764](https://github.com/ClickHouse/ClickHouse/pull/57764) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix result of external aggregation in case of partially materialized projection [#57790](https://github.com/ClickHouse/ClickHouse/pull/57790) ([Anton Popov](https://github.com/CurtizJ)). +* Fix merge in aggregation functions with `*Map` combinator [#57795](https://github.com/ClickHouse/ClickHouse/pull/57795) ([Anton Popov](https://github.com/CurtizJ)). +* Disable `system.kafka_consumers` because it has a bug. [#57822](https://github.com/ClickHouse/ClickHouse/pull/57822) ([Azat Khuzhin](https://github.com/azat)). +* Fix LowCardinality keys support in Merge JOIN. [#57827](https://github.com/ClickHouse/ClickHouse/pull/57827) ([vdimir](https://github.com/vdimir)). +* A fix for `InterpreterCreateQuery` related to the sample block. [#57855](https://github.com/ClickHouse/ClickHouse/pull/57855) ([Maksim Kita](https://github.com/kitaisreal)). +* `addresses_expr` was ignored for named collections from PostgreSQL. 
[#57874](https://github.com/ClickHouse/ClickHouse/pull/57874) ([joelynch](https://github.com/joelynch)). +* Fix invalid memory access in BLAKE3 (Rust) [#57876](https://github.com/ClickHouse/ClickHouse/pull/57876) ([Raúl Marín](https://github.com/Algunenano)). Then it was rewritten from Rust to C++ for better [memory-safety](https://www.memorysafety.org/). [#57994](https://github.com/ClickHouse/ClickHouse/pull/57994) ([Raúl Marín](https://github.com/Algunenano)). +* Normalize function names in `CREATE INDEX` [#57906](https://github.com/ClickHouse/ClickHouse/pull/57906) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix handling of unavailable replicas before first request happened [#57933](https://github.com/ClickHouse/ClickHouse/pull/57933) ([Nikita Taranov](https://github.com/nickitat)). +* Fix literal alias misclassification [#57988](https://github.com/ClickHouse/ClickHouse/pull/57988) ([Chen768959](https://github.com/Chen768959)). +* Fix invalid preprocessing on Keeper [#58069](https://github.com/ClickHouse/ClickHouse/pull/58069) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix integer overflow in the `Poco` library, related to `UTF32Encoding` [#58073](https://github.com/ClickHouse/ClickHouse/pull/58073) ([Andrey Fedotov](https://github.com/anfedotoff)). +* Fix parallel replicas (experimental feature) in presence of a scalar subquery with a big integer value [#58118](https://github.com/ClickHouse/ClickHouse/pull/58118) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix `accurateCastOrNull` for out-of-range `DateTime` [#58139](https://github.com/ClickHouse/ClickHouse/pull/58139) ([Andrey Zvonov](https://github.com/zvonand)). +* Fix possible `PARAMETER_OUT_OF_BOUND` error during subcolumns reading from a wide part in MergeTree [#58175](https://github.com/ClickHouse/ClickHouse/pull/58175) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix a slow-down of CREATE VIEW with an enormous number of subqueries [#58220](https://github.com/ClickHouse/ClickHouse/pull/58220) ([Tao Wang](https://github.com/wangtZJU)). +* Fix parallel parsing for JSONCompactEachRow [#58181](https://github.com/ClickHouse/ClickHouse/pull/58181) ([Alexey Milovidov](https://github.com/alexey-milovidov)). [#58250](https://github.com/ClickHouse/ClickHouse/pull/58250) ([Kruglov Pavel](https://github.com/Avogar)). + + ### ClickHouse release 23.11, 2023-12-06 #### Backward Incompatible Change @@ -105,7 +246,6 @@ * Rewrite equality with `is null` check in JOIN ON section. Experimental *Analyzer only*. [#56538](https://github.com/ClickHouse/ClickHouse/pull/56538) ([vdimir](https://github.com/vdimir)). * Function`concat` now supports arbitrary argument types (instead of only String and FixedString arguments). This makes it behave more similar to MySQL `concat` implementation. For example, `SELECT concat('ab', 42)` now returns `ab42`. [#56540](https://github.com/ClickHouse/ClickHouse/pull/56540) ([Serge Klochkov](https://github.com/slvrtrn)). * Allow getting cache configuration from 'named_collection' section in config or from SQL created named collections. [#56541](https://github.com/ClickHouse/ClickHouse/pull/56541) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Update `query_masking_rules` when reloading the config ([#56449](https://github.com/ClickHouse/ClickHouse/issues/56449)). [#56573](https://github.com/ClickHouse/ClickHouse/pull/56573) ([Mikhail Koviazin](https://github.com/mkmkme)). 
* PostgreSQL database engine: Make the removal of outdated tables less aggressive with unsuccessful postgres connection. [#56609](https://github.com/ClickHouse/ClickHouse/pull/56609) ([jsc0218](https://github.com/jsc0218)). * It took too much time to connect to PG when the URL is not right, so the relevant query gets stuck there and gets cancelled. [#56648](https://github.com/ClickHouse/ClickHouse/pull/56648) ([jsc0218](https://github.com/jsc0218)). * Keeper improvement: disable compressed logs by default in Keeper. [#56763](https://github.com/ClickHouse/ClickHouse/pull/56763) ([Antonio Andelic](https://github.com/antonio2368)). diff --git a/LICENSE b/LICENSE index 65c5df824c6..c653e59a8f3 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright 2016-2023 ClickHouse, Inc. +Copyright 2016-2024 ClickHouse, Inc. Apache License Version 2.0, January 2004 @@ -188,7 +188,7 @@ Copyright 2016-2023 ClickHouse, Inc. same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2016-2023 ClickHouse, Inc. + Copyright 2016-2024 ClickHouse, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index bf8ef0b4e98..c56b3c2fd0d 100644 --- a/README.md +++ b/README.md @@ -33,12 +33,7 @@ curl https://clickhouse.com/ | sh ## Upcoming Events -* [**ClickHouse Meetup in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/296488501/) - Nov 30 -* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/296488779/) - Dec 11 -* [**ClickHouse Meetup in Sydney**](https://www.meetup.com/clickhouse-sydney-user-group/events/297638812/) - Dec 12 -* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/296488840/) - Dec 12 - -Also, keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler clickhouse com. +Keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler clickhouse com. ## Recent Recordings * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments" diff --git a/SECURITY.md b/SECURITY.md index 7aaf9f3e5b9..a200e172a3b 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -13,9 +13,10 @@ The following versions of ClickHouse server are currently being supported with s | Version | Supported | |:-|:-| +| 23.12 | ✔️ | | 23.11 | ✔️ | | 23.10 | ✔️ | -| 23.9 | ✔️ | +| 23.9 | ❌ | | 23.8 | ✔️ | | 23.7 | ❌ | | 23.6 | ❌ | diff --git a/base/poco/Foundation/include/Poco/StreamUtil.h b/base/poco/Foundation/include/Poco/StreamUtil.h index fa1814a0f2e..ed0a4fb5154 100644 --- a/base/poco/Foundation/include/Poco/StreamUtil.h +++ b/base/poco/Foundation/include/Poco/StreamUtil.h @@ -69,6 +69,9 @@ // init() is called in the MyIOS constructor. // Therefore we replace each call to init() with // the poco_ios_init macro defined below. +// +// Also this macro will adjust exceptions() flags, since by default std::ios +// will hide exceptions, while in ClickHouse it is better to pass them through. 
#if !defined(POCO_IOS_INIT_HACK) @@ -79,7 +82,10 @@ #if defined(POCO_IOS_INIT_HACK) # define poco_ios_init(buf) #else -# define poco_ios_init(buf) init(buf) +# define poco_ios_init(buf) do { \ + init(buf); \ + this->exceptions(std::ios::failbit | std::ios::badbit); \ +} while (0) #endif diff --git a/base/poco/Foundation/include/Poco/UTF32Encoding.h b/base/poco/Foundation/include/Poco/UTF32Encoding.h index e6784e787cc..dafac005e83 100644 --- a/base/poco/Foundation/include/Poco/UTF32Encoding.h +++ b/base/poco/Foundation/include/Poco/UTF32Encoding.h @@ -70,6 +70,15 @@ public: int queryConvert(const unsigned char * bytes, int length) const; int sequenceLength(const unsigned char * bytes, int length) const; +protected: + static int safeToInt(Poco::UInt32 value) + { + if (value <= 0x10FFFF) + return static_cast(value); + else + return -1; + } + private: bool _flipBytes; static const char * _names[]; diff --git a/base/poco/Foundation/src/UTF32Encoding.cpp b/base/poco/Foundation/src/UTF32Encoding.cpp index ff07006a4fb..e600c5d9445 100644 --- a/base/poco/Foundation/src/UTF32Encoding.cpp +++ b/base/poco/Foundation/src/UTF32Encoding.cpp @@ -30,22 +30,22 @@ const char* UTF32Encoding::_names[] = const TextEncoding::CharacterMap UTF32Encoding::_charMap = { - /* 00 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* 10 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* 20 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* 30 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* 40 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* 50 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* 60 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* 70 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* 80 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* 90 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* a0 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* b0 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* c0 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* d0 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* e0 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - /* f0 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, + /* 00 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, + /* 10 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, + /* 20 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, + /* 30 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, + /* 40 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, + /* 50 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, + /* 60 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, + /* 70 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, + /* 80 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, + /* 90 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, + /* a0 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, + /* b0 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, + /* c0 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, + /* d0 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, + /* e0 */ -4, -4, -4, -4, -4, -4, -4, 
-4, -4, -4, -4, -4, -4, -4, -4, -4, + /* f0 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, }; @@ -118,7 +118,7 @@ const TextEncoding::CharacterMap& UTF32Encoding::characterMap() const int UTF32Encoding::convert(const unsigned char* bytes) const { UInt32 uc; - unsigned char* p = (unsigned char*) &uc; + unsigned char* p = reinterpret_cast(&uc); *p++ = *bytes++; *p++ = *bytes++; *p++ = *bytes++; @@ -129,7 +129,7 @@ int UTF32Encoding::convert(const unsigned char* bytes) const ByteOrder::flipBytes(uc); } - return uc; + return safeToInt(uc); } @@ -138,7 +138,7 @@ int UTF32Encoding::convert(int ch, unsigned char* bytes, int length) const if (bytes && length >= 4) { UInt32 ch1 = _flipBytes ? ByteOrder::flipBytes((UInt32) ch) : (UInt32) ch; - unsigned char* p = (unsigned char*) &ch1; + unsigned char* p = reinterpret_cast(&ch1); *bytes++ = *p++; *bytes++ = *p++; *bytes++ = *p++; @@ -155,14 +155,14 @@ int UTF32Encoding::queryConvert(const unsigned char* bytes, int length) const if (length >= 4) { UInt32 uc; - unsigned char* p = (unsigned char*) &uc; + unsigned char* p = reinterpret_cast(&uc); *p++ = *bytes++; *p++ = *bytes++; *p++ = *bytes++; *p++ = *bytes++; if (_flipBytes) ByteOrder::flipBytes(uc); - return uc; + ret = safeToInt(uc); } return ret; diff --git a/base/poco/Util/src/XMLConfiguration.cpp b/base/poco/Util/src/XMLConfiguration.cpp index e0d363cc870..648084aa28e 100644 --- a/base/poco/Util/src/XMLConfiguration.cpp +++ b/base/poco/Util/src/XMLConfiguration.cpp @@ -18,6 +18,7 @@ #ifndef POCO_UTIL_NO_XMLCONFIGURATION +#include "Poco/String.h" #include "Poco/SAX/InputSource.h" #include "Poco/DOM/DOMParser.h" #include "Poco/DOM/Element.h" @@ -28,6 +29,8 @@ #include "Poco/NumberParser.h" #include "Poco/NumberFormatter.h" #include +#include +#include namespace Poco { @@ -275,8 +278,9 @@ void XMLConfiguration::enumerate(const std::string& key, Keys& range) const { if (pChild->nodeType() == Poco::XML::Node::ELEMENT_NODE) { - const std::string& nodeName = pChild->nodeName(); + std::string nodeName = pChild->nodeName(); size_t& count = keys[nodeName]; + replaceInPlace(nodeName, ".", "\\."); if (count) range.push_back(nodeName + "[" + NumberFormatter::format(count) + "]"); else @@ -379,7 +383,21 @@ Poco::XML::Node* XMLConfiguration::findNode(std::string::const_iterator& it, con { while (it != end && *it == _delim) ++it; std::string key; - while (it != end && *it != _delim && *it != '[') key += *it++; + while (it != end) + { + if (*it == '\\' && std::distance(it, end) > 1) + { + // Skip backslash, copy only the char after it + std::advance(it, 1); + key += *it++; + continue; + } + if (*it == _delim) + break; + if (*it == '[') + break; + key += *it++; + } return findNode(it, end, findElement(key, pNode, create), create); } } diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index bc41819b717..e5a8c064808 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -2,11 +2,11 @@ # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION, # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. 
-SET(VERSION_REVISION 54481) -SET(VERSION_MAJOR 23) -SET(VERSION_MINOR 12) +SET(VERSION_REVISION 54482) +SET(VERSION_MAJOR 24) +SET(VERSION_MINOR 1) SET(VERSION_PATCH 1) -SET(VERSION_GITHASH 05bc8ef1e02b9c7332f08091775b255d191341bf) -SET(VERSION_DESCRIBE v23.12.1.1-testing) -SET(VERSION_STRING 23.12.1.1) +SET(VERSION_GITHASH a2faa65b080a587026c86844f3a20c74d23a86f8) +SET(VERSION_DESCRIBE v24.1.1.1-testing) +SET(VERSION_STRING 24.1.1.1) # end of autochange diff --git a/cmake/target.cmake b/cmake/target.cmake index 1680715d15f..fb911ace7b5 100644 --- a/cmake/target.cmake +++ b/cmake/target.cmake @@ -12,6 +12,8 @@ elseif (CMAKE_SYSTEM_NAME MATCHES "FreeBSD") elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin") set (OS_DARWIN 1) add_definitions(-D OS_DARWIN) + # For MAP_ANON/MAP_ANONYMOUS + add_definitions(-D _DARWIN_C_SOURCE) elseif (CMAKE_SYSTEM_NAME MATCHES "SunOS") set (OS_SUNOS 1) add_definitions(-D OS_SUNOS) @@ -73,8 +75,3 @@ if (CMAKE_CROSSCOMPILING) message (STATUS "Cross-compiling for target: ${CMAKE_CXX_COMPILE_TARGET}") endif () - -if (USE_MUSL) - # Does not work for unknown reason - set (ENABLE_RUST OFF CACHE INTERNAL "") -endif () diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index e91ab38ca00..1b5ba15187f 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -44,6 +44,7 @@ else () endif () add_contrib (miniselect-cmake miniselect) add_contrib (pdqsort-cmake pdqsort) +add_contrib (pocketfft-cmake pocketfft) add_contrib (crc32-vpmsum-cmake crc32-vpmsum) add_contrib (sparsehash-c11-cmake sparsehash-c11) add_contrib (abseil-cpp-cmake abseil-cpp) diff --git a/contrib/azure b/contrib/azure index 352ff0a61cb..060c54dfb0a 160000 --- a/contrib/azure +++ b/contrib/azure @@ -1 +1 @@ -Subproject commit 352ff0a61cb319ac1cc38c4058443ddf70147530 +Subproject commit 060c54dfb0abe869c065143303a9d3e9c54c29e3 diff --git a/contrib/azure-cmake/CMakeLists.txt b/contrib/azure-cmake/CMakeLists.txt index bb44c993e79..0d2512c9e6e 100644 --- a/contrib/azure-cmake/CMakeLists.txt +++ b/contrib/azure-cmake/CMakeLists.txt @@ -8,37 +8,21 @@ endif() set(AZURE_DIR "${ClickHouse_SOURCE_DIR}/contrib/azure") set(AZURE_SDK_LIBRARY_DIR "${AZURE_DIR}/sdk") -file(GLOB AZURE_SDK_CORE_SRC +file(GLOB AZURE_SDK_SRC "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/*.cpp" "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/cryptography/*.cpp" "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/*.cpp" - "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/curl/*.hpp" "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/curl/*.cpp" - "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/winhttp/*.cpp" "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/io/*.cpp" - "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/private/*.hpp" -) - -file(GLOB AZURE_SDK_IDENTITY_SRC + "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/tracing/*.cpp" "${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/src/*.cpp" - "${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/src/private/*.hpp" -) - -file(GLOB AZURE_SDK_STORAGE_COMMON_SRC - "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/*.cpp" - "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/private/*.cpp" -) - -file(GLOB AZURE_SDK_STORAGE_BLOBS_SRC "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/*.cpp" - "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/private/*.hpp" + "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/private/*.cpp" + "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/*.cpp" ) file(GLOB AZURE_SDK_UNIFIED_SRC - ${AZURE_SDK_CORE_SRC} - 
${AZURE_SDK_IDENTITY_SRC} - ${AZURE_SDK_STORAGE_COMMON_SRC} - ${AZURE_SDK_STORAGE_BLOBS_SRC} + ${AZURE_SDK_SRC} ) set(AZURE_SDK_INCLUDES diff --git a/contrib/boringssl b/contrib/boringssl index 8061ac62d67..aa6d2f865a2 160000 --- a/contrib/boringssl +++ b/contrib/boringssl @@ -1 +1 @@ -Subproject commit 8061ac62d67953e61b793042e33baf1352e67510 +Subproject commit aa6d2f865a2eab01cf94f197e11e36b6de47b5b4 diff --git a/contrib/librdkafka b/contrib/librdkafka index 6f3b483426a..2d2aab6f5b7 160000 --- a/contrib/librdkafka +++ b/contrib/librdkafka @@ -1 +1 @@ -Subproject commit 6f3b483426a8c8ec950e27e446bec175cf8b553f +Subproject commit 2d2aab6f5b79db1cfca15d7bf0dee75d00d82082 diff --git a/contrib/llvm-project-cmake/CMakeLists.txt b/contrib/llvm-project-cmake/CMakeLists.txt index 406bac73e90..d09060912d8 100644 --- a/contrib/llvm-project-cmake/CMakeLists.txt +++ b/contrib/llvm-project-cmake/CMakeLists.txt @@ -11,7 +11,9 @@ option (ENABLE_EMBEDDED_COMPILER "Enable support for JIT compilation during quer option (ENABLE_DWARF_PARSER "Enable support for DWARF input format (uses LLVM library)" ${ENABLE_DWARF_PARSER_DEFAULT}) -if (NOT ENABLE_EMBEDDED_COMPILER AND NOT ENABLE_DWARF_PARSER) +option (ENABLE_BLAKE3 "Enable BLAKE3 function" ${ENABLE_LIBRARIES}) + +if (NOT ENABLE_EMBEDDED_COMPILER AND NOT ENABLE_DWARF_PARSER AND NOT ENABLE_BLAKE3) message(STATUS "Not using LLVM") return() endif() @@ -26,61 +28,75 @@ set (LLVM_LIBRARY_DIRS "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm") # and llvm cannot be compiled with bundled libcxx and 20 standard. set (CMAKE_CXX_STANDARD 14) -# This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles. -set (REQUIRED_LLVM_LIBRARIES - LLVMExecutionEngine - LLVMRuntimeDyld - LLVMAsmPrinter - LLVMDebugInfoDWARF - LLVMGlobalISel - LLVMSelectionDAG - LLVMMCDisassembler - LLVMPasses - LLVMCodeGen - LLVMipo - LLVMBitWriter - LLVMInstrumentation - LLVMScalarOpts - LLVMAggressiveInstCombine - LLVMInstCombine - LLVMVectorize - LLVMTransformUtils - LLVMTarget - LLVMAnalysis - LLVMProfileData - LLVMObject - LLVMBitReader - LLVMCore - LLVMRemarks - LLVMBitstreamReader - LLVMMCParser - LLVMMC - LLVMBinaryFormat - LLVMDebugInfoCodeView - LLVMSupport - LLVMDemangle -) +if (ARCH_AMD64) + set (LLVM_TARGETS_TO_BUILD "X86" CACHE INTERNAL "") +elseif (ARCH_AARCH64) + set (LLVM_TARGETS_TO_BUILD "AArch64" CACHE INTERNAL "") +elseif (ARCH_PPC64LE) + set (LLVM_TARGETS_TO_BUILD "PowerPC" CACHE INTERNAL "") +elseif (ARCH_S390X) + set (LLVM_TARGETS_TO_BUILD "SystemZ" CACHE INTERNAL "") +elseif (ARCH_RISCV64) + set (LLVM_TARGETS_TO_BUILD "RISCV" CACHE INTERNAL "") +endif () + + +if (NOT ENABLE_EMBEDDED_COMPILER AND NOT ENABLE_DWARF_PARSER) + # Only compiling blake3 + set (REQUIRED_LLVM_LIBRARIES LLVMSupport) +else() + # This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles. 
+ set (REQUIRED_LLVM_LIBRARIES + LLVMExecutionEngine + LLVMRuntimeDyld + LLVMAsmPrinter + LLVMDebugInfoDWARF + LLVMGlobalISel + LLVMSelectionDAG + LLVMMCDisassembler + LLVMPasses + LLVMCodeGen + LLVMipo + LLVMBitWriter + LLVMInstrumentation + LLVMScalarOpts + LLVMAggressiveInstCombine + LLVMInstCombine + LLVMVectorize + LLVMTransformUtils + LLVMTarget + LLVMAnalysis + LLVMProfileData + LLVMObject + LLVMBitReader + LLVMCore + LLVMRemarks + LLVMBitstreamReader + LLVMMCParser + LLVMMC + LLVMBinaryFormat + LLVMDebugInfoCodeView + LLVMSupport + LLVMDemangle + ) + + if (ARCH_AMD64) + list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen) + elseif (ARCH_AARCH64) + list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen) + elseif (ARCH_PPC64LE) + list(APPEND REQUIRED_LLVM_LIBRARIES LLVMPowerPCInfo LLVMPowerPCDesc LLVMPowerPCCodeGen) + elseif (ARCH_S390X) + list(APPEND REQUIRED_LLVM_LIBRARIES LLVMSystemZInfo LLVMSystemZDesc LLVMSystemZCodeGen) + elseif (ARCH_RISCV64) + list(APPEND REQUIRED_LLVM_LIBRARIES LLVMRISCVInfo LLVMRISCVDesc LLVMRISCVCodeGen) + endif () +endif() + # Skip useless "install" instructions from CMake: set (LLVM_INSTALL_TOOLCHAIN_ONLY 1 CACHE INTERNAL "") -if (ARCH_AMD64) - set (LLVM_TARGETS_TO_BUILD "X86" CACHE INTERNAL "") - list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen) -elseif (ARCH_AARCH64) - set (LLVM_TARGETS_TO_BUILD "AArch64" CACHE INTERNAL "") - list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen) -elseif (ARCH_PPC64LE) - set (LLVM_TARGETS_TO_BUILD "PowerPC" CACHE INTERNAL "") - list(APPEND REQUIRED_LLVM_LIBRARIES LLVMPowerPCInfo LLVMPowerPCDesc LLVMPowerPCCodeGen) -elseif (ARCH_S390X) - set (LLVM_TARGETS_TO_BUILD "SystemZ" CACHE INTERNAL "") - list(APPEND REQUIRED_LLVM_LIBRARIES LLVMSystemZInfo LLVMSystemZDesc LLVMSystemZCodeGen) -elseif (ARCH_RISCV64) - set (LLVM_TARGETS_TO_BUILD "RISCV" CACHE INTERNAL "") - list(APPEND REQUIRED_LLVM_LIBRARIES LLVMRISCVInfo LLVMRISCVDesc LLVMRISCVCodeGen) -endif () - message (STATUS "LLVM TARGETS TO BUILD ${LLVM_TARGETS_TO_BUILD}") set (CMAKE_INSTALL_RPATH "ON") # Do not adjust RPATH in llvm, since then it will not be able to find libcxx/libcxxabi/libunwind diff --git a/contrib/mariadb-connector-c-cmake/CMakeLists.txt b/contrib/mariadb-connector-c-cmake/CMakeLists.txt index 18d1510a57b..4257828890f 100644 --- a/contrib/mariadb-connector-c-cmake/CMakeLists.txt +++ b/contrib/mariadb-connector-c-cmake/CMakeLists.txt @@ -1,4 +1,4 @@ -if(OS_LINUX AND TARGET OpenSSL::SSL) +if((OS_LINUX OR OS_DARWIN) AND TARGET OpenSSL::SSL) option(ENABLE_MYSQL "Enable MySQL" ${ENABLE_LIBRARIES}) else () option(ENABLE_MYSQL "Enable MySQL" FALSE) @@ -73,7 +73,7 @@ set(HAVE_SYS_TYPES_H 1) set(HAVE_SYS_UN_H 1) set(HAVE_UNISTD_H 1) set(HAVE_UTIME_H 1) -set(HAVE_UCONTEXT_H 1) +set(HAVE_UCONTEXT_H 0) set(HAVE_ALLOCA 1) set(HAVE_DLERROR 0) set(HAVE_DLOPEN 0) @@ -116,9 +116,13 @@ CONFIGURE_FILE(${CC_SOURCE_DIR}/include/ma_config.h.in CONFIGURE_FILE(${CC_SOURCE_DIR}/include/mariadb_version.h.in ${CC_BINARY_DIR}/include-public/mariadb_version.h) -if(WITH_SSL) +if (WITH_SSL) set(SYSTEM_LIBS ${SYSTEM_LIBS} ${SSL_LIBRARIES}) -endif() +endif () + +if (OS_DARWIN) + set(SYSTEM_LIBS ${SYSTEM_LIBS} iconv) +endif () function(REGISTER_PLUGIN) @@ -227,15 +231,8 @@ ${CC_SOURCE_DIR}/libmariadb/secure/openssl_crypt.c ${CC_BINARY_DIR}/libmariadb/ma_client_plugin.c ) -if(ICONV_INCLUDE_DIR) - include_directories(BEFORE ${ICONV_INCLUDE_DIR}) -endif() 
add_definitions(-DLIBICONV_PLUG) -if(WITH_DYNCOL) - set(LIBMARIADB_SOURCES ${LIBMARIADB_SOURCES} ${CC_SOURCE_DIR}/libmariadb/mariadb_dyncol.c) -endif() - set(LIBMARIADB_SOURCES ${LIBMARIADB_SOURCES} ${CC_SOURCE_DIR}/libmariadb/mariadb_async.c ${CC_SOURCE_DIR}/libmariadb/ma_context.c) diff --git a/contrib/pocketfft b/contrib/pocketfft new file mode 160000 index 00000000000..9efd4da52cf --- /dev/null +++ b/contrib/pocketfft @@ -0,0 +1 @@ +Subproject commit 9efd4da52cf8d28d14531d14e43ad9d913807546 diff --git a/contrib/pocketfft-cmake/CMakeLists.txt b/contrib/pocketfft-cmake/CMakeLists.txt new file mode 100644 index 00000000000..01911ee4496 --- /dev/null +++ b/contrib/pocketfft-cmake/CMakeLists.txt @@ -0,0 +1,10 @@ +option (ENABLE_POCKETFFT "Enable pocketfft" ${ENABLE_LIBRARIES}) + +if (NOT ENABLE_POCKETFFT) + message(STATUS "Not using pocketfft") + return() +endif() + +add_library(_pocketfft INTERFACE) +target_include_directories(_pocketfft INTERFACE ${ClickHouse_SOURCE_DIR}/contrib/pocketfft) +add_library(ch_contrib::pocketfft ALIAS _pocketfft) diff --git a/docker/images.json b/docker/images.json index 1535715648c..d2f098f53d7 100644 --- a/docker/images.json +++ b/docker/images.json @@ -125,6 +125,7 @@ "docker/test/server-jepsen", "docker/test/sqllogic", "docker/test/sqltest", + "docker/test/clickbench", "docker/test/stateless" ] }, @@ -145,6 +146,10 @@ "name": "clickhouse/server-jepsen-test", "dependent": [] }, + "docker/test/clickbench": { + "name": "clickhouse/clickbench", + "dependent": [] + }, "docker/test/install/deb": { "name": "clickhouse/install-deb-test", "dependent": [] diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index 8fc639af1a7..145f5d13cc2 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -34,8 +34,9 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="23.11.1.2711" +ARG VERSION="23.12.1.1368" ARG PACKAGES="clickhouse-keeper" +ARG DIRECT_DOWNLOAD_URLS="" # user/group precreated explicitly with fixed uid/gid on purpose. 
# It is especially important for rootless containers: in that case entrypoint @@ -47,15 +48,27 @@ ARG PACKAGES="clickhouse-keeper" ARG TARGETARCH RUN arch=${TARGETARCH:-amd64} \ - && for package in ${PACKAGES}; do \ - ( \ - cd /tmp \ - && echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \ + && cd /tmp && rm -f /tmp/*tgz && rm -f /tmp/*tgz.sha512 |: \ + && if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \ + echo "installing from provided urls with tgz packages: ${DIRECT_DOWNLOAD_URLS}" \ + && for url in $DIRECT_DOWNLOAD_URLS; do \ + echo "Get ${url}" \ + && wget -c -q "$url" \ + ; done \ + else \ + for package in ${PACKAGES}; do \ + cd /tmp \ + && echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \ && wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \ && wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz.sha512" \ - && sed 's:/output/:/tmp/:' < "${package}-${VERSION}-${arch}.tgz.sha512" | sha512sum -c \ - && tar xvzf "${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / \ - ) \ + ; done \ + fi \ + && cat *.tgz.sha512 | sha512sum -c \ + && for file in *.tgz; do \ + if [ -f "$file" ]; then \ + echo "Unpacking $file"; \ + tar xvzf "$file" --strip-components=1 -C /; \ + fi \ ; done \ && rm /tmp/*.tgz /install -r \ && addgroup -S -g 101 clickhouse \ diff --git a/docker/packager/README.md b/docker/packager/README.md index 3a91f9a63f0..e0b7f38ea58 100644 --- a/docker/packager/README.md +++ b/docker/packager/README.md @@ -3,10 +3,10 @@ compilers and build settings. Correctly configured Docker daemon is single depen Usage: -Build deb package with `clang-14` in `debug` mode: +Build deb package with `clang-17` in `debug` mode: ``` $ mkdir deb/test_output -$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-14 --debug-build +$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-17 --debug-build $ ls -l deb/test_output -rw-r--r-- 1 root root 3730 clickhouse-client_22.2.2+debug_all.deb -rw-r--r-- 1 root root 84221888 clickhouse-common-static_22.2.2+debug_amd64.deb @@ -17,11 +17,11 @@ $ ls -l deb/test_output ``` -Build ClickHouse binary with `clang-14` and `address` sanitizer in `relwithdebuginfo` +Build ClickHouse binary with `clang-17` and `address` sanitizer in `relwithdebuginfo` mode: ``` $ mkdir $HOME/some_clickhouse -$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-14 --sanitizer=address +$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-17 --sanitizer=address $ ls -l $HOME/some_clickhouse -rwxr-xr-x 1 root root 787061952 clickhouse lrwxrwxrwx 1 root root 10 clickhouse-benchmark -> clickhouse diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index 20fb97c80bb..1a99ab0d0b6 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -49,6 +49,7 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ chmod 777 -R /rust && \ rustup toolchain install nightly-2023-07-04 && \ rustup default nightly-2023-07-04 && \ + rustup toolchain remove stable && \ rustup component add rust-src && \ rustup target add x86_64-unknown-linux-gnu && \ rustup target add aarch64-unknown-linux-gnu && \ diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index fd9bfcaabb2..b63643419fe 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -149,7 +149,7 @@ then mkdir -p "$PERF_OUTPUT" cp -r ../tests/performance "$PERF_OUTPUT" cp -r 
../tests/config/top_level_domains "$PERF_OUTPUT" - cp -r ../docker/test/performance-comparison/config "$PERF_OUTPUT" ||: + cp -r ../tests/performance/scripts/config "$PERF_OUTPUT" ||: for SRC in /output/clickhouse*; do # Copy all clickhouse* files except packages and bridges [[ "$SRC" != *.* ]] && [[ "$SRC" != *-bridge ]] && \ @@ -160,7 +160,7 @@ then ln -sf clickhouse "$PERF_OUTPUT"/clickhouse-keeper fi - cp -r ../docker/test/performance-comparison "$PERF_OUTPUT"/scripts ||: + cp -r ../tests/performance/scripts "$PERF_OUTPUT"/scripts ||: prepare_combined_output "$PERF_OUTPUT" # We have to know the revision that corresponds to this binary build. diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 76b03218eab..26d65eb3ccc 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -32,8 +32,9 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="23.11.1.2711" +ARG VERSION="23.12.1.1368" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" +ARG DIRECT_DOWNLOAD_URLS="" # user/group precreated explicitly with fixed uid/gid on purpose. # It is especially important for rootless containers: in that case entrypoint @@ -43,15 +44,26 @@ ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # The same uid / gid (101) is used both for alpine and ubuntu. RUN arch=${TARGETARCH:-amd64} \ - && for package in ${PACKAGES}; do \ - ( \ - cd /tmp \ - && echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \ + && cd /tmp \ + && if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \ + echo "installing from provided urls with tgz packages: ${DIRECT_DOWNLOAD_URLS}" \ + && for url in $DIRECT_DOWNLOAD_URLS; do \ + echo "Get ${url}" \ + && wget -c -q "$url" \ + ; done \ + else \ + for package in ${PACKAGES}; do \ + echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \ && wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \ && wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz.sha512" \ - && sed 's:/output/:/tmp/:' < "${package}-${VERSION}-${arch}.tgz.sha512" | sha512sum -c \ - && tar xvzf "${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / \ - ) \ + ; done \ + fi \ + && cat *.tgz.sha512 | sed 's:/output/:/tmp/:' | sha512sum -c \ + && for file in *.tgz; do \ + if [ -f "$file" ]; then \ + echo "Unpacking $file"; \ + tar xvzf "$file" --strip-components=1 -C /; \ + fi \ ; done \ && rm /tmp/*.tgz /install -r \ && addgroup -S -g 101 clickhouse \ diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index c6dfcf9f679..5b96b208b11 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -30,13 +30,14 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="23.11.1.2711" +ARG VERSION="23.12.1.1368" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image # from debs created by CI build, for example: # docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://..." -t ... 
ARG deb_location_url="" +ARG DIRECT_DOWNLOAD_URLS="" # set non-empty single_binary_location_url to create docker image # from a single binary url (useful for non-standard builds - with sanitizers, for arm64). @@ -44,6 +45,18 @@ ARG single_binary_location_url="" ARG TARGETARCH +# install from direct URL +RUN if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \ + echo "installing from custom predefined urls with deb packages: ${DIRECT_DOWNLOAD_URLS}" \ + && rm -rf /tmp/clickhouse_debs \ + && mkdir -p /tmp/clickhouse_debs \ + && for url in $DIRECT_DOWNLOAD_URLS; do \ + wget --progress=bar:force:noscroll "$url" -P /tmp/clickhouse_debs || exit 1 \ + ; done \ + && dpkg -i /tmp/clickhouse_debs/*.deb \ + && rm -rf /tmp/* ; \ + fi + # install from a web location with deb packages RUN arch="${TARGETARCH:-amd64}" \ && if [ -n "${deb_location_url}" ]; then \ diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index b55baa0e0fc..b48017fdacc 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -12,6 +12,7 @@ RUN apt-get update \ ripgrep \ zstd \ locales \ + sudo \ --yes --no-install-recommends # Sanitizer options for services (clickhouse-server) diff --git a/docker/test/base/setup_export_logs.sh b/docker/test/base/setup_export_logs.sh index 6e3721956c0..ea82e071112 100755 --- a/docker/test/base/setup_export_logs.sh +++ b/docker/test/base/setup_export_logs.sh @@ -21,7 +21,7 @@ EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:-"check_name, "} # trace_log needs more columns for symbolization EXTRA_COLUMNS_TRACE_LOG="${EXTRA_COLUMNS} symbols Array(LowCardinality(String)), lines Array(LowCardinality(String)), " -EXTRA_COLUMNS_EXPRESSION_TRACE_LOG="${EXTRA_COLUMNS_EXPRESSION}, arrayMap(x -> toLowCardinality(demangle(addressToSymbol(x))), trace) AS symbols, arrayMap(x -> toLowCardinality(addressToLine(x)), trace) AS lines" +EXTRA_COLUMNS_EXPRESSION_TRACE_LOG="${EXTRA_COLUMNS_EXPRESSION}, arrayMap(x -> demangle(addressToSymbol(x)), trace)::Array(LowCardinality(String)) AS symbols, arrayMap(x -> addressToLine(x), trace)::Array(LowCardinality(String)) AS lines" function __set_connection_args diff --git a/docker/test/clickbench/Dockerfile b/docker/test/clickbench/Dockerfile new file mode 100644 index 00000000000..0b6b1736e03 --- /dev/null +++ b/docker/test/clickbench/Dockerfile @@ -0,0 +1,10 @@ +ARG FROM_TAG=latest +FROM clickhouse/test-base:$FROM_TAG + +ENV TZ=Europe/Amsterdam +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +COPY *.sh / +COPY *.sql / + +CMD ["/bin/bash", "/run.sh"] diff --git a/docker/test/clickbench/create.sql b/docker/test/clickbench/create.sql new file mode 100644 index 00000000000..9f18a47474b --- /dev/null +++ b/docker/test/clickbench/create.sql @@ -0,0 +1,112 @@ +ATTACH TABLE hits UUID 'c449dfbf-ba06-4d13-abec-8396559eb955' +( + WatchID BIGINT NOT NULL, + JavaEnable SMALLINT NOT NULL, + Title TEXT NOT NULL, + GoodEvent SMALLINT NOT NULL, + EventTime TIMESTAMP NOT NULL, + EventDate Date NOT NULL, + CounterID INTEGER NOT NULL, + ClientIP INTEGER NOT NULL, + RegionID INTEGER NOT NULL, + UserID BIGINT NOT NULL, + CounterClass SMALLINT NOT NULL, + OS SMALLINT NOT NULL, + UserAgent SMALLINT NOT NULL, + URL TEXT NOT NULL, + Referer TEXT NOT NULL, + IsRefresh SMALLINT NOT NULL, + RefererCategoryID SMALLINT NOT NULL, + RefererRegionID INTEGER NOT NULL, + URLCategoryID SMALLINT NOT NULL, + URLRegionID INTEGER NOT NULL, + ResolutionWidth SMALLINT NOT NULL, + ResolutionHeight SMALLINT NOT NULL, + ResolutionDepth SMALLINT NOT NULL, + 
FlashMajor SMALLINT NOT NULL, + FlashMinor SMALLINT NOT NULL, + FlashMinor2 TEXT NOT NULL, + NetMajor SMALLINT NOT NULL, + NetMinor SMALLINT NOT NULL, + UserAgentMajor SMALLINT NOT NULL, + UserAgentMinor VARCHAR(255) NOT NULL, + CookieEnable SMALLINT NOT NULL, + JavascriptEnable SMALLINT NOT NULL, + IsMobile SMALLINT NOT NULL, + MobilePhone SMALLINT NOT NULL, + MobilePhoneModel TEXT NOT NULL, + Params TEXT NOT NULL, + IPNetworkID INTEGER NOT NULL, + TraficSourceID SMALLINT NOT NULL, + SearchEngineID SMALLINT NOT NULL, + SearchPhrase TEXT NOT NULL, + AdvEngineID SMALLINT NOT NULL, + IsArtifical SMALLINT NOT NULL, + WindowClientWidth SMALLINT NOT NULL, + WindowClientHeight SMALLINT NOT NULL, + ClientTimeZone SMALLINT NOT NULL, + ClientEventTime TIMESTAMP NOT NULL, + SilverlightVersion1 SMALLINT NOT NULL, + SilverlightVersion2 SMALLINT NOT NULL, + SilverlightVersion3 INTEGER NOT NULL, + SilverlightVersion4 SMALLINT NOT NULL, + PageCharset TEXT NOT NULL, + CodeVersion INTEGER NOT NULL, + IsLink SMALLINT NOT NULL, + IsDownload SMALLINT NOT NULL, + IsNotBounce SMALLINT NOT NULL, + FUniqID BIGINT NOT NULL, + OriginalURL TEXT NOT NULL, + HID INTEGER NOT NULL, + IsOldCounter SMALLINT NOT NULL, + IsEvent SMALLINT NOT NULL, + IsParameter SMALLINT NOT NULL, + DontCountHits SMALLINT NOT NULL, + WithHash SMALLINT NOT NULL, + HitColor CHAR NOT NULL, + LocalEventTime TIMESTAMP NOT NULL, + Age SMALLINT NOT NULL, + Sex SMALLINT NOT NULL, + Income SMALLINT NOT NULL, + Interests SMALLINT NOT NULL, + Robotness SMALLINT NOT NULL, + RemoteIP INTEGER NOT NULL, + WindowName INTEGER NOT NULL, + OpenerName INTEGER NOT NULL, + HistoryLength SMALLINT NOT NULL, + BrowserLanguage TEXT NOT NULL, + BrowserCountry TEXT NOT NULL, + SocialNetwork TEXT NOT NULL, + SocialAction TEXT NOT NULL, + HTTPError SMALLINT NOT NULL, + SendTiming INTEGER NOT NULL, + DNSTiming INTEGER NOT NULL, + ConnectTiming INTEGER NOT NULL, + ResponseStartTiming INTEGER NOT NULL, + ResponseEndTiming INTEGER NOT NULL, + FetchTiming INTEGER NOT NULL, + SocialSourceNetworkID SMALLINT NOT NULL, + SocialSourcePage TEXT NOT NULL, + ParamPrice BIGINT NOT NULL, + ParamOrderID TEXT NOT NULL, + ParamCurrency TEXT NOT NULL, + ParamCurrencyID SMALLINT NOT NULL, + OpenstatServiceName TEXT NOT NULL, + OpenstatCampaignID TEXT NOT NULL, + OpenstatAdID TEXT NOT NULL, + OpenstatSourceID TEXT NOT NULL, + UTMSource TEXT NOT NULL, + UTMMedium TEXT NOT NULL, + UTMCampaign TEXT NOT NULL, + UTMContent TEXT NOT NULL, + UTMTerm TEXT NOT NULL, + FromTag TEXT NOT NULL, + HasGCLID SMALLINT NOT NULL, + RefererHash BIGINT NOT NULL, + URLHash BIGINT NOT NULL, + CLID INTEGER NOT NULL, + PRIMARY KEY (CounterID, EventDate, UserID, EventTime, WatchID) +) +ENGINE = MergeTree +SETTINGS disk = disk(type = cache, path = '/dev/shm/clickhouse/', max_size = '16G', + disk = disk(type = web, endpoint = 'https://clickhouse-datasets-web.s3.us-east-1.amazonaws.com/')); diff --git a/docker/test/clickbench/queries.sql b/docker/test/clickbench/queries.sql new file mode 100644 index 00000000000..31f65fc898d --- /dev/null +++ b/docker/test/clickbench/queries.sql @@ -0,0 +1,43 @@ +SELECT COUNT(*) FROM hits; +SELECT COUNT(*) FROM hits WHERE AdvEngineID <> 0; +SELECT SUM(AdvEngineID), COUNT(*), AVG(ResolutionWidth) FROM hits; +SELECT AVG(UserID) FROM hits; +SELECT COUNT(DISTINCT UserID) FROM hits; +SELECT COUNT(DISTINCT SearchPhrase) FROM hits; +SELECT MIN(EventDate), MAX(EventDate) FROM hits; +SELECT AdvEngineID, COUNT(*) FROM hits WHERE AdvEngineID <> 0 GROUP BY AdvEngineID ORDER BY COUNT(*) DESC; 
+SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM hits GROUP BY RegionID ORDER BY u DESC LIMIT 10; +SELECT RegionID, SUM(AdvEngineID), COUNT(*) AS c, AVG(ResolutionWidth), COUNT(DISTINCT UserID) FROM hits GROUP BY RegionID ORDER BY c DESC LIMIT 10; +SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits WHERE MobilePhoneModel <> '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10; +SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits WHERE MobilePhoneModel <> '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10; +SELECT SearchPhrase, COUNT(*) AS c FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10; +SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10; +SELECT SearchEngineID, SearchPhrase, COUNT(*) AS c FROM hits WHERE SearchPhrase <> '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10; +SELECT UserID, COUNT(*) FROM hits GROUP BY UserID ORDER BY COUNT(*) DESC LIMIT 10; +SELECT UserID, SearchPhrase, COUNT(*) FROM hits GROUP BY UserID, SearchPhrase ORDER BY COUNT(*) DESC LIMIT 10; +SELECT UserID, SearchPhrase, COUNT(*) FROM hits GROUP BY UserID, SearchPhrase LIMIT 10; +SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, COUNT(*) FROM hits GROUP BY UserID, m, SearchPhrase ORDER BY COUNT(*) DESC LIMIT 10; +SELECT UserID FROM hits WHERE UserID = 435090932899640449; +SELECT COUNT(*) FROM hits WHERE URL LIKE '%google%'; +SELECT SearchPhrase, MIN(URL), COUNT(*) AS c FROM hits WHERE URL LIKE '%google%' AND SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10; +SELECT SearchPhrase, MIN(URL), MIN(Title), COUNT(*) AS c, COUNT(DISTINCT UserID) FROM hits WHERE Title LIKE '%Google%' AND URL NOT LIKE '%.google.%' AND SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10; +SELECT * FROM hits WHERE URL LIKE '%google%' ORDER BY EventTime LIMIT 10; +SELECT SearchPhrase FROM hits WHERE SearchPhrase <> '' ORDER BY EventTime LIMIT 10; +SELECT SearchPhrase FROM hits WHERE SearchPhrase <> '' ORDER BY SearchPhrase LIMIT 10; +SELECT SearchPhrase FROM hits WHERE SearchPhrase <> '' ORDER BY EventTime, SearchPhrase LIMIT 10; +SELECT CounterID, AVG(length(URL)) AS l, COUNT(*) AS c FROM hits WHERE URL <> '' GROUP BY CounterID HAVING COUNT(*) > 100000 ORDER BY l DESC LIMIT 25; +SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS k, AVG(length(Referer)) AS l, COUNT(*) AS c, MIN(Referer) FROM hits WHERE Referer <> '' GROUP BY k HAVING COUNT(*) > 100000 ORDER BY l DESC LIMIT 25; +SELECT SUM(ResolutionWidth), SUM(ResolutionWidth + 1), SUM(ResolutionWidth + 2), SUM(ResolutionWidth + 3), SUM(ResolutionWidth + 4), SUM(ResolutionWidth + 5), SUM(ResolutionWidth + 6), SUM(ResolutionWidth + 7), SUM(ResolutionWidth + 8), SUM(ResolutionWidth + 9), SUM(ResolutionWidth + 10), SUM(ResolutionWidth + 11), SUM(ResolutionWidth + 12), SUM(ResolutionWidth + 13), SUM(ResolutionWidth + 14), SUM(ResolutionWidth + 15), SUM(ResolutionWidth + 16), SUM(ResolutionWidth + 17), SUM(ResolutionWidth + 18), SUM(ResolutionWidth + 19), SUM(ResolutionWidth + 20), SUM(ResolutionWidth + 21), SUM(ResolutionWidth + 22), SUM(ResolutionWidth + 23), SUM(ResolutionWidth + 24), SUM(ResolutionWidth + 25), SUM(ResolutionWidth + 26), SUM(ResolutionWidth + 27), SUM(ResolutionWidth + 28), SUM(ResolutionWidth + 29), SUM(ResolutionWidth + 30), SUM(ResolutionWidth + 31), SUM(ResolutionWidth + 32), SUM(ResolutionWidth + 33), SUM(ResolutionWidth + 
34), SUM(ResolutionWidth + 35), SUM(ResolutionWidth + 36), SUM(ResolutionWidth + 37), SUM(ResolutionWidth + 38), SUM(ResolutionWidth + 39), SUM(ResolutionWidth + 40), SUM(ResolutionWidth + 41), SUM(ResolutionWidth + 42), SUM(ResolutionWidth + 43), SUM(ResolutionWidth + 44), SUM(ResolutionWidth + 45), SUM(ResolutionWidth + 46), SUM(ResolutionWidth + 47), SUM(ResolutionWidth + 48), SUM(ResolutionWidth + 49), SUM(ResolutionWidth + 50), SUM(ResolutionWidth + 51), SUM(ResolutionWidth + 52), SUM(ResolutionWidth + 53), SUM(ResolutionWidth + 54), SUM(ResolutionWidth + 55), SUM(ResolutionWidth + 56), SUM(ResolutionWidth + 57), SUM(ResolutionWidth + 58), SUM(ResolutionWidth + 59), SUM(ResolutionWidth + 60), SUM(ResolutionWidth + 61), SUM(ResolutionWidth + 62), SUM(ResolutionWidth + 63), SUM(ResolutionWidth + 64), SUM(ResolutionWidth + 65), SUM(ResolutionWidth + 66), SUM(ResolutionWidth + 67), SUM(ResolutionWidth + 68), SUM(ResolutionWidth + 69), SUM(ResolutionWidth + 70), SUM(ResolutionWidth + 71), SUM(ResolutionWidth + 72), SUM(ResolutionWidth + 73), SUM(ResolutionWidth + 74), SUM(ResolutionWidth + 75), SUM(ResolutionWidth + 76), SUM(ResolutionWidth + 77), SUM(ResolutionWidth + 78), SUM(ResolutionWidth + 79), SUM(ResolutionWidth + 80), SUM(ResolutionWidth + 81), SUM(ResolutionWidth + 82), SUM(ResolutionWidth + 83), SUM(ResolutionWidth + 84), SUM(ResolutionWidth + 85), SUM(ResolutionWidth + 86), SUM(ResolutionWidth + 87), SUM(ResolutionWidth + 88), SUM(ResolutionWidth + 89) FROM hits; +SELECT SearchEngineID, ClientIP, COUNT(*) AS c, SUM(IsRefresh), AVG(ResolutionWidth) FROM hits WHERE SearchPhrase <> '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10; +SELECT WatchID, ClientIP, COUNT(*) AS c, SUM(IsRefresh), AVG(ResolutionWidth) FROM hits WHERE SearchPhrase <> '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10; +SELECT WatchID, ClientIP, COUNT(*) AS c, SUM(IsRefresh), AVG(ResolutionWidth) FROM hits GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10; +SELECT URL, COUNT(*) AS c FROM hits GROUP BY URL ORDER BY c DESC LIMIT 10; +SELECT 1, URL, COUNT(*) AS c FROM hits GROUP BY 1, URL ORDER BY c DESC LIMIT 10; +SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, COUNT(*) AS c FROM hits GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10; +SELECT URL, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND IsRefresh = 0 AND URL <> '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10; +SELECT Title, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND IsRefresh = 0 AND Title <> '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10; +SELECT URL, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND IsRefresh = 0 AND IsLink <> 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 10 OFFSET 1000; +SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND IsRefresh = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 10 OFFSET 1000; +SELECT URLHash, EventDate, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND 
IsRefresh = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 3594120000172545465 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 10 OFFSET 100; +SELECT WindowClientWidth, WindowClientHeight, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND IsRefresh = 0 AND DontCountHits = 0 AND URLHash = 2868770270353813622 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10 OFFSET 10000; +SELECT DATE_TRUNC('minute', EventTime) AS M, COUNT(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-14' AND EventDate <= '2013-07-15' AND IsRefresh = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', EventTime) ORDER BY DATE_TRUNC('minute', EventTime) LIMIT 10 OFFSET 1000; diff --git a/docker/test/clickbench/run.sh b/docker/test/clickbench/run.sh new file mode 100755 index 00000000000..3d27a40bb74 --- /dev/null +++ b/docker/test/clickbench/run.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +SCRIPT_PID=$$ +(sleep 1200 && kill -9 $SCRIPT_PID) & + +# shellcheck disable=SC1091 +source /setup_export_logs.sh + +# fail on errors, verbose and export all env variables +set -e -x -a + +dpkg -i package_folder/clickhouse-common-static_*.deb +dpkg -i package_folder/clickhouse-server_*.deb +dpkg -i package_folder/clickhouse-client_*.deb + +# A directory for cache +mkdir /dev/shm/clickhouse +chown clickhouse:clickhouse /dev/shm/clickhouse + +# Allow introspection functions, needed for sending the logs +echo " +profiles: + default: + allow_introspection_functions: 1 +" > /etc/clickhouse-server/users.d/allow_introspection_functions.yaml + +# Enable text_log +echo " +text_log: +" > /etc/clickhouse-server/config.d/text_log.yaml + +config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml + +clickhouse start + +# Wait for the server to start, but not for too long.
+for _ in {1..100} +do + clickhouse-client --query "SELECT 1" && break + sleep 1 +done + +setup_logs_replication + +# Load the data + +clickhouse-client --time < /create.sql + +# Run the queries + +set +x + +TRIES=3 +QUERY_NUM=1 +while read -r query; do + echo -n "[" + for i in $(seq 1 $TRIES); do + RES=$(clickhouse-client --query_id "q${QUERY_NUM}-${i}" --time --format Null --query "$query" --progress 0 2>&1 ||:) + echo -n "${RES}" + [[ "$i" != "$TRIES" ]] && echo -n ", " + + echo "${QUERY_NUM},${i},${RES}" >> /test_output/test_results.tsv + done + echo "]," + + QUERY_NUM=$((QUERY_NUM + 1)) +done < /queries.sql + +set -x + +clickhouse-client --query "SELECT total_bytes FROM system.tables WHERE name = 'hits' AND database = 'default'" + +clickhouse-client -q "system flush logs" ||: +stop_logs_replication +clickhouse stop + +mv /var/log/clickhouse-server/* /test_output/ + +echo -e "success\tClickBench finished" > /test_output/check_status.tsv diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile index 458ca2b1da8..c795fbf0672 100644 --- a/docker/test/integration/runner/Dockerfile +++ b/docker/test/integration/runner/Dockerfile @@ -74,7 +74,7 @@ RUN python3 -m pip install --no-cache-dir \ delta-spark==2.3.0 \ dict2xml \ dicttoxml \ - docker \ + docker==6.1.3 \ docker-compose==1.29.2 \ grpcio \ grpcio-tools \ diff --git a/docker/test/integration/runner/compose/docker_compose_minio.yml b/docker/test/integration/runner/compose/docker_compose_minio.yml index 45e55e7a79c..4255a529f6d 100644 --- a/docker/test/integration/runner/compose/docker_compose_minio.yml +++ b/docker/test/integration/runner/compose/docker_compose_minio.yml @@ -34,7 +34,7 @@ services: # Empty container to run proxy resolver. resolver: - image: clickhouse/python-bottle + image: clickhouse/python-bottle:${DOCKER_PYTHON_BOTTLE_TAG:-latest} expose: - "8080" tty: true diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile index d31663f9071..e4ced104445 100644 --- a/docker/test/performance-comparison/Dockerfile +++ b/docker/test/performance-comparison/Dockerfile @@ -39,18 +39,8 @@ RUN apt-get update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -COPY * / +COPY run.sh / -# Bind everything to one NUMA node, if there's more than one. Theoretically the -# node #0 should be less stable because of system interruptions. We bind -# randomly to node 1 or 0 to gather some statistics on that. We have to bind -# both servers and the tmpfs on which the database is stored. How to do it -# is unclear, but by default tmpfs uses -# 'process allocation policy', not sure which process but hopefully the one that -# writes to it, so just bind the downloader script as well. -# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt -# Double-escaped backslashes are a tribute to the engineering wonder of docker -- -# it gives '/bin/sh: 1: [bash,: not found' otherwise. 
-CMD ["bash", "-c", "node=$((RANDOM % $(numactl --hardware | sed -n 's/^.*available:\\(.*\\)nodes.*$/\\1/p'))); echo Will bind to NUMA node $node; numactl --cpunodebind=$node --membind=$node /entrypoint.sh"] +CMD ["bash", "/run.sh"] # docker run --network=host --volume :/workspace --volume=:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/performance-comparison diff --git a/docker/test/performance-comparison/run.sh b/docker/test/performance-comparison/run.sh new file mode 100644 index 00000000000..7afb5da59b1 --- /dev/null +++ b/docker/test/performance-comparison/run.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh" +[ ! -e "$entry" ] && echo "ERROR: test scripts are not found" && exit 1 + +# Bind everything to one NUMA node, if there's more than one. Theoretically the +# node #0 should be less stable because of system interruptions. We bind +# randomly to node 1 or 0 to gather some statistics on that. We have to bind +# both servers and the tmpfs on which the database is stored. How to do it +# is unclear, but by default tmpfs uses +# 'process allocation policy', not sure which process but hopefully the one that +# writes to it, so just bind the downloader script as well. +# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt +# Double-escaped backslashes are a tribute to the engineering wonder of docker -- +# it gives '/bin/sh: 1: [bash,: not found' otherwise. +node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') )); +echo Will bind to NUMA node $node; +numactl --cpunodebind=$node --membind=$node $entry diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index ad3c3477b37..c9ce5697182 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -24,6 +24,28 @@ azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log & config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml +cache_policy="" +if [ $(( $(date +%-d) % 2 )) -eq 1 ]; then + cache_policy="SLRU" +else + cache_policy="LRU" +fi + +echo "Using cache policy: $cache_policy" + +if [ "$cache_policy" = "SLRU" ]; then + sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \ + | sed "s|LRU|SLRU|" \ + > /etc/clickhouse-server/config.d/storage_conf.xml.tmp + mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml +fi + +if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then + # It is not needed, we will explicitly create tables on s3. + # We do not have statefull tests with s3 storage run in public repository, but this is needed for another repository. 
+ rm /etc/clickhouse-server/config.d/s3_storage_policy_for_merge_tree_by_default.xml +fi + function start() { if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then @@ -107,8 +129,76 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] else clickhouse-client --query "CREATE DATABASE test" clickhouse-client --query "SHOW TABLES FROM test" - clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits" - clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits" + if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then + clickhouse-client --query "CREATE TABLE test.hits (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, + EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, + UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, + RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), + URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, + FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, + UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, + MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, + SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, + ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, + SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, + FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, + HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, + GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, + HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, + HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, + FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, + LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, + RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, + ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, + OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, + UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, + URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, + ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), + IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY 
toYYYYMM(EventDate) + ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" + clickhouse-client --query "CREATE TABLE test.visits (CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, + VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, + Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, + EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, + AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), + RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, + SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, + ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, + SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, + UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, + FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, + FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, + Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, + BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), + Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), + WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, + ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, + ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, + ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, + ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, + ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, + OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, + UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, + PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, + PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), + CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, + StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, + OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, 
OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, + UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, + ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), + Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, + DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) + ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) + SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" + + clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0" + clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0" + clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC" + clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC" + else + clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits" + clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits" + fi clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, 
ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0" fi @@ -128,6 +218,10 @@ function run_tests() ADDITIONAL_OPTIONS+=('--replicated-database') fi + if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then + ADDITIONAL_OPTIONS+=('--s3-storage') + fi + if [[ -n "$USE_DATABASE_ORDINARY" ]] && [[ "$USE_DATABASE_ORDINARY" -eq 1 ]]; then ADDITIONAL_OPTIONS+=('--db-engine=Ordinary') fi @@ -135,7 +229,7 @@ function run_tests() set +e if [[ -n "$USE_PARALLEL_REPLICAS" ]] && [[ "$USE_PARALLEL_REPLICAS" -eq 1 ]]; then - clickhouse-test --client="clickhouse-client --use_hedged_requests=0 --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 \ + clickhouse-test --client="clickhouse-client --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 \ --max_parallel_replicas=100 --cluster_for_parallel_replicas='parallel_replicas'" \ -j 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --no-parallel-replicas --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \ "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt diff --git a/docker/test/stateful/s3downloader b/docker/test/stateful/s3downloader index 96f2aa96dd5..77601fb5af6 100755 --- a/docker/test/stateful/s3downloader +++ b/docker/test/stateful/s3downloader @@ -30,7 +30,7 @@ def build_url(base_url, dataset): return os.path.join(base_url, dataset, "partitions", AVAILABLE_DATASETS[dataset]) -def dowload_with_progress(url, path): +def download_with_progress(url, path): logging.info("Downloading from %s to temp path %s", url, path) for i in range(RETRIES_COUNT): try: @@ -110,7 +110,7 @@ if __name__ == "__main__": temp_archive_path = _get_temp_file_name() try: download_url_for_dataset = build_url(args.url_prefix, dataset) - dowload_with_progress(download_url_for_dataset, temp_archive_path) + download_with_progress(download_url_for_dataset, temp_archive_path) unpack_to_clickhouse_directory(temp_archive_path, args.clickhouse_data_path) except Exception as ex: logging.info("Some exception occured %s", str(ex)) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index a2e2a708aaf..4e9486d7286 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -58,6 +58,7 @@ if [[ -n "$BUGFIX_VALIDATE_CHECK" ]] && [[ "$BUGFIX_VALIDATE_CHECK" -eq 1 ]]; th # it contains some new settings, but we can safely remove it rm /etc/clickhouse-server/users.d/s3_cache_new.xml + rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml fi # 
For flaky check we also enable thread fuzzer @@ -216,11 +217,11 @@ export -f run_tests if [ "$NUM_TRIES" -gt "1" ]; then # We don't run tests with Ordinary database in PRs, only in master. # So run new/changed tests with Ordinary at least once in flaky check. - timeout "$MAX_RUN_TIME" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \ + timeout_with_logging "$MAX_RUN_TIME" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \ | sed 's/All tests have finished//' | sed 's/No tests were run//' ||: fi -timeout "$MAX_RUN_TIME" bash -c run_tests ||: +timeout_with_logging "$MAX_RUN_TIME" bash -c run_tests ||: echo "Files in current directory" ls -la ./ @@ -300,9 +301,6 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] rg -Fa "" /var/log/clickhouse-server/clickhouse-server2.log ||: zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst ||: zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.zst ||: - # FIXME: remove once only github actions will be left - rm /var/log/clickhouse-server/clickhouse-server1.log - rm /var/log/clickhouse-server/clickhouse-server2.log mv /var/log/clickhouse-server/stderr1.log /test_output/ ||: mv /var/log/clickhouse-server/stderr2.log /test_output/ ||: tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||: diff --git a/docker/test/stateless/utils.lib b/docker/test/stateless/utils.lib index 1204434d853..9b6ab535a90 100644 --- a/docker/test/stateless/utils.lib +++ b/docker/test/stateless/utils.lib @@ -35,4 +35,17 @@ function fn_exists() { declare -F "$1" > /dev/null; } +function timeout_with_logging() { + local exit_code=0 + + timeout "${@}" || exit_code="${?}" + + if [[ "${exit_code}" -eq "124" ]] + then + echo "The command 'timeout ${*}' has been killed by timeout" + fi + + return $exit_code +} + # vi: ft=bash diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index afc1a386a48..67056cc1bc1 100644 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -65,9 +65,27 @@ chmod 777 -R /var/lib/clickhouse clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary" clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test" + stop mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log +# Randomize cache policies. 
+cache_policy="" +if [ $(( $(date +%-d) % 2 )) -eq 1 ]; then + cache_policy="SLRU" +else + cache_policy="LRU" +fi + +echo "Using cache policy: $cache_policy" + +if [ "$cache_policy" = "SLRU" ]; then + sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \ + | sed "s|LRU|SLRU|" \ + > /etc/clickhouse-server/config.d/storage_conf.xml.tmp + mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml +fi + start clickhouse-client --query "SHOW TABLES FROM datasets" @@ -191,6 +209,13 @@ sudo cat /etc/clickhouse-server/config.d/logger_trace.xml \ > /etc/clickhouse-server/config.d/logger_trace.xml.tmp mv /etc/clickhouse-server/config.d/logger_trace.xml.tmp /etc/clickhouse-server/config.d/logger_trace.xml +if [ "$cache_policy" = "SLRU" ]; then + sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \ + | sed "s|LRU|SLRU|" \ + > /etc/clickhouse-server/config.d/storage_conf.xml.tmp + mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml +fi + # Randomize async_load_databases if [ $(( $(date +%-d) % 2 )) -eq 1 ]; then sudo echo "true" \ diff --git a/docker/test/style/run.sh b/docker/test/style/run.sh index 315efb9e6c4..cc6cb292b66 100755 --- a/docker/test/style/run.sh +++ b/docker/test/style/run.sh @@ -23,6 +23,7 @@ echo "Check submodules" | ts ./check-submodules |& tee /test_output/submodules_output.txt echo "Check shell scripts with shellcheck" | ts ./shellcheck-run.sh |& tee /test_output/shellcheck_output.txt + /process_style_check_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv echo "Check help for changelog generator works" | ts cd ../changelog || exit 1 diff --git a/docker/test/upgrade/run.sh b/docker/test/upgrade/run.sh index 158ac19229e..f014fce49f6 100644 --- a/docker/test/upgrade/run.sh +++ b/docker/test/upgrade/run.sh @@ -77,6 +77,7 @@ remove_keeper_config "create_if_not_exists" "[01]" # it contains some new settings, but we can safely remove it rm /etc/clickhouse-server/config.d/merge_tree.xml rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml +rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml rm /etc/clickhouse-server/users.d/nonconst_timezone.xml rm /etc/clickhouse-server/users.d/s3_cache_new.xml rm /etc/clickhouse-server/users.d/replicated_ddl_entry.xml @@ -115,6 +116,7 @@ sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_defau # it contains some new settings, but we can safely remove it rm /etc/clickhouse-server/config.d/merge_tree.xml rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml +rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml rm /etc/clickhouse-server/users.d/nonconst_timezone.xml rm /etc/clickhouse-server/users.d/s3_cache_new.xml rm /etc/clickhouse-server/users.d/replicated_ddl_entry.xml diff --git a/docs/changelogs/v23.11.2.11-stable.md b/docs/changelogs/v23.11.2.11-stable.md new file mode 100644 index 00000000000..490cc9a4590 --- /dev/null +++ b/docs/changelogs/v23.11.2.11-stable.md @@ -0,0 +1,22 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.11.2.11-stable (6e5411358c8) FIXME as compared to v23.11.1.2711-stable (05bc8ef1e02) + +#### Improvement +* Backported in [#57661](https://github.com/ClickHouse/ClickHouse/issues/57661): Handle sigabrt case when getting PostgreSQl table structure with empty array. 
[#57618](https://github.com/ClickHouse/ClickHouse/pull/57618) ([Mike Kot (Михаил Кот)](https://github.com/myrrc)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Ignore ON CLUSTER clause in grant/revoke queries for management of replicated access entities. [#57538](https://github.com/ClickHouse/ClickHouse/pull/57538) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* Fix SIGSEGV for aggregation of sparse columns with any() RESPECT NULL [#57710](https://github.com/ClickHouse/ClickHouse/pull/57710) ([Azat Khuzhin](https://github.com/azat)). +* Fix bug window functions: revert [#39631](https://github.com/ClickHouse/ClickHouse/issues/39631) [#57766](https://github.com/ClickHouse/ClickHouse/pull/57766) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Pin alpine version of integration tests helper container [#57669](https://github.com/ClickHouse/ClickHouse/pull/57669) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + diff --git a/docs/changelogs/v23.11.3.23-stable.md b/docs/changelogs/v23.11.3.23-stable.md new file mode 100644 index 00000000000..7fcc65beb54 --- /dev/null +++ b/docs/changelogs/v23.11.3.23-stable.md @@ -0,0 +1,26 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.11.3.23-stable (a14ab450b0e) FIXME as compared to v23.11.2.11-stable (6e5411358c8) + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix invalid memory access in BLAKE3 (Rust) [#57876](https://github.com/ClickHouse/ClickHouse/pull/57876) ([Raúl Marín](https://github.com/Algunenano)). +* Normalize function names in CREATE INDEX [#57906](https://github.com/ClickHouse/ClickHouse/pull/57906) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix handling of unavailable replicas before first request happened [#57933](https://github.com/ClickHouse/ClickHouse/pull/57933) ([Nikita Taranov](https://github.com/nickitat)). +* Revert "Fix bug window functions: revert [#39631](https://github.com/ClickHouse/ClickHouse/issues/39631)" [#58031](https://github.com/ClickHouse/ClickHouse/pull/58031) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + +#### NO CL CATEGORY + +* Backported in [#57918](https://github.com/ClickHouse/ClickHouse/issues/57918):. [#57909](https://github.com/ClickHouse/ClickHouse/pull/57909) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Remove heavy rust stable toolchain [#57905](https://github.com/ClickHouse/ClickHouse/pull/57905) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fix docker image for integration tests (fixes CI) [#57952](https://github.com/ClickHouse/ClickHouse/pull/57952) ([Azat Khuzhin](https://github.com/azat)). +* Always use `pread` for reading cache segments [#57970](https://github.com/ClickHouse/ClickHouse/pull/57970) ([Nikita Taranov](https://github.com/nickitat)). + diff --git a/docs/changelogs/v23.12.1.1368-stable.md b/docs/changelogs/v23.12.1.1368-stable.md new file mode 100644 index 00000000000..1a322ae9c0f --- /dev/null +++ b/docs/changelogs/v23.12.1.1368-stable.md @@ -0,0 +1,327 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.12.1.1368-stable (a2faa65b080) FIXME as compared to v23.11.1.2711-stable (05bc8ef1e02) + +#### Backward Incompatible Change +* Fix check for non-deterministic functions in TTL expressions. 
Previously, you could create a TTL expression with non-deterministic functions in some cases, which could lead to undefined behavior later. This fixes [#37250](https://github.com/ClickHouse/ClickHouse/issues/37250). Disallow TTL expressions that don't depend on any columns of a table by default. It can be allowed back by `SET allow_suspicious_ttl_expressions = 1` or `SET compatibility = '23.11'`. Closes [#37286](https://github.com/ClickHouse/ClickHouse/issues/37286). [#51858](https://github.com/ClickHouse/ClickHouse/pull/51858) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove function `arrayFold` because it has a bug. This closes [#57816](https://github.com/ClickHouse/ClickHouse/issues/57816). This closes [#57458](https://github.com/ClickHouse/ClickHouse/issues/57458). [#57836](https://github.com/ClickHouse/ClickHouse/pull/57836) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove the feature of `is_deleted` row in ReplacingMergeTree and the `CLEANUP` modifier for the OPTIMIZE query. This fixes [#57930](https://github.com/ClickHouse/ClickHouse/issues/57930). This closes [#54988](https://github.com/ClickHouse/ClickHouse/issues/54988). This closes [#54570](https://github.com/ClickHouse/ClickHouse/issues/54570). This closes [#50346](https://github.com/ClickHouse/ClickHouse/issues/50346). This closes [#47579](https://github.com/ClickHouse/ClickHouse/issues/47579). The feature has to be removed because it is not good. We have to remove it as quickly as possible, because there is no other option. [#57932](https://github.com/ClickHouse/ClickHouse/pull/57932) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* The MergeTree setting `clean_deleted_rows` is deprecated, it has no effect anymore. The `CLEANUP` keyword for `OPTIMIZE` is not allowed by default (unless `allow_experimental_replacing_merge_with_cleanup` is enabled). [#58267](https://github.com/ClickHouse/ClickHouse/pull/58267) ([Alexander Tokmakov](https://github.com/tavplubix)). + +#### New Feature +* Allow disabling of HEAD request before GET request. [#54602](https://github.com/ClickHouse/ClickHouse/pull/54602) ([Fionera](https://github.com/fionera)). +* Add a HTTP endpoint for checking if Keeper is ready to accept traffic. [#55876](https://github.com/ClickHouse/ClickHouse/pull/55876) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Add 'union' mode for schema inference. In this mode the resulting table schema is the union of all files schemas (so schema is inferred from each file). The mode of schema inference is controlled by a setting `schema_inference_mode` with 2 possible values - `default` and `union`. Closes [#55428](https://github.com/ClickHouse/ClickHouse/issues/55428). [#55892](https://github.com/ClickHouse/ClickHouse/pull/55892) ([Kruglov Pavel](https://github.com/Avogar)). +* Add new setting `input_format_csv_try_infer_numbers_from_strings` that allows to infer numbers from strings in CSV format. Closes [#56455](https://github.com/ClickHouse/ClickHouse/issues/56455). [#56859](https://github.com/ClickHouse/ClickHouse/pull/56859) ([Kruglov Pavel](https://github.com/Avogar)). +* Refreshable materialized views. [#56946](https://github.com/ClickHouse/ClickHouse/pull/56946) ([Michael Kolupaev](https://github.com/al13n321)). +* Add more warnings on the number of databases, tables. [#57375](https://github.com/ClickHouse/ClickHouse/pull/57375) ([凌涛](https://github.com/lingtaolf)). +* Added a new mutation command `ALTER TABLE
APPLY DELETED MASK`, which allows to enforce applying of mask written by lightweight delete and to remove rows marked as deleted from disk. [#57433](https://github.com/ClickHouse/ClickHouse/pull/57433) ([Anton Popov](https://github.com/CurtizJ)). +* Added a new SQL function `sqid` to generate Sqids (https://sqids.org/), example: `SELECT sqid(125, 126)`. [#57512](https://github.com/ClickHouse/ClickHouse/pull/57512) ([Robert Schulze](https://github.com/rschu1ze)). +* Dictionary with `HASHED_ARRAY` (and `COMPLEX_KEY_HASHED_ARRAY`) layout supports `SHARDS` similarly to `HASHED`. [#57544](https://github.com/ClickHouse/ClickHouse/pull/57544) ([vdimir](https://github.com/vdimir)). +* Add asynchronous metrics for total primary key bytes and total allocated primary key bytes in memory. [#57551](https://github.com/ClickHouse/ClickHouse/pull/57551) ([Bharat Nallan](https://github.com/bharatnc)). +* Table system.dropped_tables_parts contains parts of system.dropped_tables tables (dropped but not yet removed tables). [#57555](https://github.com/ClickHouse/ClickHouse/pull/57555) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Add `FORMAT_BYTES` as an alias for `formatReadableSize`. [#57592](https://github.com/ClickHouse/ClickHouse/pull/57592) ([Bharat Nallan](https://github.com/bharatnc)). +* Add SHA512_256 function. [#57645](https://github.com/ClickHouse/ClickHouse/pull/57645) ([Bharat Nallan](https://github.com/bharatnc)). +* Allow passing optional SESSION_TOKEN to `s3` table function. [#57850](https://github.com/ClickHouse/ClickHouse/pull/57850) ([Shani Elharrar](https://github.com/shanielh)). +* Clause `ORDER BY` now supports specifying `ALL`, meaning that ClickHouse sorts by all columns in the `SELECT` clause. Example: `SELECT col1, col2 FROM tab WHERE [...] ORDER BY ALL`. [#57875](https://github.com/ClickHouse/ClickHouse/pull/57875) ([zhongyuankai](https://github.com/zhongyuankai)). +* Added functions for punycode encoding/decoding: `punycodeEncode()` and `punycodeDecode()`. [#57969](https://github.com/ClickHouse/ClickHouse/pull/57969) ([Robert Schulze](https://github.com/rschu1ze)). +* This PR reproduces the implementation of `PASTE JOIN`, which allows users to join tables without `ON` clause. Example: ``` SQL SELECT * FROM ( SELECT number AS a FROM numbers(2) ) AS t1 PASTE JOIN ( SELECT number AS a FROM numbers(2) ORDER BY a DESC ) AS t2. [#57995](https://github.com/ClickHouse/ClickHouse/pull/57995) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* A handler `/binary` opens a visual viewer of symbols inside the ClickHouse binary. [#58211](https://github.com/ClickHouse/ClickHouse/pull/58211) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Performance Improvement +* Made copy between s3 disks using a s3-server-side copy instead of copying through the buffer. Improves `BACKUP/RESTORE` operations and `clickhouse-disks copy` command. [#56744](https://github.com/ClickHouse/ClickHouse/pull/56744) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* HashJoin respects setting `max_joined_block_size_rows` and do not produce large blocks for `ALL JOIN`. [#56996](https://github.com/ClickHouse/ClickHouse/pull/56996) ([vdimir](https://github.com/vdimir)). +* Release memory for aggregation earlier. This may avoid unnecessary external aggregation. [#57691](https://github.com/ClickHouse/ClickHouse/pull/57691) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Improve performance of string serialization. 
[#57717](https://github.com/ClickHouse/ClickHouse/pull/57717) ([Maksim Kita](https://github.com/kitaisreal)). +* Support trivial count optimization for `Merge`-engine tables. [#57867](https://github.com/ClickHouse/ClickHouse/pull/57867) ([skyoct](https://github.com/skyoct)). +* Optimized aggregation in some cases. [#57872](https://github.com/ClickHouse/ClickHouse/pull/57872) ([Anton Popov](https://github.com/CurtizJ)). +* The `hasAny()` function can now take advantage of the full-text skipping indices. [#57878](https://github.com/ClickHouse/ClickHouse/pull/57878) ([Jpnock](https://github.com/Jpnock)). +* Function `if(cond, then, else)` (and its alias `cond ? : then : else`) were optimized to use branch-free evaluation. [#57885](https://github.com/ClickHouse/ClickHouse/pull/57885) ([zhanglistar](https://github.com/zhanglistar)). +* Extract non intersecting parts ranges from MergeTree table during FINAL processing. That way we can avoid additional FINAL logic for this non intersecting parts ranges. In case when amount of duplicate values with same primary key is low, performance will be almost the same as without FINAL. Improve reading performance for MergeTree FINAL when `do_not_merge_across_partitions_select_final` setting is set. [#58120](https://github.com/ClickHouse/ClickHouse/pull/58120) ([Maksim Kita](https://github.com/kitaisreal)). +* MergeTree automatically derive `do_not_merge_across_partitions_select_final` setting if partition key expression contains only columns from primary key expression. [#58218](https://github.com/ClickHouse/ClickHouse/pull/58218) ([Maksim Kita](https://github.com/kitaisreal)). +* Speedup MIN and MAX for native types. [#58231](https://github.com/ClickHouse/ClickHouse/pull/58231) ([Raúl Marín](https://github.com/Algunenano)). + +#### Improvement +* Make inserts into distributed tables handle updated cluster configuration properly. When the list of cluster nodes is dynamically updated, the Directory Monitor of the distribution table cannot sense the new node, and the Directory Monitor must be re-noded to sense it. [#42826](https://github.com/ClickHouse/ClickHouse/pull/42826) ([zhongyuankai](https://github.com/zhongyuankai)). +* Replace --no-system-tables with loading virtual tables of system database lazily. [#55271](https://github.com/ClickHouse/ClickHouse/pull/55271) ([Azat Khuzhin](https://github.com/azat)). +* Clickhouse-test print case sn, current time and case name in one test case. [#55710](https://github.com/ClickHouse/ClickHouse/pull/55710) ([guoxiaolong](https://github.com/guoxiaolongzte)). +* Do not allow creating replicated table with inconsistent merge params. [#56833](https://github.com/ClickHouse/ClickHouse/pull/56833) ([Duc Canh Le](https://github.com/canhld94)). +* Implement SLRU cache policy for filesystem cache. [#57076](https://github.com/ClickHouse/ClickHouse/pull/57076) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Show uncompressed size in `system.tables`, obtained from data parts' checksums [#56618](https://github.com/ClickHouse/ClickHouse/issues/56618). [#57186](https://github.com/ClickHouse/ClickHouse/pull/57186) ([Chen Lixiang](https://github.com/chenlx0)). +* Add `skip_unavailable_shards` as a setting for `Distributed` tables that is similar to the corresponding query-level setting. Closes [#43666](https://github.com/ClickHouse/ClickHouse/issues/43666). [#57218](https://github.com/ClickHouse/ClickHouse/pull/57218) ([Gagan Goel](https://github.com/tntnatbry)). 
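The table-level `skip_unavailable_shards` setting described in the entry above is applied when the `Distributed` table is defined. A minimal sketch of how it could be set, assuming a placeholder cluster `my_cluster` and an existing local table `default.events` (both names are illustrative, not part of this PR):

```bash
# Hypothetical example: 'my_cluster' and 'default.events' are placeholders.
# The table-level setting mirrors the query-level skip_unavailable_shards setting.
clickhouse-client --query "
    CREATE TABLE default.events_dist AS default.events
    ENGINE = Distributed(my_cluster, default, events)
    SETTINGS skip_unavailable_shards = 1"
```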
+* Function `substring()` (aliases: `substr`, `mid`) can now be used with `Enum` types. Previously, the first function argument had to be a value of type `String` or `FixedString`. This improves compatibility with 3rd party tools such as Tableau via MySQL interface. [#57277](https://github.com/ClickHouse/ClickHouse/pull/57277) ([Serge Klochkov](https://github.com/slvrtrn)). +* Better hints when a table doesn't exist. [#57342](https://github.com/ClickHouse/ClickHouse/pull/57342) ([Bharat Nallan](https://github.com/bharatnc)). +* Allow to overwrite `max_partition_size_to_drop` and `max_table_size_to_drop` server settings in query time. [#57452](https://github.com/ClickHouse/ClickHouse/pull/57452) ([Jordi Villar](https://github.com/jrdi)). +* Add support for read-only flag when connecting to the ZooKeeper server (fixes [#53749](https://github.com/ClickHouse/ClickHouse/issues/53749)). [#57479](https://github.com/ClickHouse/ClickHouse/pull/57479) ([Mikhail Koviazin](https://github.com/mkmkme)). +* Fix possible distributed sends stuck due to "No such file or directory" (during recovering batch from disk). Fix possible issues with `error_count` from `system.distribution_queue` (in case of `distributed_directory_monitor_max_sleep_time_ms` >5min). Introduce profile event to track async INSERT failures - `DistributedAsyncInsertionFailures`. [#57480](https://github.com/ClickHouse/ClickHouse/pull/57480) ([Azat Khuzhin](https://github.com/azat)). +* The limit for the number of connections per endpoint for background fetches was raised from `15` to the value of `background_fetches_pool_size` setting. - MergeTree-level setting `replicated_max_parallel_fetches_for_host` became obsolete - MergeTree-level settings `replicated_fetches_http_connection_timeout`, `replicated_fetches_http_send_timeout` and `replicated_fetches_http_receive_timeout` are moved to the Server-level. - Setting `keep_alive_timeout` is added to the list of Server-level settings. [#57523](https://github.com/ClickHouse/ClickHouse/pull/57523) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* It is now possible to refer to ALIAS column in index (non-primary-key) definitions (issue [#55650](https://github.com/ClickHouse/ClickHouse/issues/55650)). Example: `CREATE TABLE tab(col UInt32, col_alias ALIAS col + 1, INDEX idx (col_alias) TYPE minmax) ENGINE = MergeTree ORDER BY col;`. [#57546](https://github.com/ClickHouse/ClickHouse/pull/57546) ([Robert Schulze](https://github.com/rschu1ze)). +* Function `format()` now supports arbitrary argument types (instead of only `String` and `FixedString` arguments). This is important to calculate `SELECT format('The {0} to all questions is {1}', 'answer', 42)`. [#57549](https://github.com/ClickHouse/ClickHouse/pull/57549) ([Robert Schulze](https://github.com/rschu1ze)). +* Support PostgreSQL generated columns and default column values in `MaterializedPostgreSQL`. Closes [#40449](https://github.com/ClickHouse/ClickHouse/issues/40449). [#57568](https://github.com/ClickHouse/ClickHouse/pull/57568) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Allow to apply some filesystem cache config settings changes without server restart. [#57578](https://github.com/ClickHouse/ClickHouse/pull/57578) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Handle sigabrt case when getting PostgreSQl table structure with empty array. [#57618](https://github.com/ClickHouse/ClickHouse/pull/57618) ([Mike Kot (Михаил Кот)](https://github.com/myrrc)). 
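The `format()` and ALIAS-column-index improvements above are easiest to see from the examples quoted in their entries; a short sketch of running both through `clickhouse-client` (the table name `tab` is the one used in the entry):

```bash
# Both statements are taken verbatim from the changelog entries above.
clickhouse-client --query "SELECT format('The {0} to all questions is {1}', 'answer', 42)"
clickhouse-client --query "CREATE TABLE tab (col UInt32, col_alias ALIAS col + 1, INDEX idx (col_alias) TYPE minmax) ENGINE = MergeTree ORDER BY col"
```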
+* Allows to use the `date_trunc()` function with the first argument not depending on the case of it. Both cases are now supported: `SELECT date_trunc('day', now())` and `SELECT date_trunc('DAY', now())`. [#57624](https://github.com/ClickHouse/ClickHouse/pull/57624) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Expose the total number of errors occurred since last server as a `ClickHouseErrorMetric_ALL` metric. [#57627](https://github.com/ClickHouse/ClickHouse/pull/57627) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Allow nodes in config with from_env/from_zk and non empty element with replace=1. [#57628](https://github.com/ClickHouse/ClickHouse/pull/57628) ([Azat Khuzhin](https://github.com/azat)). +* Generate malformed output that cannot be parsed as JSON. [#57646](https://github.com/ClickHouse/ClickHouse/pull/57646) ([Julia Kartseva](https://github.com/jkartseva)). +* Consider lightweight deleted rows when selecting parts to merge if enabled. [#57648](https://github.com/ClickHouse/ClickHouse/pull/57648) ([Zhuo Qiu](https://github.com/jewelzqiu)). +* Make querying system.filesystem_cache not memory intensive. [#57687](https://github.com/ClickHouse/ClickHouse/pull/57687) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Allow IPv6 to UInt128 conversion and binary arithmetic. [#57707](https://github.com/ClickHouse/ClickHouse/pull/57707) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Support negative positional arguments. Closes [#57736](https://github.com/ClickHouse/ClickHouse/issues/57736). [#57741](https://github.com/ClickHouse/ClickHouse/pull/57741) ([flynn](https://github.com/ucasfl)). +* Add a setting for `async inserts deduplication cache` -- how long we wait for cache update. Deprecate setting `async_block_ids_cache_min_update_interval_ms`. Now cache is updated only in case of conflicts. [#57743](https://github.com/ClickHouse/ClickHouse/pull/57743) ([alesapin](https://github.com/alesapin)). +* `sleep()` function now can be cancelled with `KILL QUERY`. [#57746](https://github.com/ClickHouse/ClickHouse/pull/57746) ([Vitaly Baranov](https://github.com/vitlibar)). +* Slightly better inference of unnamed tupes in JSON formats. [#57751](https://github.com/ClickHouse/ClickHouse/pull/57751) ([Kruglov Pavel](https://github.com/Avogar)). +* Refactor UserDefinedSQL* classes to make it possible to add SQL UDF storages which are different from ZooKeeper and Disk. [#57752](https://github.com/ClickHouse/ClickHouse/pull/57752) ([Natasha Chizhonkova](https://github.com/chizhonkova)). +* Forbid `CREATE TABLE ... AS SELECT` queries for Replicated table engines in Replicated database because they are broken. Reference [#35408](https://github.com/ClickHouse/ClickHouse/issues/35408). [#57796](https://github.com/ClickHouse/ClickHouse/pull/57796) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix and improve transform query for external database, we should recursively obtain all compatible predicates. [#57888](https://github.com/ClickHouse/ClickHouse/pull/57888) ([flynn](https://github.com/ucasfl)). +* Support dynamic reloading of filesystem cache size. Closes [#57866](https://github.com/ClickHouse/ClickHouse/issues/57866). [#57897](https://github.com/ClickHouse/ClickHouse/pull/57897) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix system.stack_trace for threads with blocked SIGRTMIN. [#57907](https://github.com/ClickHouse/ClickHouse/pull/57907) ([Azat Khuzhin](https://github.com/azat)). 
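The case-insensitive `date_trunc()` change above means both spellings from the entry now resolve to the same truncation unit; a quick sketch:

```bash
# Both calls come from the changelog entry above and should return the same truncated timestamp.
clickhouse-client --query "SELECT date_trunc('day', now())"
clickhouse-client --query "SELECT date_trunc('DAY', now())"
```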
+* Added a new setting `readonly` which can be used to specify a s3 disk is read only. It can be useful to create a table with read only `s3_plain` type disk. [#57977](https://github.com/ClickHouse/ClickHouse/pull/57977) ([Pengyuan Bian](https://github.com/bianpengyuan)). +* Support keeper failures in quorum check. [#57986](https://github.com/ClickHouse/ClickHouse/pull/57986) ([Raúl Marín](https://github.com/Algunenano)). +* Add max/peak RSS (`MemoryResidentMax`) into system.asynchronous_metrics. [#58095](https://github.com/ClickHouse/ClickHouse/pull/58095) ([Azat Khuzhin](https://github.com/azat)). +* Fix system.stack_trace for threads with blocked SIGRTMIN (and also send signal to the threads only if it is not blocked to avoid waiting `storage_system_stack_trace_pipe_read_timeout_ms` when it does not make any sense). [#58136](https://github.com/ClickHouse/ClickHouse/pull/58136) ([Azat Khuzhin](https://github.com/azat)). +* This PR allows users to use s3 links (`https://` and `s3://`) without mentioning region if it's not default. Also find the correct region if the user mentioned the wrong one. ### Documentation entry for user-facing changes. [#58148](https://github.com/ClickHouse/ClickHouse/pull/58148) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* `clickhouse-format --obfuscate` will know about Settings, MergeTreeSettings, and time zones and keep their names unchanged. [#58179](https://github.com/ClickHouse/ClickHouse/pull/58179) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Added explicit `finalize()` function in `ZipArchiveWriter`. Simplify too complicated code in `ZipArchiveWriter`. This PR fixes [#58074](https://github.com/ClickHouse/ClickHouse/issues/58074). [#58202](https://github.com/ClickHouse/ClickHouse/pull/58202) ([Vitaly Baranov](https://github.com/vitlibar)). +* The primary key analysis in MergeTree tables will now be applied to predicates that include the virtual column `_part_offset` (optionally with `_part`). This feature can serve as a poor man's secondary index. [#58224](https://github.com/ClickHouse/ClickHouse/pull/58224) ([Amos Bird](https://github.com/amosbird)). +* Make caches with the same path use the same cache objects. This behaviour existed before, but was broken in https://github.com/ClickHouse/ClickHouse/pull/48805 (in 23.4). If such caches with the same path have different set of cache settings, an exception will be thrown, that this is not allowed. [#58264](https://github.com/ClickHouse/ClickHouse/pull/58264) ([Kseniia Sumarokova](https://github.com/kssenii)). + +#### Build/Testing/Packaging Improvement +* Allow usage of Azure-related table engines/functions on macOS. [#51866](https://github.com/ClickHouse/ClickHouse/pull/51866) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* ClickHouse Fast Test now uses Musl instead of GLibc. [#57711](https://github.com/ClickHouse/ClickHouse/pull/57711) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Run ClickBench for every commit. This closes [#57708](https://github.com/ClickHouse/ClickHouse/issues/57708). [#57712](https://github.com/ClickHouse/ClickHouse/pull/57712) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fixed a sorting order breakage in TTL GROUP BY [#49103](https://github.com/ClickHouse/ClickHouse/pull/49103) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). 
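Going back to the `MemoryResidentMax` metric added in the improvements above, a hedged sketch of reading it, assuming it is exposed under that exact name and that `system.asynchronous_metrics` keeps its usual `metric`/`value` layout:

```bash
# 'MemoryResidentMax' is the name given in the changelog entry above.
clickhouse-client --query "
    SELECT metric, value
    FROM system.asynchronous_metrics
    WHERE metric = 'MemoryResidentMax'"
```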
+* fix: split lttb bucket strategy, first bucket and last bucket should only contain single point [#57003](https://github.com/ClickHouse/ClickHouse/pull/57003) ([FFish](https://github.com/wxybear)). +* Fix possible deadlock in Template format during sync after error [#57004](https://github.com/ClickHouse/ClickHouse/pull/57004) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix early stop while parsing file with skipping lots of errors [#57006](https://github.com/ClickHouse/ClickHouse/pull/57006) ([Kruglov Pavel](https://github.com/Avogar)). +* Prevent dictionary's ACL bypass via dictionary() table function [#57362](https://github.com/ClickHouse/ClickHouse/pull/57362) ([Salvatore Mesoraca](https://github.com/aiven-sal)). +* Fix another case of non-ready set. [#57423](https://github.com/ClickHouse/ClickHouse/pull/57423) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix several issues regarding PostgreSQL `array_ndims` usage. [#57436](https://github.com/ClickHouse/ClickHouse/pull/57436) ([Ryan Jacobs](https://github.com/ryanmjacobs)). +* Fix RWLock inconsistency after write lock timeout [#57454](https://github.com/ClickHouse/ClickHouse/pull/57454) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix: don't exclude ephemeral column when building pushing to view chain [#57461](https://github.com/ClickHouse/ClickHouse/pull/57461) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* MaterializedPostgreSQL: fix issue [#41922](https://github.com/ClickHouse/ClickHouse/issues/41922), add test for [#41923](https://github.com/ClickHouse/ClickHouse/issues/41923) [#57515](https://github.com/ClickHouse/ClickHouse/pull/57515) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Ignore ON CLUSTER clause in grant/revoke queries for management of replicated access entities. [#57538](https://github.com/ClickHouse/ClickHouse/pull/57538) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* Fix crash in clickhouse-local [#57553](https://github.com/ClickHouse/ClickHouse/pull/57553) ([Nikolay Degterinsky](https://github.com/evillique)). +* Materialize block in HashJoin for Type::EMPTY [#57564](https://github.com/ClickHouse/ClickHouse/pull/57564) ([vdimir](https://github.com/vdimir)). +* Fix possible segfault in PostgreSQLSource [#57567](https://github.com/ClickHouse/ClickHouse/pull/57567) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix type correction in HashJoin for nested low cardinality [#57614](https://github.com/ClickHouse/ClickHouse/pull/57614) ([vdimir](https://github.com/vdimir)). +* Avoid hangs of system.stack_trace by correctly prohibit parallel read from it [#57641](https://github.com/ClickHouse/ClickHouse/pull/57641) ([Azat Khuzhin](https://github.com/azat)). +* Fix SIGSEGV for aggregation of sparse columns with any() RESPECT NULL [#57710](https://github.com/ClickHouse/ClickHouse/pull/57710) ([Azat Khuzhin](https://github.com/azat)). +* Fix unary operators parsing [#57713](https://github.com/ClickHouse/ClickHouse/pull/57713) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix RWLock inconsistency after write lock timeout (again) [#57733](https://github.com/ClickHouse/ClickHouse/pull/57733) ([Vitaly Baranov](https://github.com/vitlibar)). +* Table engine MaterializedPostgreSQL fix dependency loading [#57754](https://github.com/ClickHouse/ClickHouse/pull/57754) ([Kseniia Sumarokova](https://github.com/kssenii)). 
+* Fix retries for disconnected nodes for BACKUP/RESTORE ON CLUSTER [#57764](https://github.com/ClickHouse/ClickHouse/pull/57764) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix bug window functions: revert [#39631](https://github.com/ClickHouse/ClickHouse/issues/39631) [#57766](https://github.com/ClickHouse/ClickHouse/pull/57766) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix result of external aggregation in case of partially materialized projection [#57790](https://github.com/ClickHouse/ClickHouse/pull/57790) ([Anton Popov](https://github.com/CurtizJ)). +* Fix merge in aggregation functions with `*Map` combinator [#57795](https://github.com/ClickHouse/ClickHouse/pull/57795) ([Anton Popov](https://github.com/CurtizJ)). +* Disable system.kafka_consumers by default (due to possible live memory leak) [#57822](https://github.com/ClickHouse/ClickHouse/pull/57822) ([Azat Khuzhin](https://github.com/azat)). +* Fix low-cardinality keys support in MergeJoin [#57827](https://github.com/ClickHouse/ClickHouse/pull/57827) ([vdimir](https://github.com/vdimir)). +* Create consumers for Kafka tables on fly (but keep them for some period since last used) [#57829](https://github.com/ClickHouse/ClickHouse/pull/57829) ([Azat Khuzhin](https://github.com/azat)). +* InterpreterCreateQuery sample block fix [#57855](https://github.com/ClickHouse/ClickHouse/pull/57855) ([Maksim Kita](https://github.com/kitaisreal)). +* bugfix: addresses_expr ignored for psql named collections [#57874](https://github.com/ClickHouse/ClickHouse/pull/57874) ([joelynch](https://github.com/joelynch)). +* Fix invalid memory access in BLAKE3 (Rust) [#57876](https://github.com/ClickHouse/ClickHouse/pull/57876) ([Raúl Marín](https://github.com/Algunenano)). +* Resurrect `arrayFold()` [#57879](https://github.com/ClickHouse/ClickHouse/pull/57879) ([Robert Schulze](https://github.com/rschu1ze)). +* Normalize function names in CREATE INDEX [#57906](https://github.com/ClickHouse/ClickHouse/pull/57906) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix handling of unavailable replicas before first request happened [#57933](https://github.com/ClickHouse/ClickHouse/pull/57933) ([Nikita Taranov](https://github.com/nickitat)). +* Fix literal alias misclassification [#57988](https://github.com/ClickHouse/ClickHouse/pull/57988) ([Chen768959](https://github.com/Chen768959)). +* Revert "Fix bug window functions: revert [#39631](https://github.com/ClickHouse/ClickHouse/issues/39631)" [#58031](https://github.com/ClickHouse/ClickHouse/pull/58031) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix invalid preprocessing on Keeper [#58069](https://github.com/ClickHouse/ClickHouse/pull/58069) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix Integer overflow in Poco::UTF32Encoding [#58073](https://github.com/ClickHouse/ClickHouse/pull/58073) ([Andrey Fedotov](https://github.com/anfedotoff)). +* Fix parallel replicas in presence of a scalar subquery with a big integer value [#58118](https://github.com/ClickHouse/ClickHouse/pull/58118) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix `accurateCastOrNull` for out-of-range DateTime [#58139](https://github.com/ClickHouse/ClickHouse/pull/58139) ([Andrey Zvonov](https://github.com/zvonand)). +* Fix possible PARAMETER_OUT_OF_BOUND error during subcolumns reading from wide part in MergeTree [#58175](https://github.com/ClickHouse/ClickHouse/pull/58175) ([Kruglov Pavel](https://github.com/Avogar)). 
+* Remove parallel parsing for JSONCompactEachRow [#58181](https://github.com/ClickHouse/ClickHouse/pull/58181) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* fix CREATE VIEW hang [#58220](https://github.com/ClickHouse/ClickHouse/pull/58220) ([Tao Wang](https://github.com/wangtZJU)). +* Fix parallel parsing for JSONCompactEachRow [#58250](https://github.com/ClickHouse/ClickHouse/pull/58250) ([Kruglov Pavel](https://github.com/Avogar)). + +#### NO CL ENTRY + +* NO CL ENTRY: 'Revert "Revert "Update Sentry""'. [#57694](https://github.com/ClickHouse/ClickHouse/pull/57694) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "Fix RWLock inconsistency after write lock timeout"'. [#57730](https://github.com/ClickHouse/ClickHouse/pull/57730) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "improve CI with digest for docker, build and test jobs"'. [#57903](https://github.com/ClickHouse/ClickHouse/pull/57903) ([Max K.](https://github.com/mkaynov)). +* NO CL ENTRY: 'Reapply "improve CI with digest for docker, build and test jobs"'. [#57904](https://github.com/ClickHouse/ClickHouse/pull/57904) ([Max K.](https://github.com/mkaynov)). +* NO CL ENTRY: 'Revert "Merge pull request [#56573](https://github.com/ClickHouse/ClickHouse/issues/56573) from mkmkme/mkmkme/reload-config"'. [#57909](https://github.com/ClickHouse/ClickHouse/pull/57909) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "Add system.dropped_tables_parts table"'. [#58022](https://github.com/ClickHouse/ClickHouse/pull/58022) ([Antonio Andelic](https://github.com/antonio2368)). +* NO CL ENTRY: 'Revert "Consider lightweight deleted rows when selecting parts to merge"'. [#58097](https://github.com/ClickHouse/ClickHouse/pull/58097) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "Fix leftover processes/hangs in tests"'. [#58207](https://github.com/ClickHouse/ClickHouse/pull/58207) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "Create consumers for Kafka tables on fly (but keep them for some period since last used)"'. [#58272](https://github.com/ClickHouse/ClickHouse/pull/58272) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "Implement punycode encoding/decoding"'. [#58277](https://github.com/ClickHouse/ClickHouse/pull/58277) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Randomize more settings [#39663](https://github.com/ClickHouse/ClickHouse/pull/39663) ([Anton Popov](https://github.com/CurtizJ)). +* Add more tests for `compile_expressions` [#51113](https://github.com/ClickHouse/ClickHouse/pull/51113) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* [RFC] Correctly wait background threads [#52717](https://github.com/ClickHouse/ClickHouse/pull/52717) ([Azat Khuzhin](https://github.com/azat)). +* improve CI with digest for docker, build and test jobs [#56317](https://github.com/ClickHouse/ClickHouse/pull/56317) ([Max K.](https://github.com/mkaynov)). +* Prepare the introduction of more keeper faults [#56917](https://github.com/ClickHouse/ClickHouse/pull/56917) ([Raúl Marín](https://github.com/Algunenano)). +* Analyzer: Fix assert in tryReplaceAndEqualsChainsWithConstant [#57139](https://github.com/ClickHouse/ClickHouse/pull/57139) ([vdimir](https://github.com/vdimir)). 
+* Check what will happen if we build ClickHouse with Musl [#57180](https://github.com/ClickHouse/ClickHouse/pull/57180) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* support memory soft limit for keeper [#57271](https://github.com/ClickHouse/ClickHouse/pull/57271) ([Han Fei](https://github.com/hanfei1991)). +* Randomize disabled optimizations in CI [#57315](https://github.com/ClickHouse/ClickHouse/pull/57315) ([Raúl Marín](https://github.com/Algunenano)). +* Don't throw if noop when dropping database replica in batch [#57337](https://github.com/ClickHouse/ClickHouse/pull/57337) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Better JSON -> JSONEachRow fallback without catching exceptions [#57364](https://github.com/ClickHouse/ClickHouse/pull/57364) ([Kruglov Pavel](https://github.com/Avogar)). +* Add tests for [#48496](https://github.com/ClickHouse/ClickHouse/issues/48496) [#57414](https://github.com/ClickHouse/ClickHouse/pull/57414) ([Raúl Marín](https://github.com/Algunenano)). +* Add profile event for cache lookup in `ThreadPoolRemoteFSReader` [#57437](https://github.com/ClickHouse/ClickHouse/pull/57437) ([Nikita Taranov](https://github.com/nickitat)). +* Remove select() usage [#57467](https://github.com/ClickHouse/ClickHouse/pull/57467) ([Igor Nikonov](https://github.com/devcrafter)). +* Parallel replicas: friendly settings [#57542](https://github.com/ClickHouse/ClickHouse/pull/57542) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix formatting string prompt error [#57569](https://github.com/ClickHouse/ClickHouse/pull/57569) ([skyoct](https://github.com/skyoct)). +* Tune CI scale up/down multipliers [#57572](https://github.com/ClickHouse/ClickHouse/pull/57572) ([Max K.](https://github.com/mkaynov)). +* Revert "Revert "Implemented series period detect method using pocketfft lib"" [#57574](https://github.com/ClickHouse/ClickHouse/pull/57574) ([Bhavna Jindal](https://github.com/bhavnajindal)). +* Correctly handle errors during opening query in editor in client [#57587](https://github.com/ClickHouse/ClickHouse/pull/57587) ([Azat Khuzhin](https://github.com/azat)). +* Add a test for [#55251](https://github.com/ClickHouse/ClickHouse/issues/55251) [#57588](https://github.com/ClickHouse/ClickHouse/pull/57588) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add a test for [#48039](https://github.com/ClickHouse/ClickHouse/issues/48039) [#57593](https://github.com/ClickHouse/ClickHouse/pull/57593) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Update CHANGELOG.md [#57594](https://github.com/ClickHouse/ClickHouse/pull/57594) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update version after release [#57595](https://github.com/ClickHouse/ClickHouse/pull/57595) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update version_date.tsv and changelogs after v23.11.1.2711-stable [#57597](https://github.com/ClickHouse/ClickHouse/pull/57597) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Identify failed jobs in lambda and mark as steps=0 [#57600](https://github.com/ClickHouse/ClickHouse/pull/57600) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fix flaky test: distinct in order with analyzer [#57606](https://github.com/ClickHouse/ClickHouse/pull/57606) ([Igor Nikonov](https://github.com/devcrafter)). +* CHJIT add assembly printer [#57610](https://github.com/ClickHouse/ClickHouse/pull/57610) ([Maksim Kita](https://github.com/kitaisreal)). 
+* Fix parsing virtual hosted S3 URI in clickhouse_backupview script [#57612](https://github.com/ClickHouse/ClickHouse/pull/57612) ([Daniel Pozo Escalona](https://github.com/danipozo)). +* Fix docs for `fileCluster` [#57613](https://github.com/ClickHouse/ClickHouse/pull/57613) ([Andrey Zvonov](https://github.com/zvonand)). +* Analyzer: Fix logical error in MultiIfToIfPass [#57622](https://github.com/ClickHouse/ClickHouse/pull/57622) ([vdimir](https://github.com/vdimir)). +* Throw more clear exception [#57626](https://github.com/ClickHouse/ClickHouse/pull/57626) ([alesapin](https://github.com/alesapin)). +* Fix "logs and exception messages formatting", part 1 [#57630](https://github.com/ClickHouse/ClickHouse/pull/57630) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix "logs and exception messages formatting", part 2 [#57632](https://github.com/ClickHouse/ClickHouse/pull/57632) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix "logs and exception messages formatting", part 3 [#57633](https://github.com/ClickHouse/ClickHouse/pull/57633) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix "logs and exception messages formatting", part 4 [#57634](https://github.com/ClickHouse/ClickHouse/pull/57634) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove bad test (1) [#57636](https://github.com/ClickHouse/ClickHouse/pull/57636) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove bad test (2) [#57637](https://github.com/ClickHouse/ClickHouse/pull/57637) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* ClickHouse Cloud promotion [#57638](https://github.com/ClickHouse/ClickHouse/pull/57638) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Remove bad test (3) [#57639](https://github.com/ClickHouse/ClickHouse/pull/57639) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove bad test (4) [#57640](https://github.com/ClickHouse/ClickHouse/pull/57640) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Random changes in random files [#57642](https://github.com/ClickHouse/ClickHouse/pull/57642) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Merge half of [#51113](https://github.com/ClickHouse/ClickHouse/issues/51113) [#57643](https://github.com/ClickHouse/ClickHouse/pull/57643) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Analyzer: Fix JOIN ON true with join_use_nulls [#57662](https://github.com/ClickHouse/ClickHouse/pull/57662) ([vdimir](https://github.com/vdimir)). +* Pin alpine version of integration tests helper container [#57669](https://github.com/ClickHouse/ClickHouse/pull/57669) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Add support for system.stack_trace filtering optimizations for analyzer [#57682](https://github.com/ClickHouse/ClickHouse/pull/57682) ([Azat Khuzhin](https://github.com/azat)). +* test for [#33308](https://github.com/ClickHouse/ClickHouse/issues/33308) [#57693](https://github.com/ClickHouse/ClickHouse/pull/57693) ([Denny Crane](https://github.com/den-crane)). +* support keeper memory soft limit ratio [#57699](https://github.com/ClickHouse/ClickHouse/pull/57699) ([Han Fei](https://github.com/hanfei1991)). +* Fix test_dictionaries_update_and_reload/test.py::test_reload_while_loading flakiness [#57714](https://github.com/ClickHouse/ClickHouse/pull/57714) ([Azat Khuzhin](https://github.com/azat)). 
+* Tune autoscale to scale for single job in the queue [#57742](https://github.com/ClickHouse/ClickHouse/pull/57742) ([Max K.](https://github.com/mkaynov)). +* Tune network memory for dockerhub proxy hosts [#57744](https://github.com/ClickHouse/ClickHouse/pull/57744) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Parallel replicas: announcement response handling improvement [#57749](https://github.com/ClickHouse/ClickHouse/pull/57749) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix building Rust with Musl [#57756](https://github.com/ClickHouse/ClickHouse/pull/57756) ([Azat Khuzhin](https://github.com/azat)). +* Fix flaky test_parallel_replicas_distributed_read_from_all [#57757](https://github.com/ClickHouse/ClickHouse/pull/57757) ([Igor Nikonov](https://github.com/devcrafter)). +* Minor refactoring of toStartOfInterval() [#57761](https://github.com/ClickHouse/ClickHouse/pull/57761) ([Robert Schulze](https://github.com/rschu1ze)). +* Don't run test 02919_skip_lots_of_parsing_errors on aarch64 [#57762](https://github.com/ClickHouse/ClickHouse/pull/57762) ([Kruglov Pavel](https://github.com/Avogar)). +* More respect to `min_number_of_marks` in `ParallelReplicasReadingCoordinator` [#57763](https://github.com/ClickHouse/ClickHouse/pull/57763) ([Nikita Taranov](https://github.com/nickitat)). +* SerializationString reduce memory usage [#57787](https://github.com/ClickHouse/ClickHouse/pull/57787) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix ThreadSanitizer data race in librdkafka [#57791](https://github.com/ClickHouse/ClickHouse/pull/57791) ([Ilya Golshtein](https://github.com/ilejn)). +* Rename `system.async_loader` into `system.asynchronous_loader` [#57793](https://github.com/ClickHouse/ClickHouse/pull/57793) ([Sergei Trifonov](https://github.com/serxa)). +* Set replica number to its position in cluster definition [#57800](https://github.com/ClickHouse/ClickHouse/pull/57800) ([Nikita Taranov](https://github.com/nickitat)). +* fix clickhouse-client invocation in 02327_capnproto_protobuf_empty_messages [#57804](https://github.com/ClickHouse/ClickHouse/pull/57804) ([Mikhail Koviazin](https://github.com/mkmkme)). +* Fix flaky test_parallel_replicas_over_distributed [#57809](https://github.com/ClickHouse/ClickHouse/pull/57809) ([Igor Nikonov](https://github.com/devcrafter)). +* Revert [#57741](https://github.com/ClickHouse/ClickHouse/issues/57741) [#57811](https://github.com/ClickHouse/ClickHouse/pull/57811) ([Raúl Marín](https://github.com/Algunenano)). +* Dumb down `substring()` tests [#57821](https://github.com/ClickHouse/ClickHouse/pull/57821) ([Robert Schulze](https://github.com/rschu1ze)). +* Update version_date.tsv and changelogs after v23.11.2.11-stable [#57824](https://github.com/ClickHouse/ClickHouse/pull/57824) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Fix 02906_force_optimize_projection_name [#57826](https://github.com/ClickHouse/ClickHouse/pull/57826) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* ClickBench: slightly better [#57831](https://github.com/ClickHouse/ClickHouse/pull/57831) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix 02932_kill_query_sleep flakiness [#57849](https://github.com/ClickHouse/ClickHouse/pull/57849) ([Azat Khuzhin](https://github.com/azat)). +* Revert "Replace --no-system-tables with loading virtual tables of system database lazily" [#57851](https://github.com/ClickHouse/ClickHouse/pull/57851) ([Azat Khuzhin](https://github.com/azat)). 
+* Fix memory leak in StorageHDFS [#57860](https://github.com/ClickHouse/ClickHouse/pull/57860) ([Andrey Zvonov](https://github.com/zvonand)). +* Remove hardcoded clickhouse-client invocations from tests [#57861](https://github.com/ClickHouse/ClickHouse/pull/57861) ([Mikhail Koviazin](https://github.com/mkmkme)). +* Follow up to [#57568](https://github.com/ClickHouse/ClickHouse/issues/57568) [#57863](https://github.com/ClickHouse/ClickHouse/pull/57863) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix assertion in HashJoin [#57873](https://github.com/ClickHouse/ClickHouse/pull/57873) ([vdimir](https://github.com/vdimir)). +* More efficient constructor for SerializationEnum [#57887](https://github.com/ClickHouse/ClickHouse/pull/57887) ([Duc Canh Le](https://github.com/canhld94)). +* Fix test_unset_skip_unavailable_shards [#57895](https://github.com/ClickHouse/ClickHouse/pull/57895) ([Raúl Marín](https://github.com/Algunenano)). +* Add argument to fill the gap in cherry-pick [#57896](https://github.com/ClickHouse/ClickHouse/pull/57896) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Delete debug logging in OutputFormatWithUTF8ValidationAdaptor [#57899](https://github.com/ClickHouse/ClickHouse/pull/57899) ([Kruglov Pavel](https://github.com/Avogar)). +* Remove heavy rust stable toolchain [#57905](https://github.com/ClickHouse/ClickHouse/pull/57905) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Improvements for 00002_log_and_exception_messages_formatting [#57910](https://github.com/ClickHouse/ClickHouse/pull/57910) ([Raúl Marín](https://github.com/Algunenano)). +* Update CHANGELOG.md [#57911](https://github.com/ClickHouse/ClickHouse/pull/57911) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* remove cruft from TablesLoader [#57938](https://github.com/ClickHouse/ClickHouse/pull/57938) ([Bharat Nallan](https://github.com/bharatnc)). +* Fix `/dashboard` work with passwords [#57948](https://github.com/ClickHouse/ClickHouse/pull/57948) ([Sergei Trifonov](https://github.com/serxa)). +* Remove wrong test [#57950](https://github.com/ClickHouse/ClickHouse/pull/57950) ([Sergei Trifonov](https://github.com/serxa)). +* Fix docker image for integration tests (fixes CI) [#57952](https://github.com/ClickHouse/ClickHouse/pull/57952) ([Azat Khuzhin](https://github.com/azat)). +* Remove C++ templates (normalizeQuery) [#57963](https://github.com/ClickHouse/ClickHouse/pull/57963) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* A small fix for dashboard [#57964](https://github.com/ClickHouse/ClickHouse/pull/57964) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Always use `pread` for reading cache segments [#57970](https://github.com/ClickHouse/ClickHouse/pull/57970) ([Nikita Taranov](https://github.com/nickitat)). +* Improve some tests [#57973](https://github.com/ClickHouse/ClickHouse/pull/57973) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Revert "Merge pull request [#57907](https://github.com/ClickHouse/ClickHouse/issues/57907) from azat/system.stack_trace-rt_tgsigqueueinfo" [#57974](https://github.com/ClickHouse/ClickHouse/pull/57974) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a test for [#49708](https://github.com/ClickHouse/ClickHouse/issues/49708) [#57979](https://github.com/ClickHouse/ClickHouse/pull/57979) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix style-check checkout head-ref [#57989](https://github.com/ClickHouse/ClickHouse/pull/57989) ([Mikhail f. 
Shiryaev](https://github.com/Felixoid)). +* refine error message [#57991](https://github.com/ClickHouse/ClickHouse/pull/57991) ([Han Fei](https://github.com/hanfei1991)). +* CI for docs only fix [#57992](https://github.com/ClickHouse/ClickHouse/pull/57992) ([Max K.](https://github.com/mkaynov)). +* Replace rust's BLAKE3 with llvm's implementation [#57994](https://github.com/ClickHouse/ClickHouse/pull/57994) ([Raúl Marín](https://github.com/Algunenano)). +* Better trivial count optimization for storage `Merge` [#57996](https://github.com/ClickHouse/ClickHouse/pull/57996) ([Anton Popov](https://github.com/CurtizJ)). +* enhanced docs for `date_trunc()` [#58000](https://github.com/ClickHouse/ClickHouse/pull/58000) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* CI: add needs_changed_files flag for pr_info [#58003](https://github.com/ClickHouse/ClickHouse/pull/58003) ([Max K.](https://github.com/mkaynov)). +* more messages in ci [#58007](https://github.com/ClickHouse/ClickHouse/pull/58007) ([Sema Checherinda](https://github.com/CheSema)). +* Test parallel replicas with force_primary_key setting [#58010](https://github.com/ClickHouse/ClickHouse/pull/58010) ([Igor Nikonov](https://github.com/devcrafter)). +* Update 00002_log_and_exception_messages_formatting.sql [#58012](https://github.com/ClickHouse/ClickHouse/pull/58012) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix rare race in external sort/aggregation with temporary data in cache [#58013](https://github.com/ClickHouse/ClickHouse/pull/58013) ([Anton Popov](https://github.com/CurtizJ)). +* Fix segfault in FuzzJSON engine [#58015](https://github.com/ClickHouse/ClickHouse/pull/58015) ([Julia Kartseva](https://github.com/jkartseva)). +* fix freebsd build [#58019](https://github.com/ClickHouse/ClickHouse/pull/58019) ([Julia Kartseva](https://github.com/jkartseva)). +* Rename canUseParallelReplicas to canUseTaskBasedParallelReplicas [#58025](https://github.com/ClickHouse/ClickHouse/pull/58025) ([Raúl Marín](https://github.com/Algunenano)). +* Remove fixed tests from analyzer_tech_debt.txt [#58028](https://github.com/ClickHouse/ClickHouse/pull/58028) ([Raúl Marín](https://github.com/Algunenano)). +* More verbose errors on 00002_log_and_exception_messages_formatting [#58037](https://github.com/ClickHouse/ClickHouse/pull/58037) ([Raúl Marín](https://github.com/Algunenano)). +* Make window insert result into constant [#58045](https://github.com/ClickHouse/ClickHouse/pull/58045) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* CI: Happy new year [#58046](https://github.com/ClickHouse/ClickHouse/pull/58046) ([Raúl Marín](https://github.com/Algunenano)). +* Follow up for [#57691](https://github.com/ClickHouse/ClickHouse/issues/57691) [#58048](https://github.com/ClickHouse/ClickHouse/pull/58048) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* always run ast_fuzz and sqllancer [#58049](https://github.com/ClickHouse/ClickHouse/pull/58049) ([Max K.](https://github.com/mkaynov)). +* Add GH status for PR formating [#58050](https://github.com/ClickHouse/ClickHouse/pull/58050) ([Max K.](https://github.com/mkaynov)). +* Small improvement for SystemLogBase [#58051](https://github.com/ClickHouse/ClickHouse/pull/58051) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Bump Azure to v1.6.0 [#58052](https://github.com/ClickHouse/ClickHouse/pull/58052) ([Robert Schulze](https://github.com/rschu1ze)). 
+* Correct values for randomization [#58058](https://github.com/ClickHouse/ClickHouse/pull/58058) ([Anton Popov](https://github.com/CurtizJ)). +* Non post request should be readonly [#58060](https://github.com/ClickHouse/ClickHouse/pull/58060) ([San](https://github.com/santrancisco)). +* Revert "Merge pull request [#55710](https://github.com/ClickHouse/ClickHouse/issues/55710) from guoxiaolongzte/clickhouse-test… [#58066](https://github.com/ClickHouse/ClickHouse/pull/58066) ([Raúl Marín](https://github.com/Algunenano)). +* fix typo in the test 02479 [#58072](https://github.com/ClickHouse/ClickHouse/pull/58072) ([Sema Checherinda](https://github.com/CheSema)). +* Bump Azure to 1.7.2 [#58075](https://github.com/ClickHouse/ClickHouse/pull/58075) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix flaky test `02567_and_consistency` [#58076](https://github.com/ClickHouse/ClickHouse/pull/58076) ([Anton Popov](https://github.com/CurtizJ)). +* Fix Tests Bugfix Validate Check [#58078](https://github.com/ClickHouse/ClickHouse/pull/58078) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix for nightly job for digest-ci [#58079](https://github.com/ClickHouse/ClickHouse/pull/58079) ([Max K.](https://github.com/mkaynov)). +* Test for parallel replicas with remote() [#58081](https://github.com/ClickHouse/ClickHouse/pull/58081) ([Igor Nikonov](https://github.com/devcrafter)). +* Minor cosmetic changes [#58092](https://github.com/ClickHouse/ClickHouse/pull/58092) ([Raúl Marín](https://github.com/Algunenano)). +* Reintroduce OPTIMIZE CLEANUP as no-op [#58100](https://github.com/ClickHouse/ClickHouse/pull/58100) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add compatibility in the replication protocol for a removed feature [#58104](https://github.com/ClickHouse/ClickHouse/pull/58104) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Flaky 02922_analyzer_aggregate_nothing_type [#58105](https://github.com/ClickHouse/ClickHouse/pull/58105) ([Raúl Marín](https://github.com/Algunenano)). +* Update version_date.tsv and changelogs after v23.11.3.23-stable [#58106](https://github.com/ClickHouse/ClickHouse/pull/58106) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Limited CI on the master for docs only change [#58121](https://github.com/ClickHouse/ClickHouse/pull/58121) ([Max K.](https://github.com/mkaynov)). +* style fix [#58125](https://github.com/ClickHouse/ClickHouse/pull/58125) ([Max K.](https://github.com/mkaynov)). +* Support "do not test" label with ci.py [#58128](https://github.com/ClickHouse/ClickHouse/pull/58128) ([Max K.](https://github.com/mkaynov)). +* Use the single images list for integration tests everywhere [#58130](https://github.com/ClickHouse/ClickHouse/pull/58130) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Disable parallel replicas with IN (subquery) [#58133](https://github.com/ClickHouse/ClickHouse/pull/58133) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix clang-tidy [#58134](https://github.com/ClickHouse/ClickHouse/pull/58134) ([Raúl Marín](https://github.com/Algunenano)). +* Run build report check job on build failures, fix [#58135](https://github.com/ClickHouse/ClickHouse/pull/58135) ([Max K.](https://github.com/mkaynov)). +* Fix dashboard legend sorting and rows number [#58151](https://github.com/ClickHouse/ClickHouse/pull/58151) ([Sergei Trifonov](https://github.com/serxa)). 
+* Remove retryStrategy assignments overwritten in ClientFactory::create() [#58163](https://github.com/ClickHouse/ClickHouse/pull/58163) ([Daniel Pozo Escalona](https://github.com/danipozo)). +* Helper improvements [#58164](https://github.com/ClickHouse/ClickHouse/pull/58164) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Pass through exceptions for reading from S3 [#58165](https://github.com/ClickHouse/ClickHouse/pull/58165) ([Azat Khuzhin](https://github.com/azat)). +* [RFC] Adjust all std::ios implementations in poco to set failbit/badbit by default [#58166](https://github.com/ClickHouse/ClickHouse/pull/58166) ([Azat Khuzhin](https://github.com/azat)). +* Add bytes_uncompressed to system.part_log [#58167](https://github.com/ClickHouse/ClickHouse/pull/58167) ([Jordi Villar](https://github.com/jrdi)). +* Update docker/test/stateful/run.sh [#58168](https://github.com/ClickHouse/ClickHouse/pull/58168) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Update 00165_jit_aggregate_functions.sql [#58169](https://github.com/ClickHouse/ClickHouse/pull/58169) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Update clickhouse-test [#58170](https://github.com/ClickHouse/ClickHouse/pull/58170) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Profile event 'ParallelReplicasUsedCount' [#58173](https://github.com/ClickHouse/ClickHouse/pull/58173) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix flaky test `02719_aggregate_with_empty_string_key` [#58176](https://github.com/ClickHouse/ClickHouse/pull/58176) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix [#58171](https://github.com/ClickHouse/ClickHouse/issues/58171) [#58177](https://github.com/ClickHouse/ClickHouse/pull/58177) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add base backup name to system.backups and system.backup_log tables [#58178](https://github.com/ClickHouse/ClickHouse/pull/58178) ([Pradeep Chhetri](https://github.com/chhetripradeep)). +* Fix use-after-move [#58182](https://github.com/ClickHouse/ClickHouse/pull/58182) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Looking at strange code [#58196](https://github.com/ClickHouse/ClickHouse/pull/58196) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix all Exception with missing arguments [#58198](https://github.com/ClickHouse/ClickHouse/pull/58198) ([Azat Khuzhin](https://github.com/azat)). +* Fix leftover processes/hangs in tests [#58200](https://github.com/ClickHouse/ClickHouse/pull/58200) ([Azat Khuzhin](https://github.com/azat)). +* Fix DWARFBlockInputFormat failing on DWARF 5 unit address ranges [#58204](https://github.com/ClickHouse/ClickHouse/pull/58204) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix error in archive reader [#58206](https://github.com/ClickHouse/ClickHouse/pull/58206) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix DWARFBlockInputFormat using wrong base address sometimes [#58208](https://github.com/ClickHouse/ClickHouse/pull/58208) ([Michael Kolupaev](https://github.com/al13n321)). +* Add support for specifying query parameters in the command line in clickhouse-local [#58210](https://github.com/ClickHouse/ClickHouse/pull/58210) ([Pradeep Chhetri](https://github.com/chhetripradeep)). +* Fix leftover processes/hangs in tests (resubmit) [#58213](https://github.com/ClickHouse/ClickHouse/pull/58213) ([Azat Khuzhin](https://github.com/azat)). 
+* Add optimization for AND notEquals chain in logical expression optimizer [#58214](https://github.com/ClickHouse/ClickHouse/pull/58214) ([Kevin Mingtarja](https://github.com/kevinmingtarja)). +* Fix syntax and doc [#58221](https://github.com/ClickHouse/ClickHouse/pull/58221) ([San](https://github.com/santrancisco)). +* Cleanup some known short messages [#58226](https://github.com/ClickHouse/ClickHouse/pull/58226) ([Raúl Marín](https://github.com/Algunenano)). +* Some code refactoring (was an attempt to improve build time, but failed) [#58237](https://github.com/ClickHouse/ClickHouse/pull/58237) ([Azat Khuzhin](https://github.com/azat)). +* Fix perf test README [#58245](https://github.com/ClickHouse/ClickHouse/pull/58245) ([Raúl Marín](https://github.com/Algunenano)). +* [Analyzer] Add test for [#57086](https://github.com/ClickHouse/ClickHouse/issues/57086) [#58249](https://github.com/ClickHouse/ClickHouse/pull/58249) ([Raúl Marín](https://github.com/Algunenano)). +* Reintroduce compatibility with `is_deleted` on a syntax level [#58251](https://github.com/ClickHouse/ClickHouse/pull/58251) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Avoid throwing ABORTED on normal situations [#58252](https://github.com/ClickHouse/ClickHouse/pull/58252) ([Raúl Marín](https://github.com/Algunenano)). +* Remove mayBenefitFromIndexForIn [#58265](https://github.com/ClickHouse/ClickHouse/pull/58265) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Allow a few retries when committing a part during shutdown [#58269](https://github.com/ClickHouse/ClickHouse/pull/58269) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Revert [#58267](https://github.com/ClickHouse/ClickHouse/issues/58267) [#58274](https://github.com/ClickHouse/ClickHouse/pull/58274) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + diff --git a/docs/en/development/build-cross-s390x.md b/docs/en/development/build-cross-s390x.md index 088dd6f2679..b7cda515d77 100644 --- a/docs/en/development/build-cross-s390x.md +++ b/docs/en/development/build-cross-s390x.md @@ -1,206 +1,206 @@ ---- -slug: /en/development/build-cross-s390x -sidebar_position: 69 -title: How to Build, Run and Debug ClickHouse on Linux for s390x (zLinux) -sidebar_label: Build on Linux for s390x (zLinux) ---- - -As of writing (2023/3/10) building for s390x considered to be experimental. Not all features can be enabled, has broken features and is currently under active development. - - -## Building - -As s390x does not support boringssl, it uses OpenSSL and has two related build options. -- By default, the s390x build will dynamically link to OpenSSL libraries. It will build OpenSSL shared objects, so it's not necessary to install OpenSSL beforehand. (This option is recommended in all cases.) -- Another option is to build OpenSSL in-tree. In this case two build flags need to be supplied to cmake -```bash --DENABLE_OPENSSL_DYNAMIC=0 -DENABLE_OPENSSL=1 -``` - -These instructions assume that the host machine is x86_64 and has all the tooling required to build natively based on the [build instructions](../development/build.md). It also assumes that the host is Ubuntu 22.04 but the following instructions should also work on Ubuntu 20.04. 
- -In addition to installing the tooling used to build natively, the following additional packages need to be installed: - -```bash -apt-get install binutils-s390x-linux-gnu libc6-dev-s390x-cross gcc-s390x-linux-gnu binfmt-support qemu-user-static -``` - -If you wish to cross compile rust code install the rust cross compile target for s390x: -```bash -rustup target add s390x-unknown-linux-gnu -``` - -To build for s390x: -```bash -cmake -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-s390x.cmake .. -ninja -``` - -## Running - -Once built, the binary can be run with, eg.: - -```bash -qemu-s390x-static -L /usr/s390x-linux-gnu ./clickhouse -``` - -## Debugging - -Install LLDB: - -```bash -apt-get install lldb-15 -``` - -To Debug a s390x executable, run clickhouse using QEMU in debug mode: - -```bash -qemu-s390x-static -g 31338 -L /usr/s390x-linux-gnu ./clickhouse -``` - -In another shell run LLDB and attach, replace `` and `` with the values corresponding to your environment. -```bash -lldb-15 -(lldb) target create ./clickhouse -Current executable set to '//ClickHouse//programs/clickhouse' (s390x). -(lldb) settings set target.source-map //ClickHouse -(lldb) gdb-remote 31338 -Process 1 stopped -* thread #1, stop reason = signal SIGTRAP - frame #0: 0x0000004020e74cd0 --> 0x4020e74cd0: lgr %r2, %r15 - 0x4020e74cd4: aghi %r15, -160 - 0x4020e74cd8: xc 0(8,%r15), 0(%r15) - 0x4020e74cde: brasl %r14, 275429939040 -(lldb) b main -Breakpoint 1: 9 locations. -(lldb) c -Process 1 resuming -Process 1 stopped -* thread #1, stop reason = breakpoint 1.1 - frame #0: 0x0000004005cd9fc0 clickhouse`main(argc_=1, argv_=0x0000004020e594a8) at main.cpp:450:17 - 447 #if !defined(FUZZING_MODE) - 448 int main(int argc_, char ** argv_) - 449 { --> 450 inside_main = true; - 451 SCOPE_EXIT({ inside_main = false; }); - 452 - 453 /// PHDR cache is required for query profiler to work reliably -``` - -## Visual Studio Code integration - -- [CodeLLDB](https://github.com/vadimcn/vscode-lldb) extension is required for visual debugging. -- [Command Variable](https://github.com/rioj7/command-variable) extension can help dynamic launches if using [CMake Variants](https://github.com/microsoft/vscode-cmake-tools/blob/main/docs/variants.md). -- Make sure to set the backend to your LLVM installation eg. `"lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so"` -- Make sure to run the clickhouse executable in debug mode prior to launch. 
(It is also possible to create a `preLaunchTask` that automates this) - -### Example configurations -#### cmake-variants.yaml -```yaml -buildType: - default: relwithdebinfo - choices: - debug: - short: Debug - long: Emit debug information - buildType: Debug - release: - short: Release - long: Optimize generated code - buildType: Release - relwithdebinfo: - short: RelWithDebInfo - long: Release with Debug Info - buildType: RelWithDebInfo - tsan: - short: MinSizeRel - long: Minimum Size Release - buildType: MinSizeRel - -toolchain: - default: default - description: Select toolchain - choices: - default: - short: x86_64 - long: x86_64 - s390x: - short: s390x - long: s390x - settings: - CMAKE_TOOLCHAIN_FILE: cmake/linux/toolchain-s390x.cmake -``` - -#### launch.json -```json -{ - "version": "0.2.0", - "configurations": [ - { - "type": "lldb", - "request": "custom", - "name": "(lldb) Launch s390x with qemu", - "targetCreateCommands": ["target create ${command:cmake.launchTargetPath}"], - "processCreateCommands": ["gdb-remote 2159"], - "preLaunchTask": "Run ClickHouse" - } - ] -} -``` - -#### settings.json -This would also put different builds under different subfolders of the `build` folder. -```json -{ - "cmake.buildDirectory": "${workspaceFolder}/build/${buildKitVendor}-${buildKitVersion}-${variant:toolchain}-${variant:buildType}", - "lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so" -} -``` - -#### run-debug.sh -```sh -#! /bin/sh -echo 'Starting debugger session' -cd $1 -qemu-s390x-static -g 2159 -L /usr/s390x-linux-gnu $2 $3 $4 -``` - -#### tasks.json -Defines a task to run the compiled executable in `server` mode under a `tmp` folder next to the binaries, with configuration from under `programs/server/config.xml`. -```json -{ - "version": "2.0.0", - "tasks": [ - { - "label": "Run ClickHouse", - "type": "shell", - "isBackground": true, - "command": "${workspaceFolder}/.vscode/run-debug.sh", - "args": [ - "${command:cmake.launchTargetDirectory}/tmp", - "${command:cmake.launchTargetPath}", - "server", - "--config-file=${workspaceFolder}/programs/server/config.xml" - ], - "problemMatcher": [ - { - "pattern": [ - { - "regexp": ".", - "file": 1, - "location": 2, - "message": 3 - } - ], - "background": { - "activeOnStart": true, - "beginsPattern": "^Starting debugger session", - "endsPattern": ".*" - } - } - ] - } - ] -} -``` +--- +slug: /en/development/build-cross-s390x +sidebar_position: 69 +title: How to Build, Run and Debug ClickHouse on Linux for s390x (zLinux) +sidebar_label: Build on Linux for s390x (zLinux) +--- + +As of writing (2023/3/10) building for s390x considered to be experimental. Not all features can be enabled, has broken features and is currently under active development. + + +## Building + +As s390x does not support boringssl, it uses OpenSSL and has two related build options. +- By default, the s390x build will dynamically link to OpenSSL libraries. It will build OpenSSL shared objects, so it's not necessary to install OpenSSL beforehand. (This option is recommended in all cases.) +- Another option is to build OpenSSL in-tree. In this case two build flags need to be supplied to cmake +```bash +-DENABLE_OPENSSL_DYNAMIC=0 -DENABLE_OPENSSL=1 +``` + +These instructions assume that the host machine is x86_64 and has all the tooling required to build natively based on the [build instructions](../development/build.md). It also assumes that the host is Ubuntu 22.04 but the following instructions should also work on Ubuntu 20.04. 
+ +In addition to installing the tooling used to build natively, the following additional packages need to be installed: + +```bash +apt-get install binutils-s390x-linux-gnu libc6-dev-s390x-cross gcc-s390x-linux-gnu binfmt-support qemu-user-static +``` + +If you wish to cross compile rust code install the rust cross compile target for s390x: +```bash +rustup target add s390x-unknown-linux-gnu +``` + +To build for s390x: +```bash +cmake -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-s390x.cmake .. +ninja +``` + +## Running + +Once built, the binary can be run with, eg.: + +```bash +qemu-s390x-static -L /usr/s390x-linux-gnu ./clickhouse +``` + +## Debugging + +Install LLDB: + +```bash +apt-get install lldb-15 +``` + +To Debug a s390x executable, run clickhouse using QEMU in debug mode: + +```bash +qemu-s390x-static -g 31338 -L /usr/s390x-linux-gnu ./clickhouse +``` + +In another shell run LLDB and attach, replace `` and `` with the values corresponding to your environment. +```bash +lldb-15 +(lldb) target create ./clickhouse +Current executable set to '//ClickHouse//programs/clickhouse' (s390x). +(lldb) settings set target.source-map //ClickHouse +(lldb) gdb-remote 31338 +Process 1 stopped +* thread #1, stop reason = signal SIGTRAP + frame #0: 0x0000004020e74cd0 +-> 0x4020e74cd0: lgr %r2, %r15 + 0x4020e74cd4: aghi %r15, -160 + 0x4020e74cd8: xc 0(8,%r15), 0(%r15) + 0x4020e74cde: brasl %r14, 275429939040 +(lldb) b main +Breakpoint 1: 9 locations. +(lldb) c +Process 1 resuming +Process 1 stopped +* thread #1, stop reason = breakpoint 1.1 + frame #0: 0x0000004005cd9fc0 clickhouse`main(argc_=1, argv_=0x0000004020e594a8) at main.cpp:450:17 + 447 #if !defined(FUZZING_MODE) + 448 int main(int argc_, char ** argv_) + 449 { +-> 450 inside_main = true; + 451 SCOPE_EXIT({ inside_main = false; }); + 452 + 453 /// PHDR cache is required for query profiler to work reliably +``` + +## Visual Studio Code integration + +- [CodeLLDB](https://github.com/vadimcn/vscode-lldb) extension is required for visual debugging. +- [Command Variable](https://github.com/rioj7/command-variable) extension can help dynamic launches if using [CMake Variants](https://github.com/microsoft/vscode-cmake-tools/blob/main/docs/variants.md). +- Make sure to set the backend to your LLVM installation eg. `"lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so"` +- Make sure to run the clickhouse executable in debug mode prior to launch. 
(It is also possible to create a `preLaunchTask` that automates this) + +### Example configurations +#### cmake-variants.yaml +```yaml +buildType: + default: relwithdebinfo + choices: + debug: + short: Debug + long: Emit debug information + buildType: Debug + release: + short: Release + long: Optimize generated code + buildType: Release + relwithdebinfo: + short: RelWithDebInfo + long: Release with Debug Info + buildType: RelWithDebInfo + tsan: + short: MinSizeRel + long: Minimum Size Release + buildType: MinSizeRel + +toolchain: + default: default + description: Select toolchain + choices: + default: + short: x86_64 + long: x86_64 + s390x: + short: s390x + long: s390x + settings: + CMAKE_TOOLCHAIN_FILE: cmake/linux/toolchain-s390x.cmake +``` + +#### launch.json +```json +{ + "version": "0.2.0", + "configurations": [ + { + "type": "lldb", + "request": "custom", + "name": "(lldb) Launch s390x with qemu", + "targetCreateCommands": ["target create ${command:cmake.launchTargetPath}"], + "processCreateCommands": ["gdb-remote 2159"], + "preLaunchTask": "Run ClickHouse" + } + ] +} +``` + +#### settings.json +This would also put different builds under different subfolders of the `build` folder. +```json +{ + "cmake.buildDirectory": "${workspaceFolder}/build/${buildKitVendor}-${buildKitVersion}-${variant:toolchain}-${variant:buildType}", + "lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so" +} +``` + +#### run-debug.sh +```sh +#! /bin/sh +echo 'Starting debugger session' +cd $1 +qemu-s390x-static -g 2159 -L /usr/s390x-linux-gnu $2 $3 $4 +``` + +#### tasks.json +Defines a task to run the compiled executable in `server` mode under a `tmp` folder next to the binaries, with configuration from under `programs/server/config.xml`. +```json +{ + "version": "2.0.0", + "tasks": [ + { + "label": "Run ClickHouse", + "type": "shell", + "isBackground": true, + "command": "${workspaceFolder}/.vscode/run-debug.sh", + "args": [ + "${command:cmake.launchTargetDirectory}/tmp", + "${command:cmake.launchTargetPath}", + "server", + "--config-file=${workspaceFolder}/programs/server/config.xml" + ], + "problemMatcher": [ + { + "pattern": [ + { + "regexp": ".", + "file": 1, + "location": 2, + "message": 3 + } + ], + "background": { + "activeOnStart": true, + "beginsPattern": "^Starting debugger session", + "endsPattern": ".*" + } + } + ] + } + ] +} +``` diff --git a/docs/en/development/build-osx.md b/docs/en/development/build-osx.md index e65de4a37e0..39ccc9a78c3 100644 --- a/docs/en/development/build-osx.md +++ b/docs/en/development/build-osx.md @@ -3,7 +3,7 @@ slug: /en/development/build-osx sidebar_position: 65 sidebar_label: Build on macOS title: How to Build ClickHouse on macOS -description: How to build ClickHouse on macOS +description: How to build ClickHouse on macOS for macOS --- :::info You don't have to build ClickHouse yourself! diff --git a/docs/en/development/developer-instruction.md b/docs/en/development/developer-instruction.md index 9d6a80de904..31346c77949 100644 --- a/docs/en/development/developer-instruction.md +++ b/docs/en/development/developer-instruction.md @@ -7,42 +7,39 @@ description: Prerequisites and an overview of how to build ClickHouse # Getting Started Guide for Building ClickHouse -The building of ClickHouse is supported on Linux, FreeBSD and macOS. +ClickHouse can be build on Linux, FreeBSD and macOS. If you use Windows, you can still build ClickHouse in a virtual machine running Linux, e.g. [VirtualBox](https://www.virtualbox.org/) with Ubuntu. 
-If you use Windows, you need to create a virtual machine with Ubuntu. To start working with a virtual machine please install VirtualBox. You can download Ubuntu from the website: https://www.ubuntu.com/#download. Please create a virtual machine from the downloaded image (you should reserve at least 4GB of RAM for it). To run a command-line terminal in Ubuntu, please locate a program containing the word “terminal” in its name (gnome-terminal, konsole etc.) or just press Ctrl+Alt+T. - -ClickHouse cannot work or build on a 32-bit system. You should acquire access to a 64-bit system and you can continue reading. +ClickHouse requires a 64-bit system to compile and run, 32-bit systems do not work. ## Creating a Repository on GitHub {#creating-a-repository-on-github} -To start working with ClickHouse repository you will need a GitHub account. +To start developing for ClickHouse you will need a [GitHub](https://www.virtualbox.org/) account. Please also generate a SSH key locally (if you don't have one already) and upload the public key to GitHub as this is a prerequisite for contributing patches. -You probably already have one, but if you do not, please register at https://github.com. In case you do not have SSH keys, you should generate them and then upload them on GitHub. It is required for sending over your patches. It is also possible to use the same SSH keys that you use with any other SSH servers - probably you already have those. +Next, create a fork of the [ClickHouse repository](https://github.com/ClickHouse/ClickHouse/) in your personal account by clicking the "fork" button in the upper right corner. -Create a fork of ClickHouse repository. To do that please click on the “fork” button in the upper right corner at https://github.com/ClickHouse/ClickHouse. It will fork your own copy of ClickHouse/ClickHouse to your account. +To contribute, e.g. a fix for an issue or a feature, please commit your changes to a branch in your fork, then create a "pull request" with the changes to the main repository. -The development process consists of first committing the intended changes into your fork of ClickHouse and then creating a “pull request” for these changes to be accepted into the main repository (ClickHouse/ClickHouse). +For working with Git repositories, please install `git`. In Ubuntu run these commands in a terminal: -To work with Git repositories, please install `git`. To do that in Ubuntu you would run in the command line terminal: +```sh +sudo apt update +sudo apt install git +``` - sudo apt update - sudo apt install git - -A brief manual on using Git can be found [here](https://education.github.com/git-cheat-sheet-education.pdf). -For a detailed manual on Git see [here](https://git-scm.com/book/en/v2). +A cheatsheet for using Git can be found [here](https://education.github.com/git-cheat-sheet-education.pdf). The detailed manual for Git is [here](https://git-scm.com/book/en/v2). ## Cloning a Repository to Your Development Machine {#cloning-a-repository-to-your-development-machine} -Next, you need to download the source files onto your working machine. This is called “to clone a repository” because it creates a local copy of the repository on your working machine. +First, download the source files to your working machine, i.e. 
clone the repository: -Run in your terminal: +```sh +git clone git@github.com:your_github_username/ClickHouse.git # replace placeholder with your GitHub user name +cd ClickHouse +``` - git clone git@github.com:your_github_username/ClickHouse.git # replace placeholder with your GitHub user name - cd ClickHouse +This command creates a directory `ClickHouse/` containing the source code of ClickHouse. If you specify a custom checkout directory after the URL but it is important that this path does not contain whitespaces as it may lead to problems with the build later on. -This command will create a directory `ClickHouse/` containing the source code of ClickHouse. If you specify a custom checkout directory (after the URL), it is important that this path does not contain whitespaces as it may lead to problems with the build system. - -To make library dependencies available for the build, the ClickHouse repository uses Git submodules, i.e. references to external repositories. These are not checked out by default. To do so, you can either +The ClickHouse repository uses Git submodules, i.e. references to external repositories (usually 3rd party libraries used by ClickHouse). These are not checked out by default. To do so, you can either - run `git clone` with option `--recurse-submodules`, @@ -52,7 +49,7 @@ To make library dependencies available for the build, the ClickHouse repository You can check the Git status with the command: `git submodule status`. -If you get the following error message: +If you get the following error message Permission denied (publickey). fatal: Could not read from remote repository. @@ -60,7 +57,7 @@ If you get the following error message: Please make sure you have the correct access rights and the repository exists. -It generally means that the SSH keys for connecting to GitHub are missing. These keys are normally located in `~/.ssh`. For SSH keys to be accepted you need to upload them in the settings section of GitHub UI. +it generally means that the SSH keys for connecting to GitHub are missing. These keys are normally located in `~/.ssh`. For SSH keys to be accepted you need to upload them in GitHub's settings. You can also clone the repository via https protocol: @@ -74,12 +71,17 @@ You can also add original ClickHouse repo address to your local repository to pu After successfully running this command you will be able to pull updates from the main ClickHouse repo by running `git pull upstream master`. +:::note +Instructions below assume you are building on Linux. If you are cross-compiling or building on macOS, please also check for operating system and architecture specific guides, such as building [on macOS for macOS](build-osx.md), [on Linux for macOS](build-cross-osx.md), [on Linux for Linux/RISC-V](build-cross-riscv.md) and so on. +::: + ## Build System {#build-system} ClickHouse uses CMake and Ninja for building. -CMake - a meta-build system that can generate Ninja files (build tasks). -Ninja - a smaller build system with a focus on the speed used to execute those cmake generated tasks. +- CMake - a meta-build system that can generate Ninja files (build tasks). + +- Ninja - a smaller build system with a focus on the speed used to execute those cmake generated tasks. To install on Ubuntu, Debian or Mint run `sudo apt install cmake ninja-build`. 
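Taken together, the cloning, submodule, upstream-remote, and CMake/Ninja steps described in this hunk can be strung into a single session. The following is a minimal sketch only, assuming the placeholder fork name used above and the conventional `upstream` remote name implied by `git pull upstream master`; generator flags and build targets may differ in your setup.

```sh
# Minimal sketch of the workflow described above (the fork name is a placeholder).
git clone --recurse-submodules git@github.com:your_github_username/ClickHouse.git
cd ClickHouse

# Register the main repository as "upstream" so that `git pull upstream master` works later.
git remote add upstream https://github.com/ClickHouse/ClickHouse.git

# Configure and build with CMake + Ninja in a separate build directory.
mkdir build && cd build
cmake -G Ninja ..
ninja
```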
diff --git a/docs/en/development/style.md b/docs/en/development/style.md index 0b71a669638..77a550f2a0e 100644 --- a/docs/en/development/style.md +++ b/docs/en/development/style.md @@ -489,7 +489,7 @@ When using functions with response codes or `errno`, always check the result and ``` cpp if (0 != close(fd)) - throwFromErrno("Cannot close file " + file_name, ErrorCodes::CANNOT_CLOSE_FILE); + throw ErrnoException(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file {}", file_name); ``` You can use assert to check invariant in code. diff --git a/docs/en/engines/table-engines/index.md b/docs/en/engines/table-engines/index.md index b024820024a..5e81eacc937 100644 --- a/docs/en/engines/table-engines/index.md +++ b/docs/en/engines/table-engines/index.md @@ -67,7 +67,6 @@ Engines in the family: Engines in the family: - [Distributed](../../engines/table-engines/special/distributed.md#distributed) -- [MaterializedView](../../engines/table-engines/special/materializedview.md#materializedview) - [Dictionary](../../engines/table-engines/special/dictionary.md#dictionary) - [Merge](../../engines/table-engines/special/merge.md#merge) - [File](../../engines/table-engines/special/file.md#file) diff --git a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md index 9af857b0835..44febe78c77 100644 --- a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md +++ b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md @@ -212,5 +212,5 @@ ORDER BY key ASC ``` ### More information on Joins -- [`join_algorithm` setting](/docs/en/operations/settings/settings.md#settings-join_algorithm) +- [`join_algorithm` setting](/docs/en/operations/settings/settings.md#join_algorithm) - [JOIN clause](/docs/en/sql-reference/statements/select/join.md) diff --git a/docs/en/engines/table-engines/integrations/hdfs.md b/docs/en/engines/table-engines/integrations/hdfs.md index 19221c256f9..96e6bab6997 100644 --- a/docs/en/engines/table-engines/integrations/hdfs.md +++ b/docs/en/engines/table-engines/integrations/hdfs.md @@ -236,7 +236,7 @@ libhdfs3 support HDFS namenode HA. ## Storage Settings {#storage-settings} -- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default. +- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default. - [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default. - [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default. diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index de1a090d491..141d87fed20 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -54,7 +54,7 @@ Optional parameters: - `kafka_schema` — Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object. - `kafka_num_consumers` — The number of consumers per table. Specify more consumers if the throughput of one consumer is insufficient. 
The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition, and must not be greater than the number of physical cores on the server where ClickHouse is deployed. Default: `1`. -- `kafka_max_block_size` — The maximum batch size (in messages) for poll. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size). +- `kafka_max_block_size` — The maximum batch size (in messages) for poll. Default: [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size). - `kafka_skip_broken_messages` — Kafka message parser tolerance to schema-incompatible messages per block. If `kafka_skip_broken_messages = N` then the engine skips *N* Kafka messages that cannot be parsed (a message equals a row of data). Default: `0`. - `kafka_commit_every_batch` — Commit every consumed and handled batch instead of a single commit after writing a whole block. Default: `0`. - `kafka_client_id` — Client identifier. Empty by default. @@ -151,7 +151,7 @@ Example: SELECT level, sum(total) FROM daily GROUP BY level; ``` -To improve performance, received messages are grouped into blocks the size of [max_insert_block_size](../../../operations/settings/settings.md#settings-max_insert_block_size). If the block wasn’t formed within [stream_flush_interval_ms](../../../operations/settings/settings.md/#stream-flush-interval-ms) milliseconds, the data will be flushed to the table regardless of the completeness of the block. +To improve performance, received messages are grouped into blocks the size of [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size). If the block wasn’t formed within [stream_flush_interval_ms](../../../operations/settings/settings.md/#stream-flush-interval-ms) milliseconds, the data will be flushed to the table regardless of the completeness of the block. To stop receiving topic data or to change the conversion logic, detach the materialized view: diff --git a/docs/en/engines/table-engines/integrations/nats.md b/docs/en/engines/table-engines/integrations/nats.md index 37a41159fab..e898d1f1b82 100644 --- a/docs/en/engines/table-engines/integrations/nats.md +++ b/docs/en/engines/table-engines/integrations/nats.md @@ -58,7 +58,7 @@ Optional parameters: - `nats_reconnect_wait` – Amount of time in milliseconds to sleep between each reconnect attempt. Default: `5000`. - `nats_server_list` - Server list for connection. Can be specified to connect to NATS cluster. - `nats_skip_broken_messages` - NATS message parser tolerance to schema-incompatible messages per block. Default: `0`. If `nats_skip_broken_messages = N` then the engine skips *N* RabbitMQ messages that cannot be parsed (a message equals a row of data). -- `nats_max_block_size` - Number of row collected by poll(s) for flushing data from NATS. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size). +- `nats_max_block_size` - Number of row collected by poll(s) for flushing data from NATS. Default: [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size). - `nats_flush_interval_ms` - Timeout for flushing data read from NATS. Default: [stream_flush_interval_ms](../../../operations/settings/settings.md#stream-flush-interval-ms). - `nats_username` - NATS username. - `nats_password` - NATS password. 
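The Kafka and NATS hunks above revolve around the same pattern: a streaming-engine table whose optional settings (such as `kafka_max_block_size`, defaulting to `max_insert_block_size`) control batching, plus a materialized view that moves consumed blocks into a MergeTree table. The sketch below illustrates that pattern for Kafka; it is not the exact example from the page, and the broker address, topic, consumer group, and table names are placeholders.

```sql
-- Illustrative sketch only: broker, topic, group, and table names are placeholders.
CREATE TABLE queue_example
(
    level String,
    message String
)
ENGINE = Kafka('localhost:9092', 'events', 'clickhouse-group', 'JSONEachRow')
SETTINGS kafka_num_consumers = 1,
         kafka_max_block_size = 1048576,   -- overrides the max_insert_block_size default
         kafka_skip_broken_messages = 10;  -- tolerate up to 10 unparsable messages per block

CREATE TABLE daily_example
(
    level String,
    total UInt64
)
ENGINE = SummingMergeTree
ORDER BY level;

-- The materialized view consumes from the Kafka table and flushes blocks into daily_example.
CREATE MATERIALIZED VIEW consumer_example TO daily_example AS
    SELECT level, count() AS total
    FROM queue_example
    GROUP BY level;
```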
diff --git a/docs/en/engines/table-engines/integrations/rabbitmq.md b/docs/en/engines/table-engines/integrations/rabbitmq.md index 53c6e089a70..0f3fef3d6fb 100644 --- a/docs/en/engines/table-engines/integrations/rabbitmq.md +++ b/docs/en/engines/table-engines/integrations/rabbitmq.md @@ -65,7 +65,7 @@ Optional parameters: - `rabbitmq_deadletter_exchange` - Specify name for a [dead letter exchange](https://www.rabbitmq.com/dlx.html). You can create another table with this exchange name and collect messages in cases when they are republished to dead letter exchange. By default dead letter exchange is not specified. - `rabbitmq_persistent` - If set to 1 (true), in insert query delivery mode will be set to 2 (marks messages as 'persistent'). Default: `0`. - `rabbitmq_skip_broken_messages` – RabbitMQ message parser tolerance to schema-incompatible messages per block. If `rabbitmq_skip_broken_messages = N` then the engine skips *N* RabbitMQ messages that cannot be parsed (a message equals a row of data). Default: `0`. -- `rabbitmq_max_block_size` - Number of row collected before flushing data from RabbitMQ. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size). +- `rabbitmq_max_block_size` - Number of row collected before flushing data from RabbitMQ. Default: [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size). - `rabbitmq_flush_interval_ms` - Timeout for flushing data from RabbitMQ. Default: [stream_flush_interval_ms](../../../operations/settings/settings.md#stream-flush-interval-ms). - `rabbitmq_queue_settings_list` - allows to set RabbitMQ settings when creating a queue. Available settings: `x-max-length`, `x-max-length-bytes`, `x-message-ttl`, `x-expires`, `x-priority`, `x-max-priority`, `x-overflow`, `x-dead-letter-exchange`, `x-queue-type`. The `durable` setting is enabled automatically for the queue. - `rabbitmq_address` - Address for connection. Use ether this setting or `rabbitmq_host_port`. diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md index 3144bdd32fa..dfa06801d04 100644 --- a/docs/en/engines/table-engines/integrations/s3.md +++ b/docs/en/engines/table-engines/integrations/s3.md @@ -222,7 +222,7 @@ CREATE TABLE table_with_asterisk (name String, value UInt32) ## Storage Settings {#storage-settings} -- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default. +- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default. - [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default. - [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default. 
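As an illustration of the S3 storage settings listed above, a minimal sketch — the bucket URL is a placeholder (assuming a public bucket) and the table name is hypothetical:

```sql
-- Hypothetical S3-backed table; the endpoint is a placeholder.
CREATE TABLE s3_engine_table (name String, value UInt32)
ENGINE = S3('https://my-bucket.s3.amazonaws.com/data/file.csv', 'CSV');

-- Overwrite the target file on insert instead of failing because it already exists.
INSERT INTO s3_engine_table SETTINGS s3_truncate_on_insert = 1
VALUES ('one', 1), ('two', 2);

-- Skip empty objects matched by the path while reading.
SELECT * FROM s3_engine_table SETTINGS s3_skip_empty_files = 1;
```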
diff --git a/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md b/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md index 97d37e476ae..23d98d4b20e 100644 --- a/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md +++ b/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md @@ -12,7 +12,7 @@ In most cases you do not need a partition key, and in most other cases you do no You should never use too granular of partitioning. Don't partition your data by client identifiers or names. Instead, make a client identifier or name the first column in the ORDER BY expression. ::: -Partitioning is available for the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family tables (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables). [Materialized views](../../../engines/table-engines/special/materializedview.md#materializedview) based on MergeTree tables support partitioning, as well. +Partitioning is available for the [MergeTree family tables](../../../engines/table-engines/mergetree-family/mergetree.md), including [replicated tables](../../../engines/table-engines/mergetree-family/replication.md) and [materialized views](../../../sql-reference/statements/create/view.md#materialized-view). A partition is a logical combination of records in a table by a specified criterion. You can set a partition by an arbitrary criterion, such as by month, by day, or by event type. Each partition is stored separately to simplify manipulations of this data. When accessing the data, ClickHouse uses the smallest subset of partitions possible. Partitions improve performance for queries containing a partitioning key because ClickHouse will filter for that partition before selecting the parts and granules within the partition. diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 55adf4208f8..ed413959ca6 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -520,7 +520,7 @@ Indexes of type `set` can be utilized by all functions. 
The other index types ar | [empty](/docs/en/sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | | [notEmpty](/docs/en/sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | | [has](/docs/en/sql-reference/functions/array-functions#function-has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ | -| [hasAny](/docs/en/sql-reference/functions/array-functions#function-hasAny) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ | +| [hasAny](/docs/en/sql-reference/functions/array-functions#function-hasAny) | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ | | [hasAll](/docs/en/sql-reference/functions/array-functions#function-hasAll) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ | | hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ | | hasTokenOrNull | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ | diff --git a/docs/en/engines/table-engines/special/distributed.md b/docs/en/engines/table-engines/special/distributed.md index 14431c4c43b..de8ae0357dc 100644 --- a/docs/en/engines/table-engines/special/distributed.md +++ b/docs/en/engines/table-engines/special/distributed.md @@ -1,13 +1,16 @@ --- -slug: /en/engines/table-engines/special/distributed +sidebar_label: "Distributed" sidebar_position: 10 -sidebar_label: Distributed +slug: /en/engines/table-engines/special/distributed --- # Distributed Table Engine -Tables with Distributed engine do not store any data of their own, but allow distributed query processing on multiple servers. -Reading is automatically parallelized. During a read, the table indexes on remote servers are used, if there are any. +:::warning +To create a distributed table engine in the cloud, you can use the [remote and remoteSecure](../../../sql-reference/table-functions/remote) table functions. The `Distributed(...)` syntax cannot be used in ClickHouse Cloud. +::: + +Tables with Distributed engine do not store any data of their own, but allow distributed query processing on multiple servers. Reading is automatically parallelized. During a read, the table indexes on remote servers are used, if there are any. ## Creating a Table {#distributed-creating-a-table} @@ -22,6 +25,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ``` ### From a Table {#distributed-from-a-table} + When the `Distributed` table is pointing to a table on the current server you can adopt that table's schema: ``` sql @@ -48,7 +52,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] AS [db2.]name2 Specifying the `sharding_key` is necessary for the following: -- For `INSERTs` into a distributed table (as the table engine needs the `sharding_key` to determine how to split the data). However, if `insert_distributed_one_random_shard` setting is enabled, then `INSERTs` do not need the sharding key +- For `INSERTs` into a distributed table (as the table engine needs the `sharding_key` to determine how to split the data). However, if `insert_distributed_one_random_shard` setting is enabled, then `INSERTs` do not need the sharding key. 
- For use with `optimize_skip_unused_shards` as the `sharding_key` is necessary to determine what shards should be queried #### policy_name @@ -108,7 +112,7 @@ Specifying the `sharding_key` is necessary for the following: For **Insert limit settings** (`..._insert`) see also: - [distributed_foreground_insert](../../../operations/settings/settings.md#distributed_foreground_insert) setting -- [prefer_localhost_replica](../../../operations/settings/settings.md#settings-prefer-localhost-replica) setting +- [prefer_localhost_replica](../../../operations/settings/settings.md#prefer-localhost-replica) setting - `bytes_to_throw_insert` handled before `bytes_to_delay_insert`, so you should not set it to the value less then `bytes_to_delay_insert` ::: @@ -122,9 +126,7 @@ SETTINGS fsync_directories=0; ``` -Data will be read from all servers in the `logs` cluster, from the `default.hits` table located on every server in the cluster. -Data is not only read but is partially processed on the remote servers (to the extent that this is possible). -For example, for a query with `GROUP BY`, data will be aggregated on remote servers, and the intermediate states of aggregate functions will be sent to the requestor server. Then data will be further aggregated. +Data will be read from all servers in the `logs` cluster, from the `default.hits` table located on every server in the cluster. Data is not only read but is partially processed on the remote servers (to the extent that this is possible). For example, for a query with `GROUP BY`, data will be aggregated on remote servers, and the intermediate states of aggregate functions will be sent to the requestor server. Then data will be further aggregated. Instead of the database name, you can use a constant expression that returns a string. For example: `currentDatabase()`. @@ -183,9 +185,7 @@ Clusters are configured in the [server configuration file](../../../operations/c ``` -Here a cluster is defined with the name `logs` that consists of two shards, each of which contains two replicas. -Shards refer to the servers that contain different parts of the data (in order to read all the data, you must access all the shards). -Replicas are duplicating servers (in order to read all the data, you can access the data on any one of the replicas). +Here a cluster is defined with the name `logs` that consists of two shards, each of which contains two replicas. Shards refer to the servers that contain different parts of the data (in order to read all the data, you must access all the shards). Replicas are duplicating servers (in order to read all the data, you can access the data on any one of the replicas). Cluster names must not contain dots. @@ -198,9 +198,7 @@ The parameters `host`, `port`, and optionally `user`, `password`, `secure`, `com - `secure` - Whether to use a secure SSL/TLS connection. Usually also requires specifying the port (the default secure port is `9440`). The server should listen on `9440` and be configured with correct certificates. - `compression` - Use data compression. Default value: `true`. -When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load_balancing](../../../operations/settings/settings.md#settings-load_balancing) setting. -If the connection with the server is not established, there will be an attempt to connect with a short timeout. 
If the connection failed, the next replica will be selected, and so on for all the replicas. If the connection attempt failed for all the replicas, the attempt will be repeated the same way, several times. -This works in favour of resiliency, but does not provide complete fault tolerance: a remote server might accept the connection, but might not work, or work poorly. +When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load_balancing](../../../operations/settings/settings.md#load_balancing) setting. If the connection with the server is not established, there will be an attempt to connect with a short timeout. If the connection failed, the next replica will be selected, and so on for all the replicas. If the connection attempt failed for all the replicas, the attempt will be repeated the same way, several times. This works in favour of resiliency, but does not provide complete fault tolerance: a remote server might accept the connection, but might not work, or work poorly. You can specify just one of the shards (in this case, query processing should be called remote, rather than distributed) or up to any number of shards. In each shard, you can specify from one to any number of replicas. You can specify a different number of replicas for each shard. @@ -245,7 +243,7 @@ If the server ceased to exist or had a rough restart (for example, due to a hard When querying a `Distributed` table, `SELECT` queries are sent to all shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you do not have to transfer old data into it. Instead, you can write new data to it by using a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently. -When the `max_parallel_replicas` option is enabled, query processing is parallelized across all replicas within a single shard. For more information, see the section [max_parallel_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas). +When the `max_parallel_replicas` option is enabled, query processing is parallelized across all replicas within a single shard. For more information, see the section [max_parallel_replicas](../../../operations/settings/settings.md#max_parallel_replicas). To learn more about how distributed `in` and `global in` queries are processed, refer to [this](../../../sql-reference/operators/in.md#select-distributed-subqueries) documentation. diff --git a/docs/en/engines/table-engines/special/file.md b/docs/en/engines/table-engines/special/file.md index 6e3897398a5..fdf5242ba3b 100644 --- a/docs/en/engines/table-engines/special/file.md +++ b/docs/en/engines/table-engines/special/file.md @@ -101,8 +101,8 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da ## Settings {#settings} -- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default. +- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-empty_if-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default. 
- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default. - [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default. - [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows to skip empty files while reading. Disabled by default. -- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local. +- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-empty_if-not-exists) - method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local. diff --git a/docs/en/engines/table-engines/special/filelog.md b/docs/en/engines/table-engines/special/filelog.md index eef9a17444e..82201053bc5 100644 --- a/docs/en/engines/table-engines/special/filelog.md +++ b/docs/en/engines/table-engines/special/filelog.md @@ -41,7 +41,7 @@ Optional parameters: - `poll_timeout_ms` - Timeout for single poll from log file. Default: [stream_poll_timeout_ms](../../../operations/settings/settings.md#stream_poll_timeout_ms). - `poll_max_batch_size` — Maximum amount of records to be polled in a single poll. Default: [max_block_size](../../../operations/settings/settings.md#setting-max_block_size). -- `max_block_size` — The maximum batch size (in records) for poll. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size). +- `max_block_size` — The maximum batch size (in records) for poll. Default: [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size). - `max_threads` - Number of max threads to parse files, default is 0, which means the number will be max(1, physical_cpu_cores / 4). - `poll_directory_watch_events_backoff_init` - The initial sleep value for watch directory thread. Default: `500`. - `poll_directory_watch_events_backoff_max` - The max sleep value for watch directory thread. Default: `32000`. diff --git a/docs/en/engines/table-engines/special/materializedview.md b/docs/en/engines/table-engines/special/materializedview.md deleted file mode 100644 index d5f3b364d4e..00000000000 --- a/docs/en/engines/table-engines/special/materializedview.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -slug: /en/engines/table-engines/special/materializedview -sidebar_position: 100 -sidebar_label: MaterializedView ---- - -# MaterializedView Table Engine - -Used for implementing materialized views (for more information, see [CREATE VIEW](../../../sql-reference/statements/create/view.md#materialized)). For storing data, it uses a different engine that was specified when creating the view. When reading from a table, it just uses that engine. 
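To make the File engine settings above concrete, a small sketch (the table name is hypothetical):

```sql
-- Hypothetical File engine table; the data file lives in the table's data directory.
CREATE TABLE file_engine_table (name String, value UInt32) ENGINE = File(CSV);

-- Normally selecting from a File table whose underlying file does not exist yet is an error;
-- with this setting the query returns an empty result instead.
SELECT * FROM file_engine_table SETTINGS engine_file_empty_if_not_exists = 1;

-- Truncate the file before writing instead of appending to it.
INSERT INTO file_engine_table SETTINGS engine_file_truncate_on_insert = 1 VALUES ('one', 1);
```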
diff --git a/docs/en/getting-started/example-datasets/youtube-dislikes.md b/docs/en/getting-started/example-datasets/youtube-dislikes.md index e24c6e5a6dc..a6bbb20cc8d 100644 --- a/docs/en/getting-started/example-datasets/youtube-dislikes.md +++ b/docs/en/getting-started/example-datasets/youtube-dislikes.md @@ -25,8 +25,7 @@ The steps below will easily work on a local install of ClickHouse too. The only 1. Let's see what the data looks like. The `s3cluster` table function returns a table, so we can `DESCRIBE` the result: ```sql -DESCRIBE s3Cluster( - 'default', +DESCRIBE s3( 'https://clickhouse-public-datasets.s3.amazonaws.com/youtube/original/files/*.zst', 'JSONLines' ); @@ -35,29 +34,29 @@ DESCRIBE s3Cluster( ClickHouse infers the following schema from the JSON file: ```response -┌─name────────────────┬─type─────────────────────────────────┐ -│ id │ Nullable(String) │ -│ fetch_date │ Nullable(Int64) │ -│ upload_date │ Nullable(String) │ -│ title │ Nullable(String) │ -│ uploader_id │ Nullable(String) │ -│ uploader │ Nullable(String) │ -│ uploader_sub_count │ Nullable(Int64) │ -│ is_age_limit │ Nullable(Bool) │ -│ view_count │ Nullable(Int64) │ -│ like_count │ Nullable(Int64) │ -│ dislike_count │ Nullable(Int64) │ -│ is_crawlable │ Nullable(Bool) │ -│ is_live_content │ Nullable(Bool) │ -│ has_subtitles │ Nullable(Bool) │ -│ is_ads_enabled │ Nullable(Bool) │ -│ is_comments_enabled │ Nullable(Bool) │ -│ description │ Nullable(String) │ -│ rich_metadata │ Array(Map(String, Nullable(String))) │ -│ super_titles │ Array(Map(String, Nullable(String))) │ -│ uploader_badges │ Nullable(String) │ -│ video_badges │ Nullable(String) │ -└─────────────────────┴──────────────────────────────────────┘ +┌─name────────────────┬─type───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ +│ id │ Nullable(String) │ │ │ │ │ │ +│ fetch_date │ Nullable(String) │ │ │ │ │ │ +│ upload_date │ Nullable(String) │ │ │ │ │ │ +│ title │ Nullable(String) │ │ │ │ │ │ +│ uploader_id │ Nullable(String) │ │ │ │ │ │ +│ uploader │ Nullable(String) │ │ │ │ │ │ +│ uploader_sub_count │ Nullable(Int64) │ │ │ │ │ │ +│ is_age_limit │ Nullable(Bool) │ │ │ │ │ │ +│ view_count │ Nullable(Int64) │ │ │ │ │ │ +│ like_count │ Nullable(Int64) │ │ │ │ │ │ +│ dislike_count │ Nullable(Int64) │ │ │ │ │ │ +│ is_crawlable │ Nullable(Bool) │ │ │ │ │ │ +│ is_live_content │ Nullable(Bool) │ │ │ │ │ │ +│ has_subtitles │ Nullable(Bool) │ │ │ │ │ │ +│ is_ads_enabled │ Nullable(Bool) │ │ │ │ │ │ +│ is_comments_enabled │ Nullable(Bool) │ │ │ │ │ │ +│ description │ Nullable(String) │ │ │ │ │ │ +│ rich_metadata │ Array(Tuple(call Nullable(String), content Nullable(String), subtitle Nullable(String), title Nullable(String), url Nullable(String))) │ │ │ │ │ │ +│ super_titles │ Array(Tuple(text Nullable(String), url Nullable(String))) │ │ │ │ │ │ +│ uploader_badges │ Nullable(String) │ │ │ │ │ │ +│ video_badges │ Nullable(String) │ │ │ │ │ │ +└─────────────────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` 2. Based on the inferred schema, we cleaned up the data types and added a primary key. 
Define the following table: @@ -82,13 +81,13 @@ CREATE TABLE youtube `is_ads_enabled` Bool, `is_comments_enabled` Bool, `description` String, - `rich_metadata` Array(Map(String, String)), - `super_titles` Array(Map(String, String)), + `rich_metadata` Array(Tuple(call String, content String, subtitle String, title String, url String)), + `super_titles` Array(Tuple(text String, url String)), `uploader_badges` String, `video_badges` String ) ENGINE = MergeTree -ORDER BY (uploader, upload_date); +ORDER BY (uploader, upload_date) ``` 3. The following command streams the records from the S3 files into the `youtube` table. diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 57de0555bf6..836b1f2f637 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -478,6 +478,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe - [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/# input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`. - [input_format_csv_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_variable_number_of_columns) - allow variable number of columns in CSV format, ignore extra columns and use default values on missing columns. Default value - `false`. - [input_format_csv_use_default_on_bad_values](/docs/en/operations/settings/settings-formats.md/#input_format_csv_use_default_on_bad_values) - Allow to set default value to column when CSV field deserialization failed on bad value. Default value - `false`. +- [input_format_csv_try_infer_numbers_from_strings](/docs/en/operations/settings/settings-formats.md/#input_format_csv_try_infer_numbers_from_strings) - Try to infer numbers from string fields while schema inference. Default value - `false`. ## CSVWithNames {#csvwithnames} diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md index 63f75fb7830..4eeb19cefcf 100644 --- a/docs/en/interfaces/http.md +++ b/docs/en/interfaces/http.md @@ -167,7 +167,7 @@ For successful requests that do not return a data table, an empty response body You can use compression to reduce network traffic when transmitting a large amount of data or for creating dumps that are immediately compressed. -You can use the internal ClickHouse compression format when transmitting data. The compressed data has a non-standard format, and you need `clickhouse-compressor` program to work with it. It is installed with the `clickhouse-client` package. To increase the efficiency of data insertion, you can disable server-side checksum verification by using the [http_native_compression_disable_checksumming_on_decompress](../operations/settings/settings.md#settings-http_native_compression_disable_checksumming_on_decompress) setting. +You can use the internal ClickHouse compression format when transmitting data. The compressed data has a non-standard format, and you need `clickhouse-compressor` program to work with it. It is installed with the `clickhouse-client` package. To increase the efficiency of data insertion, you can disable server-side checksum verification by using the [http_native_compression_disable_checksumming_on_decompress](../operations/settings/settings.md#http_native_compression_disable_checksumming_on_decompress) setting. If you specify `compress=1` in the URL, the server will compress the data it sends to you. 
If you specify `decompress=1` in the URL, the server will decompress the data which you pass in the `POST` method. @@ -183,7 +183,7 @@ You can also choose to use [HTTP compression](https://en.wikipedia.org/wiki/HTTP - `snappy` To send a compressed `POST` request, append the request header `Content-Encoding: compression_method`. -In order for ClickHouse to compress the response, enable compression with [enable_http_compression](../operations/settings/settings.md#settings-enable_http_compression) setting and append `Accept-Encoding: compression_method` header to the request. You can configure the data compression level in the [http_zlib_compression_level](../operations/settings/settings.md#settings-http_zlib_compression_level) setting for all compression methods. +In order for ClickHouse to compress the response, enable compression with [enable_http_compression](../operations/settings/settings.md#enable_http_compression) setting and append `Accept-Encoding: compression_method` header to the request. You can configure the data compression level in the [http_zlib_compression_level](../operations/settings/settings.md#http_zlib_compression_level) setting for all compression methods. :::info Some HTTP clients might decompress data from the server by default (with `gzip` and `deflate`) and you might get decompressed data even if you use the compression settings correctly. @@ -285,7 +285,7 @@ For information about other parameters, see the section “SET”. Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, you need to add the `session_id` GET parameter to the request. You can use any string as the session ID. By default, the session is terminated after 60 seconds of inactivity. To change this timeout, modify the `default_session_timeout` setting in the server configuration, or add the `session_timeout` GET parameter to the request. To check the session status, use the `session_check=1` parameter. Only one query at a time can be executed within a single session. -You can receive information about the progress of a query in `X-ClickHouse-Progress` response headers. To do this, enable [send_progress_in_http_headers](../operations/settings/settings.md#settings-send_progress_in_http_headers). Example of the header sequence: +You can receive information about the progress of a query in `X-ClickHouse-Progress` response headers. To do this, enable [send_progress_in_http_headers](../operations/settings/settings.md#send_progress_in_http_headers). Example of the header sequence: ``` text X-ClickHouse-Progress: {"read_rows":"2752512","read_bytes":"240570816","total_rows_to_read":"8880128","elapsed_ns":"662334"} @@ -496,7 +496,7 @@ Next are the configuration methods for different `type`. `query` value is a predefined query of `predefined_query_handler`, which is executed by ClickHouse when an HTTP request is matched and the result of the query is returned. It is a must configuration. -The following example defines the values of [max_threads](../operations/settings/settings.md#settings-max_threads) and `max_final_threads` settings, then queries the system table to check whether these settings were set successfully. +The following example defines the values of [max_threads](../operations/settings/settings.md#max_threads) and `max_final_threads` settings, then queries the system table to check whether these settings were set successfully. :::note To keep the default `handlers` such as` query`, `play`,` ping`, add the `` rule. 
@@ -539,7 +539,7 @@ In `dynamic_query_handler`, the query is written in the form of parameter of the ClickHouse extracts and executes the value corresponding to the `query_param_name` value in the URL of the HTTP request. The default value of `query_param_name` is `/query` . It is an optional configuration. If there is no definition in the configuration file, the parameter is not passed in. -To experiment with this functionality, the example defines the values of [max_threads](../operations/settings/settings.md#settings-max_threads) and `max_final_threads` and `queries` whether the settings were set successfully. +To experiment with this functionality, the example defines the values of the [max_threads](../operations/settings/settings.md#max_threads) and `max_final_threads` settings, then queries whether they were set successfully. Example: diff --git a/docs/en/interfaces/overview.md b/docs/en/interfaces/overview.md index e60aff927c4..0e09ab6a0b7 100644 --- a/docs/en/interfaces/overview.md +++ b/docs/en/interfaces/overview.md @@ -25,6 +25,7 @@ ClickHouse server provides embedded visual interfaces for power users: - Play UI: open `/play` in the browser; - Advanced Dashboard: open `/dashboard` in the browser; +- Binary symbols viewer for ClickHouse engineers: open `/binary` in the browser; There are also a wide range of third-party libraries for working with ClickHouse: diff --git a/docs/en/interfaces/schema-inference.md b/docs/en/interfaces/schema-inference.md index 0aadb09730a..ef858796936 100644 --- a/docs/en/interfaces/schema-inference.md +++ b/docs/en/interfaces/schema-inference.md @@ -834,6 +834,27 @@ $$) └──────────────┴───────────────┘ ``` +#### CSV settings {#csv-settings} + +##### input_format_csv_try_infer_numbers_from_strings + +Enabling this setting allows inferring numbers from string values. + +This setting is disabled by default. + +**Example:** + +```sql +SET input_format_csv_try_infer_numbers_from_strings = 1; +DESC format(CSV, '"42","42.42"'); +``` +```response +┌─name─┬─type──────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ +│ c1 │ Nullable(Int64) │ │ │ │ │ │ +│ c2 │ Nullable(Float64) │ │ │ │ │ │ +└──────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ +``` + ### TSV/TSKV {#tsv-tskv} In TSV/TSKV formats ClickHouse extracts column value from the row according to tabular delimiters and then parses extracted value using @@ -1846,3 +1867,102 @@ DESC format(JSONAsString, '{"x" : 42, "y" : "Hello, World!"}') SETTINGS allow_ex │ json │ Object('json') │ │ │ │ │ │ └──────┴────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` + +## Schema inference modes {#schema-inference-modes} + +Schema inference from the set of data files can work in two different modes: `default` and `union`. +The mode is controlled by the setting `schema_inference_mode`. + +### Default mode {#default-schema-inference-mode} + +In default mode, ClickHouse assumes that all files have the same schema and tries to infer the schema by reading files one by one until it succeeds.
+ +Example: + +Let's say we have 3 files `data1.jsonl`, `data2.jsonl` and `data3.jsonl` with the following content: + +`data1.jsonl`: +```json +{"field1" : 1, "field2" : null} +{"field1" : 2, "field2" : null} +{"field1" : 3, "field2" : null} +``` + +`data2.jsonl`: +```json +{"field1" : 4, "field2" : "Data4"} +{"field1" : 5, "field2" : "Data5"} +{"field1" : 6, "field2" : "Data5"} +``` + +`data3.jsonl`: +```json +{"field1" : 7, "field2" : "Data7", "field3" : [1, 2, 3]} +{"field1" : 8, "field2" : "Data8", "field3" : [4, 5, 6]} +{"field1" : 9, "field2" : "Data9", "field3" : [7, 8, 9]} +``` + +Let's try to use schema inference on these 3 files: +```sql +:) DESCRIBE file('data{1,2,3}.jsonl') SETTINGS schema_inference_mode='default' +``` + +Result: +```text +┌─name───┬─type─────────────┐ +│ field1 │ Nullable(Int64) │ +│ field2 │ Nullable(String) │ +└────────┴──────────────────┘ +``` + +As you can see, `field3` from file `data3.jsonl` is missing. +This happens because ClickHouse first tried to infer the schema from `data1.jsonl` and failed because `field2` contains only nulls; +it then inferred the schema from `data2.jsonl` successfully, so the data from `data3.jsonl` was never read. + +### Union mode {#union-schema-inference-mode} + +In union mode, ClickHouse assumes that files can have different schemas, so it infers the schemas of all files and then unions them into a common schema. + +Let's say we have 3 files `data1.jsonl`, `data2.jsonl` and `data3.jsonl` with the following content: + +`data1.jsonl`: +```json +{"field1" : 1} +{"field1" : 2} +{"field1" : 3} +``` + +`data2.jsonl`: +```json +{"field2" : "Data4"} +{"field2" : "Data5"} +{"field2" : "Data5"} +``` + +`data3.jsonl`: +```json +{"field3" : [1, 2, 3]} +{"field3" : [4, 5, 6]} +{"field3" : [7, 8, 9]} ``` + +Let's try to use schema inference on these 3 files: +```sql +:) DESCRIBE file('data{1,2,3}.jsonl') SETTINGS schema_inference_mode='union' +``` + +Result: +```text +┌─name───┬─type───────────────────┐ +│ field1 │ Nullable(Int64) │ +│ field2 │ Nullable(String) │ +│ field3 │ Array(Nullable(Int64)) │ +└────────┴────────────────────────┘ +``` + +As you can see, the resulting schema contains all fields from all files. + +Note: +- As some of the files may not contain some columns from the resulting schema, union mode is supported only for formats that support reading a subset of columns (like JSONEachRow, Parquet, TSVWithNames, etc) and won't work for other formats (like CSV, TSV, JSONCompactEachRow, etc). +- If ClickHouse cannot infer the schema from one of the files, an exception will be thrown. +- If you have a lot of files, reading the schema from all of them can take a lot of time. diff --git a/docs/en/operations/backup.md b/docs/en/operations/backup.md index 6068b185ede..d45885ee816 100644 --- a/docs/en/operations/backup.md +++ b/docs/en/operations/backup.md @@ -406,7 +406,7 @@ RESTORE TABLE data AS data_restored FROM Disk('s3_plain', 'cloud_backup'); :::note But keep in mind that: - This disk should not be used for `MergeTree` itself, only for `BACKUP`/`RESTORE` -- If your tables are backed by S3 storage, it doesn't use `CopyObject` calls to copy parts to the destination bucket, instead, it downloads and uploads them, which is very inefficient. Prefer to use `BACKUP ... TO S3()` syntax for this use-case. +- If your tables are backed by S3 storage and the disk types differ, it doesn't use `CopyObject` calls to copy parts to the destination bucket; instead, it downloads and uploads them, which is very inefficient. Prefer to use the `BACKUP ... 
TO S3()` syntax for this use-case. ::: ## Alternatives diff --git a/docs/en/operations/monitoring.md b/docs/en/operations/monitoring.md index adc384e21ae..de61da6f5c4 100644 --- a/docs/en/operations/monitoring.md +++ b/docs/en/operations/monitoring.md @@ -64,4 +64,4 @@ You can configure ClickHouse to export metrics to [Prometheus](https://prometheu Additionally, you can monitor server availability through the HTTP API. Send the `HTTP GET` request to `/ping`. If the server is available, it responds with `200 OK`. -To monitor servers in a cluster configuration, you should set the [max_replica_delay_for_distributed_queries](../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) parameter and use the HTTP resource `/replicas_status`. A request to `/replicas_status` returns `200 OK` if the replica is available and is not delayed behind the other replicas. If a replica is delayed, it returns `503 HTTP_SERVICE_UNAVAILABLE` with information about the gap. +To monitor servers in a cluster configuration, you should set the [max_replica_delay_for_distributed_queries](../operations/settings/settings.md#max_replica_delay_for_distributed_queries) parameter and use the HTTP resource `/replicas_status`. A request to `/replicas_status` returns `200 OK` if the replica is available and is not delayed behind the other replicas. If a replica is delayed, it returns `503 HTTP_SERVICE_UNAVAILABLE` with information about the gap. diff --git a/docs/en/operations/optimizing-performance/sampling-query-profiler.md b/docs/en/operations/optimizing-performance/sampling-query-profiler.md index 206f710734e..194d2714422 100644 --- a/docs/en/operations/optimizing-performance/sampling-query-profiler.md +++ b/docs/en/operations/optimizing-performance/sampling-query-profiler.md @@ -42,7 +42,7 @@ To analyze the `trace_log` system table: - Install the `clickhouse-common-static-dbg` package. See [Install from DEB Packages](../../getting-started/install.md#install-from-deb-packages). -- Allow introspection functions by the [allow_introspection_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) setting. +- Allow introspection functions by the [allow_introspection_functions](../../operations/settings/settings.md#allow_introspection_functions) setting. For security reasons, introspection functions are disabled by default. diff --git a/docs/en/operations/query-cache.md b/docs/en/operations/query-cache.md index def0f48b968..50c5ff4457f 100644 --- a/docs/en/operations/query-cache.md +++ b/docs/en/operations/query-cache.md @@ -29,6 +29,10 @@ Transactionally inconsistent caching is traditionally provided by client tools o the same caching logic and configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side. This reduces maintenance effort and avoids redundancy. +:::note +Security consideration: The cached query result is tied to the user executing it. Authorization checks are performed when the query is executed. This means that if there are any alterations to the user's role or permissions between the time the query is cached and when the cache is accessed, the result will not reflect these changes. We recommend using different users to distinguish between different levels of access, instead of actively toggling roles for a single user between queries, as this practice may lead to unexpected query results. 
+::: + ## Configuration Settings and Usage Setting [use_query_cache](settings/settings.md#use-query-cache) can be used to control whether a specific query or all queries of the @@ -99,7 +103,7 @@ It is also possible to limit the cache usage of individual users using [settings constraints](settings/constraints-on-settings.md). More specifically, you can restrict the maximum amount of memory (in bytes) a user may allocate in the query cache and the maximum number of stored query results. For that, first provide configurations [query_cache_max_size_in_bytes](settings/settings.md#query-cache-max-size-in-bytes) and -[query_cache_max_entries](settings/settings.md#query-cache-size-max-entries) in a user profile in `users.xml`, then make both settings +[query_cache_max_entries](settings/settings.md#query-cache-max-entries) in a user profile in `users.xml`, then make both settings readonly: ``` xml @@ -140,7 +144,7 @@ value can be specified at session, profile or query level using setting [query_c Entries in the query cache are compressed by default. This reduces the overall memory consumption at the cost of slower writes into / reads from the query cache. To disable compression, use setting [query_cache_compress_entries](settings/settings.md#query-cache-compress-entries). -ClickHouse reads table data in blocks of [max_block_size](settings/settings.md#settings-max_block_size) rows. Due to filtering, aggregation, +ClickHouse reads table data in blocks of [max_block_size](settings/settings.md#setting-max_block_size) rows. Due to filtering, aggregation, etc., result blocks are typically much smaller than 'max_block_size' but there are also cases where they are much bigger. Setting [query_cache_squash_partial_results](settings/settings.md#query-cache-squash-partial-results) (enabled by default) controls if result blocks are squashed (if they are tiny) or split (if they are large) into blocks of 'max_block_size' size before insertion into the query result diff --git a/docs/en/operations/server-configuration-parameters/settings.md index cc2692b8e02..48434d992e2 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -472,6 +472,39 @@ The value 0 means that you can delete all tables without any restrictions. ``` xml <max_table_size_to_drop>0</max_table_size_to_drop> ``` + + +## max\_database\_num\_to\_warn {#max-database-num-to-warn} +If the number of attached databases exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table. +Default value: 1000 + +**Example** + +``` xml +<max_database_num_to_warn>50</max_database_num_to_warn> +``` + +## max\_table\_num\_to\_warn {#max-table-num-to-warn} +If the number of attached tables exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table. +Default value: 5000 + +**Example** + +``` xml +<max_table_num_to_warn>400</max_table_num_to_warn> +``` + + +## max\_part\_num\_to\_warn {#max-part-num-to-warn} +If the number of active parts exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table. +Default value: 100000 + +**Example** + +``` xml +<max_part_num_to_warn>400</max_part_num_to_warn> +``` + ## max_temporary_data_on_disk_size @@ -1650,7 +1683,7 @@ Default value: `0.5`. Asynchronous loading of databases and tables. -If `true` all non-system databases with `Ordinary`, `Atomic` and `Replicated` engine will be loaded asynchronously after the ClickHouse server start up.
See `system.async_loader` table, `tables_loader_background_pool_size` and `tables_loader_foreground_pool_size` server settings. Any query that tries to access a table, that is not yet loaded, will wait for exactly this table to be started up. If load job fails, query will rethrow an error (instead of shutting down the whole server in case of `async_load_databases = false`). The table that is waited for by at least one query will be loaded with higher priority. DDL queries on a database will wait for exactly that database to be started up. +If `true` all non-system databases with `Ordinary`, `Atomic` and `Replicated` engine will be loaded asynchronously after the ClickHouse server start up. See `system.asynchronous_loader` table, `tables_loader_background_pool_size` and `tables_loader_foreground_pool_size` server settings. Any query that tries to access a table, that is not yet loaded, will wait for exactly this table to be started up. If load job fails, query will rethrow an error (instead of shutting down the whole server in case of `async_load_databases = false`). The table that is waited for by at least one query will be loaded with higher priority. DDL queries on a database will wait for exactly that database to be started up. If `false`, all databases are loaded when the server starts. @@ -1976,7 +2009,7 @@ Data for the query cache is allocated in DRAM. If memory is scarce, make sure to ## query_thread_log {#query_thread_log} -Setting for logging threads of queries received with the [log_query_threads=1](../../operations/settings/settings.md#settings-log-query-threads) setting. +Setting for logging threads of queries received with the [log_query_threads=1](../../operations/settings/settings.md#log-query-threads) setting. Queries are logged in the [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below). @@ -2018,7 +2051,7 @@ If the table does not exist, ClickHouse will create it. If the structure of the ## query_views_log {#query_views_log} -Setting for logging views (live, materialized etc) dependant of queries received with the [log_query_views=1](../../operations/settings/settings.md#settings-log-query-views) setting. +Setting for logging views (live, materialized etc) dependant of queries received with the [log_query_views=1](../../operations/settings/settings.md#log-query-views) setting. Queries are logged in the [system.query_views_log](../../operations/system-tables/query_views_log.md#system_tables-query_views_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below). @@ -2298,7 +2331,7 @@ For the value of the `incl` attribute, see the section “[Configuration files]( **See Also** -- [skip_unavailable_shards](../../operations/settings/settings.md#settings-skip_unavailable_shards) +- [skip_unavailable_shards](../../operations/settings/settings.md#skip_unavailable_shards) - [Cluster Discovery](../../operations/cluster-discovery.md) - [Replicated database engine](../../engines/database-engines/replicated.md) diff --git a/docs/en/operations/settings/query-complexity.md b/docs/en/operations/settings/query-complexity.md index 9e36aa26946..1cb7ec9dced 100644 --- a/docs/en/operations/settings/query-complexity.md +++ b/docs/en/operations/settings/query-complexity.md @@ -139,7 +139,7 @@ Limit on the number of bytes in the result. The same as the previous setting. 
What to do if the volume of the result exceeds one of the limits: ‘throw’ or ‘break’. By default, throw. -Using ‘break’ is similar to using LIMIT. `Break` interrupts execution only at the block level. This means that amount of returned rows is greater than [max_result_rows](#setting-max_result_rows), multiple of [max_block_size](../../operations/settings/settings.md#setting-max_block_size) and depends on [max_threads](../../operations/settings/settings.md#settings-max_threads). +Using ‘break’ is similar to using LIMIT. `Break` interrupts execution only at the block level. This means that amount of returned rows is greater than [max_result_rows](#setting-max_result_rows), multiple of [max_block_size](../../operations/settings/settings.md#setting-max_block_size) and depends on [max_threads](../../operations/settings/settings.md#max_threads). Example: diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md index 344e6dda680..3d76bd9df73 100644 --- a/docs/en/operations/settings/settings-formats.md +++ b/docs/en/operations/settings/settings-formats.md @@ -1130,6 +1130,13 @@ Result a 0 1971-01-01 ``` +## input_format_csv_try_infer_numbers_from_strings {#input_format_csv_try_infer_numbers_from_strings} + +If enabled, during schema inference ClickHouse will try to infer numbers from string fields. +It can be useful if CSV data contains quoted UInt64 numbers. + +Disabled by default. + ## Values format settings {#values-format-settings} ### input_format_values_interpret_expressions {#input_format_values_interpret_expressions} diff --git a/docs/en/operations/settings/settings-users.md b/docs/en/operations/settings/settings-users.md index 1f41eafd02e..96477f777a9 100644 --- a/docs/en/operations/settings/settings-users.md +++ b/docs/en/operations/settings/settings-users.md @@ -4,7 +4,7 @@ sidebar_position: 63 sidebar_label: User Settings --- -# User Settings +# Users and Roles Settings The `users` section of the `user.xml` configuration file contains user settings. @@ -187,3 +187,34 @@ The following configuration forces that user `user1` can only see the rows of `t ``` The `filter` can be any expression resulting in a [UInt8](../../sql-reference/data-types/int-uint.md)-type value. It usually contains comparisons and logical operators. Rows from `database_name.table1` where filter results to 0 are not returned for this user. The filtering is incompatible with `PREWHERE` operations and disables `WHERE→PREWHERE` optimization. + +## Roles + +You can create any predefined roles using the `roles` section of the `user.xml` configuration file. + +Structure of the `roles` section: + +```xml + + + + GRANT SHOW ON *.* + REVOKE SHOW ON system.* + GRANT CREATE ON *.* WITH GRANT OPTION + + + +``` + +These roles can also be granted to users from the `users` section: + +```xml + + + ... + + GRANT test_role + + + +``` diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index a284997508f..6e087467bb9 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -460,6 +460,12 @@ Possible values: Default value: 1048576. +## http_make_head_request {#http-make-head-request} + +The `http_make_head_request` setting allows the execution of a `HEAD` request while reading data from HTTP to retrieve information about the file to be read, such as its size. Since it's enabled by default, it may be desirable to disable this setting in cases where the server does not support `HEAD` requests. 
+ +Default value: `true`. + ## table_function_remote_max_addresses {#table_function_remote_max_addresses} Sets the maximum number of addresses generated from patterns for the [remote](../../sql-reference/table-functions/remote.md) function. @@ -1578,9 +1584,15 @@ Default value: `default`. ## allow_experimental_parallel_reading_from_replicas -If true, ClickHouse will send a SELECT query to all replicas of a table (up to `max_parallel_replicas`) . It will work for any kind of MergeTree table. +Enables or disables sending SELECT queries to all replicas of a table (up to `max_parallel_replicas`). Reading is parallelized and coordinated dynamically. It will work for any kind of MergeTree table. -Default value: `false`. +Possible values: + +- 0 - Disabled. +- 1 - Enabled, silently disabled in case of failure. +- 2 - Enabled, throws an exception in case of failure. + +Default value: `0`. ## compile_expressions {#compile-expressions} @@ -1704,7 +1716,7 @@ Default value: `1` ## query_cache_squash_partial_results {#query-cache-squash-partial-results} -Squash partial result blocks to blocks of size [max_block_size](#setting-max_block_size). Reduces performance of inserts into the [query cache](../query-cache.md) but improves the compressability of cache entries (see [query_cache_compress-entries](#query_cache_compress_entries)). +Squash partial result blocks to blocks of size [max_block_size](#setting-max_block_size). Reduces performance of inserts into the [query cache](../query-cache.md) but improves the compressability of cache entries (see [query_cache_compress-entries](#query-cache-compress-entries)). Possible values: @@ -2474,7 +2486,7 @@ See also: - [load_balancing](#load_balancing-round_robin) - [Table engine Distributed](../../engines/table-engines/special/distributed.md) - [distributed_replica_error_cap](#distributed_replica_error_cap) -- [distributed_replica_error_half_life](#settings-distributed_replica_error_half_life) +- [distributed_replica_error_half_life](#distributed_replica_error_half_life) ## distributed_background_insert_sleep_time_ms {#distributed_background_insert_sleep_time_ms} @@ -4152,6 +4164,41 @@ Result: └─────┴─────┴───────┘ ``` +## enable_order_by_all {#enable-order-by-all} + +Enables or disables sorting by `ALL` columns, i.e. [ORDER BY](../../sql-reference/statements/select/order-by.md) + +Possible values: + +- 0 — Disable ORDER BY ALL. +- 1 — Enable ORDER BY ALL. + +Default value: `1`. + +**Example** + +Query: + +```sql +CREATE TABLE TAB(C1 Int, C2 Int, ALL Int) ENGINE=Memory(); + +INSERT INTO TAB VALUES (10, 20, 30), (20, 20, 10), (30, 10, 20); + +SELECT * FROM TAB ORDER BY ALL; -- returns an error that ALL is ambiguous + +SELECT * FROM TAB ORDER BY ALL SETTINGS enable_order_by_all; +``` + +Result: + +```text +┌─C1─┬─C2─┬─ALL─┐ +│ 20 │ 20 │ 10 │ +│ 30 │ 10 │ 20 │ +│ 10 │ 20 │ 30 │ +└────┴────┴─────┘ +``` + ## splitby_max_substrings_includes_remaining_string {#splitby_max_substrings_includes_remaining_string} Controls whether function [splitBy*()](../../sql-reference/functions/splitting-merging-functions.md) with argument `max_substrings` > 0 will include the remaining string in the last element of the result array. @@ -4349,6 +4396,8 @@ Default value: `1GiB`. ## Schema Inference settings +See [schema inference](../../interfaces/schema-inference.md#schema-inference-modes) documentation for more details. + ### schema_inference_use_cache_for_file {schema_inference_use_cache_for_file} Enable schemas cache for schema inference in `file` table function. 
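A quick sketch of the `splitby_max_substrings_includes_remaining_string` behaviour described above; the expected results reflect the setting description rather than output copied from a server:

```sql
-- max_substrings = 2: with the setting disabled, everything past the second substring is discarded.
SELECT splitByChar(',', 'a,b,c,d', 2)
SETTINGS splitby_max_substrings_includes_remaining_string = 0;
-- expected: ['a','b']

-- With the setting enabled, the remaining string is kept in the last array element.
SELECT splitByChar(',', 'a,b,c,d', 2)
SETTINGS splitby_max_substrings_includes_remaining_string = 1;
-- expected: ['a','b,c,d']
```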
@@ -4390,6 +4439,13 @@ Possible values: Default value: 2. +### schema_inference_mode {schema_inference_mode} + +The mode of schema inference. Possible values: `default` and `union`. +See [schema inference modes](../../interfaces/schema-inference.md#schema-inference-modes) section for more details. + +Default value: `default`. + ## compatibility {#compatibility} The `compatibility` setting causes ClickHouse to use the default settings of a previous version of ClickHouse, where the previous version is provided as the setting. @@ -4659,7 +4715,7 @@ Possible values: Default value: `false`. -## rename_files_after_processing +## rename_files_after_processing {#rename_files_after_processing} - **Type:** String @@ -5078,3 +5134,25 @@ When set to `true` than for all s3 requests first two attempts are made with low When set to `false` than all attempts are made with identical timeouts. Default value: `true`. + +## max_partition_size_to_drop + +Restriction on dropping partitions in query time. + +Default value: 50 GB. +The value 0 means that you can drop partitions without any restrictions. + +:::note +This query setting overwrites its server setting equivalent, see [max_partition_size_to_drop](/docs/en/operations/server-configuration-parameters/settings.md/#max-partition-size-to-drop) +::: + +## max_table_size_to_drop + +Restriction on deleting tables in query time. + +Default value: 50 GB. +The value 0 means that you can delete all tables without any restrictions. + +:::note +This query setting overwrites its server setting equivalent, see [max_table_size_to_drop](/docs/en/operations/server-configuration-parameters/settings.md/#max-table-size-to-drop) +::: \ No newline at end of file diff --git a/docs/en/operations/system-tables/async_loader.md b/docs/en/operations/system-tables/asynchronous_loader.md similarity index 97% rename from docs/en/operations/system-tables/async_loader.md rename to docs/en/operations/system-tables/asynchronous_loader.md index 4e8651a6d3e..af9aa4ecd09 100644 --- a/docs/en/operations/system-tables/async_loader.md +++ b/docs/en/operations/system-tables/asynchronous_loader.md @@ -1,7 +1,7 @@ --- -slug: /en/operations/system-tables/async_loader +slug: /en/operations/system-tables/asynchronous_loader --- -# async_loader +# asynchronous_loader Contains information and status for recent asynchronous jobs (e.g. for tables loading). The table contains a row for every job. There is a tool for visualizing information from this table `utils/async_loader_graph`. @@ -9,7 +9,7 @@ Example: ``` sql SELECT * -FROM system.async_loader +FROM system.asynchronous_loader FORMAT Vertical LIMIT 1 ``` diff --git a/docs/en/operations/system-tables/asynchronous_metrics.md b/docs/en/operations/system-tables/asynchronous_metrics.md index e46b495239c..fe8f963b1ec 100644 --- a/docs/en/operations/system-tables/asynchronous_metrics.md +++ b/docs/en/operations/system-tables/asynchronous_metrics.md @@ -239,6 +239,10 @@ The amount of virtual memory mapped for the pages of machine code of the server The amount of virtual memory mapped for the use of stack and for the allocated memory, in bytes. It is unspecified whether it includes the per-thread stacks and most of the allocated memory, that is allocated with the 'mmap' system call. This metric exists only for completeness reasons. I recommend to use the `MemoryResident` metric for monitoring. +### MemoryResidentMax + +Maximum amount of physical memory used by the server process, in bytes. 
+ ### MemoryResident The amount of physical memory used by the server process, in bytes. @@ -547,6 +551,14 @@ Total amount of bytes (compressed, including data and indices) stored in all tab Total amount of data parts in all tables of MergeTree family. Numbers larger than 10 000 will negatively affect the server startup time and it may indicate unreasonable choice of the partition key. +### TotalPrimaryKeyBytesInMemory + +The total amount of memory (in bytes) used by primary key values (only takes active parts into account). + +### TotalPrimaryKeyBytesInMemoryAllocated + +The total amount of memory (in bytes) reserved for primary key values (only takes active parts into account). + ### TotalRowsOfMergeTreeTables Total amount of rows (records) stored in all tables of MergeTree family. diff --git a/docs/en/operations/system-tables/clusters.md b/docs/en/operations/system-tables/clusters.md index 2659f80e338..63cc083e4bc 100644 --- a/docs/en/operations/system-tables/clusters.md +++ b/docs/en/operations/system-tables/clusters.md @@ -78,5 +78,5 @@ is_active: NULL **See Also** - [Table engine Distributed](../../engines/table-engines/special/distributed.md) -- [distributed_replica_error_cap setting](../../operations/settings/settings.md#settings-distributed_replica_error_cap) -- [distributed_replica_error_half_life setting](../../operations/settings/settings.md#settings-distributed_replica_error_half_life) +- [distributed_replica_error_cap setting](../../operations/settings/settings.md#distributed_replica_error_cap) +- [distributed_replica_error_half_life setting](../../operations/settings/settings.md#distributed_replica_error_half_life) diff --git a/docs/en/operations/system-tables/database_engines.md b/docs/en/operations/system-tables/database_engines.md new file mode 100644 index 00000000000..09f0687af65 --- /dev/null +++ b/docs/en/operations/system-tables/database_engines.md @@ -0,0 +1,26 @@ +--- +slug: /en/operations/system-tables/database_engines +--- +# database_engines + +Contains the list of database engines supported by the server. + +This table contains the following columns (the column type is shown in brackets): + +- `name` (String) — The name of database engine. + +Example: + +``` sql +SELECT * +FROM system.database_engines +WHERE name in ('Atomic', 'Lazy', 'Ordinary') +``` + +``` text +┌─name─────┐ +│ Ordinary │ +│ Atomic │ +│ Lazy │ +└──────────┘ +``` diff --git a/docs/en/operations/system-tables/errors.md b/docs/en/operations/system-tables/errors.md index 01762962152..4582ea631b3 100644 --- a/docs/en/operations/system-tables/errors.md +++ b/docs/en/operations/system-tables/errors.md @@ -9,11 +9,15 @@ Columns: - `name` ([String](../../sql-reference/data-types/string.md)) — name of the error (`errorCodeToName`). - `code` ([Int32](../../sql-reference/data-types/int-uint.md)) — code number of the error. -- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the number of times this error has been happened. -- `last_error_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — time when the last error happened. +- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the number of times this error happened. +- `last_error_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — the time when the last error happened. - `last_error_message` ([String](../../sql-reference/data-types/string.md)) — message for the last error. 
-- `last_error_trace` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — A [stack trace](https://en.wikipedia.org/wiki/Stack_trace) which represents a list of physical addresses where the called methods are stored. -- `remote` ([UInt8](../../sql-reference/data-types/int-uint.md)) — remote exception (i.e. received during one of the distributed query). +- `last_error_trace` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — A [stack trace](https://en.wikipedia.org/wiki/Stack_trace) that represents a list of physical addresses where the called methods are stored. +- `remote` ([UInt8](../../sql-reference/data-types/int-uint.md)) — remote exception (i.e. received during one of the distributed queries). + +:::note +Counters for some errors may increase during successful query execution. It's not recommended to use this table for server monitoring purposes unless you are sure that corresponding error can not be a false positive. +::: **Example** diff --git a/docs/en/operations/system-tables/query_log.md b/docs/en/operations/system-tables/query_log.md index 4f5e214f1ce..7fcc4928355 100644 --- a/docs/en/operations/system-tables/query_log.md +++ b/docs/en/operations/system-tables/query_log.md @@ -11,7 +11,7 @@ This table does not contain the ingested data for `INSERT` queries. You can change settings of queries logging in the [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) section of the server configuration. -You can disable queries logging by setting [log_queries = 0](../../operations/settings/settings.md#settings-log-queries). We do not recommend to turn off logging because information in this table is important for solving issues. +You can disable queries logging by setting [log_queries = 0](../../operations/settings/settings.md#log-queries). We do not recommend to turn off logging because information in this table is important for solving issues. The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query. @@ -30,7 +30,7 @@ Each query creates one or two rows in the `query_log` table, depending on the st You can use the [log_queries_probability](../../operations/settings/settings.md#log-queries-probability) setting to reduce the number of queries, registered in the `query_log` table. -You can use the [log_formatted_queries](../../operations/settings/settings.md#settings-log-formatted-queries) setting to log formatted queries to the `formatted_query` column. +You can use the [log_formatted_queries](../../operations/settings/settings.md#log-formatted-queries) setting to log formatted queries to the `formatted_query` column. Columns: @@ -101,7 +101,7 @@ Columns: - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision. - `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map.md)) — ProfileEvents that measure different metrics. The description of them could be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events) - `Settings` ([Map(String, String)](../../sql-reference/data-types/map.md)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1. 
-- `log_comment` ([String](../../sql-reference/data-types/string.md)) — Log comment. It can be set to arbitrary string no longer than [max_query_size](../../operations/settings/settings.md#settings-max_query_size). An empty string if it is not defined. +- `log_comment` ([String](../../sql-reference/data-types/string.md)) — Log comment. It can be set to arbitrary string no longer than [max_query_size](../../operations/settings/settings.md#max_query_size). An empty string if it is not defined. - `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Thread ids that are participating in query execution. These threads may not have run simultaneously. - `peak_threads_usage` ([UInt64)](../../sql-reference/data-types/int-uint.md)) — Maximum count of simultaneous threads executing the query. - `used_aggregate_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions`, which were used during query execution. diff --git a/docs/en/operations/system-tables/query_thread_log.md b/docs/en/operations/system-tables/query_thread_log.md index a198d7c304f..0420a0392f2 100644 --- a/docs/en/operations/system-tables/query_thread_log.md +++ b/docs/en/operations/system-tables/query_thread_log.md @@ -8,7 +8,7 @@ Contains information about threads that execute queries, for example, thread nam To start logging: 1. Configure parameters in the [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) section. -2. Set [log_query_threads](../../operations/settings/settings.md#settings-log-query-threads) to 1. +2. Set [log_query_threads](../../operations/settings/settings.md#log-query-threads) to 1. The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query. diff --git a/docs/en/operations/system-tables/query_views_log.md b/docs/en/operations/system-tables/query_views_log.md index 4dd8dd7420d..41a69da70aa 100644 --- a/docs/en/operations/system-tables/query_views_log.md +++ b/docs/en/operations/system-tables/query_views_log.md @@ -8,7 +8,7 @@ Contains information about the dependent views executed when running a query, fo To start logging: 1. Configure parameters in the [query_views_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_views_log) section. -2. Set [log_query_views](../../operations/settings/settings.md#settings-log-query-views) to 1. +2. Set [log_query_views](../../operations/settings/settings.md#log-query-views) to 1. The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query_views_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_views_log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query. 
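+
+For example, a minimal sketch of the steps above (`mv_source` is a placeholder name; a table that has dependent materialized views is assumed):
+
+``` sql
+SET log_query_views = 1;
+
+-- any INSERT that triggers dependent views will be logged
+INSERT INTO mv_source SELECT * FROM numbers(10);
+
+SYSTEM FLUSH LOGS;
+
+SELECT view_name, view_type, status
+FROM system.query_views_log
+ORDER BY event_time DESC
+LIMIT 5;
+```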
diff --git a/docs/en/operations/system-tables/table_engines.md b/docs/en/operations/system-tables/table_engines.md index 08594739ecf..56668abae31 100644 --- a/docs/en/operations/system-tables/table_engines.md +++ b/docs/en/operations/system-tables/table_engines.md @@ -14,7 +14,7 @@ This table contains the following columns (the column type is shown in brackets) - `supports_sort_order` (UInt8) — Flag that indicates if table engine supports clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` and `SAMPLE_BY`. - `supports_replication` (UInt8) — Flag that indicates if table engine supports [data replication](../../engines/table-engines/mergetree-family/replication.md). - `supports_duduplication` (UInt8) — Flag that indicates if table engine supports data deduplication. -- `supports_parallel_insert` (UInt8) — Flag that indicates if table engine supports parallel insert (see [`max_insert_threads`](../../operations/settings/settings.md#settings-max-insert-threads) setting). +- `supports_parallel_insert` (UInt8) — Flag that indicates if table engine supports parallel insert (see [`max_insert_threads`](../../operations/settings/settings.md#max-insert-threads) setting). Example: diff --git a/docs/en/operations/system-tables/tables.md b/docs/en/operations/system-tables/tables.md index e4461e14236..8049ab091c0 100644 --- a/docs/en/operations/system-tables/tables.md +++ b/docs/en/operations/system-tables/tables.md @@ -29,7 +29,7 @@ Columns: - `dependencies_database` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Database dependencies. -- `dependencies_table` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Table dependencies ([MaterializedView](../../engines/table-engines/special/materializedview.md) tables based on the current table). +- `dependencies_table` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Table dependencies ([materialized views](../../sql-reference/statements/create/view.md#materialized-view) the current table). - `create_table_query` ([String](../../sql-reference/data-types/string.md)) - The query that was used to create the table. @@ -57,6 +57,8 @@ Columns: - If the table stores data on disk, returns used space on disk (i.e. compressed). - If the table stores data in memory, returns approximated number of used bytes in memory. +- `total_bytes_uncompressed` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of uncompressed bytes, if it's possible to quickly determine the exact number of bytes from the part checksums for the table on storage, otherwise `NULL` (does not take underlying storage (if any) into account). + - `lifetime_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of rows INSERTed since server start (only for `Buffer` tables). - `lifetime_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of bytes INSERTed since server start (only for `Buffer` tables). 
diff --git a/docs/en/operations/system-tables/view_refreshes.md b/docs/en/operations/system-tables/view_refreshes.md new file mode 100644 index 00000000000..12377507b39 --- /dev/null +++ b/docs/en/operations/system-tables/view_refreshes.md @@ -0,0 +1,43 @@ +--- +slug: /en/operations/system-tables/view_refreshes +--- +# view_refreshes + +Information about [Refreshable Materialized Views](../../sql-reference/statements/create/view.md#refreshable-materialized-view). Contains all refreshable materialized views, regardless of whether there's a refresh in progress or not. + + +Columns: + +- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the table is in. +- `view` ([String](../../sql-reference/data-types/string.md)) — Table name. +- `status` ([String](../../sql-reference/data-types/string.md)) — Current state of the refresh. +- `last_refresh_result` ([String](../../sql-reference/data-types/string.md)) — Outcome of the latest refresh attempt. +- `last_refresh_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time of the last refresh attempt. `NULL` if no refresh attempts happened since server startup or table creation. +- `last_success_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time of the last successful refresh. `NULL` if no successful refreshes happened since server startup or table creation. +- `duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md)) — How long the last refresh attempt took. +- `next_refresh_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time at which the next refresh is scheduled to start. +- `remaining_dependencies` ([Array(String)](../../sql-reference/data-types/array.md)) — If the view has [refresh dependencies](../../sql-reference/statements/create/view.md#refresh-dependencies), this array contains the subset of those dependencies that are not satisfied for the current refresh yet. If `status = 'WaitingForDependencies'`, a refresh is ready to start as soon as these dependencies are fulfilled. +- `exception` ([String](../../sql-reference/data-types/string.md)) — if `last_refresh_result = 'Exception'`, i.e. the last refresh attempt failed, this column contains the corresponding error message and stack trace. +- `refresh_count` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of successful refreshes since last server restart or table creation. +- `progress` ([Float64](../../sql-reference/data-types/float.md)) — Progress of the current refresh, between 0 and 1. +- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows read by the current refresh so far. +- `total_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Estimated total number of rows that need to be read by the current refresh. + +(There are additional columns related to current refresh progress, but they are currently unreliable.) 
+ +**Example** + +```sql +SELECT + database, + view, + status, + last_refresh_result, + last_refresh_time, + next_refresh_time +FROM system.view_refreshes + +┌─database─┬─view───────────────────────┬─status────┬─last_refresh_result─┬───last_refresh_time─┬───next_refresh_time─┐ +│ default │ hello_documentation_reader │ Scheduled │ Finished │ 2023-12-01 01:24:00 │ 2023-12-01 01:25:00 │ +└──────────┴────────────────────────────┴───────────┴─────────────────────┴─────────────────────┴─────────────────────┘ +``` diff --git a/docs/en/operations/utilities/clickhouse-benchmark.md b/docs/en/operations/utilities/clickhouse-benchmark.md index 8620b44c368..8b7d7f85552 100644 --- a/docs/en/operations/utilities/clickhouse-benchmark.md +++ b/docs/en/operations/utilities/clickhouse-benchmark.md @@ -53,7 +53,6 @@ clickhouse-benchmark [keys] < queries_file; - `--confidence=N` — Level of confidence for T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. In the [comparison mode](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` performs the [Independent two-sample Student’s t-test](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) to determine whether the two distributions aren’t different with the selected level of confidence. - `--cumulative` — Printing cumulative data instead of data per interval. - `--database=DATABASE_NAME` — ClickHouse database name. Default value: `default`. -- `--json=FILEPATH` — `JSON` output. When the key is set, `clickhouse-benchmark` outputs a report to the specified JSON-file. - `--user=USERNAME` — ClickHouse user name. Default value: `default`. - `--password=PSWD` — ClickHouse user password. Default value: empty string. - `--stacktrace` — Stack traces output. When the key is set, `clickhouse-bencmark` outputs stack traces of exceptions. diff --git a/docs/en/sql-reference/aggregate-functions/reference/count.md b/docs/en/sql-reference/aggregate-functions/reference/count.md index a40108a331a..ca4067c8d8c 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/count.md +++ b/docs/en/sql-reference/aggregate-functions/reference/count.md @@ -28,7 +28,7 @@ In both cases the type of the returned value is [UInt64](../../../sql-reference/ **Details** -ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this construction depends on the [count_distinct_implementation](../../../operations/settings/settings.md#settings-count_distinct_implementation) setting. It defines which of the [uniq\*](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) functions is used to perform the operation. The default is the [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) function. +ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this construction depends on the [count_distinct_implementation](../../../operations/settings/settings.md#count_distinct_implementation) setting. It defines which of the [uniq\*](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) functions is used to perform the operation. The default is the [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) function. The `SELECT count() FROM table` query is optimized by default using metadata from MergeTree. 
If you need to use row-level security, disable optimization using the [optimize_trivial_count_query](../../../operations/settings/settings.md#optimize-trivial-count-query) setting. diff --git a/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md b/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md index e21dad5b2f5..62edc221858 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md +++ b/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md @@ -1,62 +1,64 @@ ---- -slug: /en/sql-reference/aggregate-functions/reference/sparkbar -sidebar_position: 311 -sidebar_label: sparkbar ---- - -# sparkbar - -The function plots a frequency histogram for values `x` and the repetition rate `y` of these values over the interval `[min_x, max_x]`. -Repetitions for all `x` falling into the same bucket are averaged, so data should be pre-aggregated. -Negative repetitions are ignored. - -If no interval is specified, then the minimum `x` is used as the interval start, and the maximum `x` — as the interval end. -Otherwise, values outside the interval are ignored. - -**Syntax** - -``` sql -sparkbar(buckets[, min_x, max_x])(x, y) -``` - -**Parameters** - -- `buckets` — The number of segments. Type: [Integer](../../../sql-reference/data-types/int-uint.md). -- `min_x` — The interval start. Optional parameter. -- `max_x` — The interval end. Optional parameter. - -**Arguments** - -- `x` — The field with values. -- `y` — The field with the frequency of values. - -**Returned value** - -- The frequency histogram. - -**Example** - -Query: - -``` sql -CREATE TABLE spark_bar_data (`value` Int64, `event_date` Date) ENGINE = MergeTree ORDER BY event_date; - -INSERT INTO spark_bar_data VALUES (1,'2020-01-01'), (3,'2020-01-02'), (4,'2020-01-02'), (-3,'2020-01-02'), (5,'2020-01-03'), (2,'2020-01-04'), (3,'2020-01-05'), (7,'2020-01-06'), (6,'2020-01-07'), (8,'2020-01-08'), (2,'2020-01-11'); - -SELECT sparkbar(9)(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date); - -SELECT sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date); -``` - -Result: - -``` text -┌─sparkbar(9)(event_date, cnt)─┐ -│ ▂▅▂▃▆█ ▂ │ -└──────────────────────────────┘ - -┌─sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date, cnt)─┐ -│ ▂▅▂▃▇▆█ │ -└──────────────────────────────────────────────────────────────────────────┘ -``` - +--- +slug: /en/sql-reference/aggregate-functions/reference/sparkbar +sidebar_position: 311 +sidebar_label: sparkbar +--- + +# sparkbar + +The function plots a frequency histogram for values `x` and the repetition rate `y` of these values over the interval `[min_x, max_x]`. +Repetitions for all `x` falling into the same bucket are averaged, so data should be pre-aggregated. +Negative repetitions are ignored. + +If no interval is specified, then the minimum `x` is used as the interval start, and the maximum `x` — as the interval end. +Otherwise, values outside the interval are ignored. + +**Syntax** + +``` sql +sparkbar(buckets[, min_x, max_x])(x, y) +``` + +**Parameters** + +- `buckets` — The number of segments. Type: [Integer](../../../sql-reference/data-types/int-uint.md). +- `min_x` — The interval start. Optional parameter. +- `max_x` — The interval end. Optional parameter. + +**Arguments** + +- `x` — The field with values. +- `y` — The field with the frequency of values. 
+ +**Returned value** + +- The frequency histogram. + +**Example** + +Query: + +``` sql +CREATE TABLE spark_bar_data (`value` Int64, `event_date` Date) ENGINE = MergeTree ORDER BY event_date; + +INSERT INTO spark_bar_data VALUES (1,'2020-01-01'), (3,'2020-01-02'), (4,'2020-01-02'), (-3,'2020-01-02'), (5,'2020-01-03'), (2,'2020-01-04'), (3,'2020-01-05'), (7,'2020-01-06'), (6,'2020-01-07'), (8,'2020-01-08'), (2,'2020-01-11'); + +SELECT sparkbar(9)(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date); + +SELECT sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date); +``` + +Result: + +``` text +┌─sparkbar(9)(event_date, cnt)─┐ +│ ▂▅▂▃▆█ ▂ │ +└──────────────────────────────┘ + +┌─sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date, cnt)─┐ +│ ▂▅▂▃▇▆█ │ +└──────────────────────────────────────────────────────────────────────────┘ +``` + +The alias for this function is sparkBar. + diff --git a/docs/en/sql-reference/dictionaries/index.md b/docs/en/sql-reference/dictionaries/index.md index 4f021b25809..9f86aaf2502 100644 --- a/docs/en/sql-reference/dictionaries/index.md +++ b/docs/en/sql-reference/dictionaries/index.md @@ -394,7 +394,7 @@ Configuration example: or ``` sql -LAYOUT(HASHED_ARRAY()) +LAYOUT(HASHED_ARRAY([SHARDS 1])) ``` ### complex_key_hashed_array @@ -412,7 +412,7 @@ Configuration example: or ``` sql -LAYOUT(COMPLEX_KEY_HASHED_ARRAY()) +LAYOUT(COMPLEX_KEY_HASHED_ARRAY([SHARDS 1])) ``` ### range_hashed {#range_hashed} @@ -2415,8 +2415,8 @@ clickhouse client \ --secure \ --password MY_PASSWORD \ --query " - INSERT INTO regexp_dictionary_source_table - SELECT * FROM input ('id UInt64, parent_id UInt64, regexp String, keys Array(String), values Array(String)') + INSERT INTO regexp_dictionary_source_table + SELECT * FROM input ('id UInt64, parent_id UInt64, regexp String, keys Array(String), values Array(String)') FORMAT CSV" < regexp_dict.csv ``` diff --git a/docs/en/sql-reference/functions/array-functions.md b/docs/en/sql-reference/functions/array-functions.md index 00efa63c960..f5da00a8663 100644 --- a/docs/en/sql-reference/functions/array-functions.md +++ b/docs/en/sql-reference/functions/array-functions.md @@ -143,7 +143,7 @@ range([start, ] end [, step]) **Implementation details** - All arguments `start`, `end`, `step` must be below data types: `UInt8`, `UInt16`, `UInt32`, `UInt64`,`Int8`, `Int16`, `Int32`, `Int64`, as well as elements of the returned array, which's type is a super type of all arguments. -- An exception is thrown if query results in arrays with a total length of more than number of elements specified by the [function_range_max_elements_in_block](../../operations/settings/settings.md#settings-function_range_max_elements_in_block) setting. +- An exception is thrown if query results in arrays with a total length of more than number of elements specified by the [function_range_max_elements_in_block](../../operations/settings/settings.md#function_range_max_elements_in_block) setting. - Returns Null if any argument has Nullable(Nothing) type. An exception is thrown if any argument has Null value (Nullable(T) type). 
**Examples** diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 565486275e6..0261589b968 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -1809,6 +1809,8 @@ Alias: `dateTrunc`. - `quarter` - `year` + `unit` argument is case-insensitive. + - `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). - `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). If not specified, the function uses the timezone of the `value` parameter. [String](../../sql-reference/data-types/string.md). diff --git a/docs/en/sql-reference/functions/introspection.md b/docs/en/sql-reference/functions/introspection.md index 8cb35483555..1025b8bdc3d 100644 --- a/docs/en/sql-reference/functions/introspection.md +++ b/docs/en/sql-reference/functions/introspection.md @@ -16,7 +16,7 @@ For proper operation of introspection functions: - Install the `clickhouse-common-static-dbg` package. -- Set the [allow_introspection_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) setting to 1. +- Set the [allow_introspection_functions](../../operations/settings/settings.md#allow_introspection_functions) setting to 1. For security reasons introspection functions are disabled by default. diff --git a/docs/en/sql-reference/functions/ip-address-functions.md b/docs/en/sql-reference/functions/ip-address-functions.md index 33c788a632e..be20e02d77e 100644 --- a/docs/en/sql-reference/functions/ip-address-functions.md +++ b/docs/en/sql-reference/functions/ip-address-functions.md @@ -501,41 +501,3 @@ Result: │ 0 │ └────────────────────────────────────────────────────────────────────┘ ``` - -## reverseDNSQuery - -Performs a reverse DNS query to get the PTR records associated with the IP address. - -**Syntax** - -``` sql -reverseDNSQuery(address) -``` - -This function performs reverse DNS resolutions on both IPv4 and IPv6. - -**Arguments** - -- `address` — An IPv4 or IPv6 address. [String](../../sql-reference/data-types/string.md). - -**Returned value** - -- Associated domains (PTR records). - -Type: Type: [Array(String)](../../sql-reference/data-types/array.md). - -**Example** - -Query: - -``` sql -SELECT reverseDNSQuery('192.168.0.2'); -``` - -Result: - -``` text -┌─reverseDNSQuery('192.168.0.2')────────────┐ -│ ['test2.example.com','test3.example.com'] │ -└───────────────────────────────────────────┘ -``` diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 5b9d01985dd..35f9c7af2ce 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -2831,3 +2831,92 @@ Result: │ SELECT a, b FROM tab WHERE (a > 3) AND (b < 3) │ └─────────────────────────────────────────────────────────────────────────┘ ``` + +## minSampleSizeConversion + +Calculates minimum required sample size for an A/B test comparing conversions (proportions) in two samples. + +**Syntax** + +``` sql +minSampleSizeConversion(baseline, mde, power, alpha) +``` + +Uses the formula described in [this article](https://towardsdatascience.com/required-sample-size-for-a-b-testing-6f6608dd330a). Assumes equal sizes of treatment and control groups. 
Returns the sample size required for one group (i.e. the sample size required for the whole experiment is twice the returned value). + +**Arguments** + +- `baseline` — Baseline conversion. [Float](../data-types/float.md). +- `mde` — Minimum detectable effect (MDE) as percentage points (e.g. for a baseline conversion 0.25 the MDE 0.03 means an expected change to 0.25 ± 0.03). [Float](../data-types/float.md). +- `power` — Required statistical power of a test (1 - probability of Type II error). [Float](../data-types/float.md). +- `alpha` — Required significance level of a test (probability of Type I error). [Float](../data-types/float.md). + +**Returned value** + +A named [Tuple](../data-types/tuple.md) with 3 elements: + +- `"minimum_sample_size"` — Required sample size. [Float64](../data-types/float.md). +- `"detect_range_lower"` — Lower bound of the range of values not detectable with the returned required sample size (i.e. all values less than or equal to `"detect_range_lower"` are detectable with the provided `alpha` and `power`). Calculated as `baseline - mde`. [Float64](../data-types/float.md). +- `"detect_range_upper"` — Upper bound of the range of values not detectable with the returned required sample size (i.e. all values greater than or equal to `"detect_range_upper"` are detectable with the provided `alpha` and `power`). Calculated as `baseline + mde`. [Float64](../data-types/float.md). + +**Example** + +The following query calculates the required sample size for an A/B test with baseline conversion of 25%, MDE of 3%, significance level of 5%, and the desired statistical power of 80%: + +``` sql +SELECT minSampleSizeConversion(0.25, 0.03, 0.80, 0.05) AS sample_size; +``` + +Result: + +``` text +┌─sample_size───────────────────┐ +│ (3396.077603219163,0.22,0.28) │ +└───────────────────────────────┘ +``` + +## minSampleSizeContinuous + +Calculates minimum required sample size for an A/B test comparing means of a continuous metric in two samples. + +**Syntax** + +``` sql +minSampleSizeContinous(baseline, sigma, mde, power, alpha) +``` + +Alias: `minSampleSizeContinous` + +Uses the formula described in [this article](https://towardsdatascience.com/required-sample-size-for-a-b-testing-6f6608dd330a). Assumes equal sizes of treatment and control groups. Returns the required sample size for one group (i.e. the sample size required for the whole experiment is twice the returned value). Also assumes equal variance of the test metric in treatment and control groups. + +**Arguments** + +- `baseline` — Baseline value of a metric. [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md). +- `sigma` — Baseline standard deviation of a metric. [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md). +- `mde` — Minimum detectable effect (MDE) as percentage of the baseline value (e.g. for a baseline value 112.25 the MDE 0.03 means an expected change to 112.25 ± 112.25*0.03). [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md). +- `power` — Required statistical power of a test (1 - probability of Type II error). [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md). +- `alpha` — Required significance level of a test (probability of Type I error). [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md). + +**Returned value** + +A named [Tuple](../data-types/tuple.md) with 3 elements: + +- `"minimum_sample_size"` — Required sample size. [Float64](../data-types/float.md). 
+- `"detect_range_lower"` — Lower bound of the range of values not detectable with the returned required sample size (i.e. all values less than or equal to `"detect_range_lower"` are detectable with the provided `alpha` and `power`). Calculated as `baseline * (1 - mde)`. [Float64](../data-types/float.md). +- `"detect_range_upper"` — Upper bound of the range of values not detectable with the returned required sample size (i.e. all values greater than or equal to `"detect_range_upper"` are detectable with the provided `alpha` and `power`). Calculated as `baseline * (1 + mde)`. [Float64](../data-types/float.md). + +**Example** + +The following query calculates the required sample size for an A/B test on a metric with baseline value of 112.25, standard deviation of 21.1, MDE of 3%, significance level of 5%, and the desired statistical power of 80%: + +``` sql +SELECT minSampleSizeContinous(112.25, 21.1, 0.03, 0.80, 0.05) AS sample_size; +``` + +Result: + +``` text +┌─sample_size───────────────────────────┐ +│ (616.2931945826209,108.8825,115.6175) │ +└───────────────────────────────────────┘ +``` diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md index 1940993ce0b..20694211912 100644 --- a/docs/en/sql-reference/functions/string-functions.md +++ b/docs/en/sql-reference/functions/string-functions.md @@ -393,40 +393,6 @@ Reverses the sequence of bytes in a string. Reverses a sequence of Unicode code points in a string. Assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined. -## format - -Format the `pattern` string with the strings listed in the arguments, similar to formatting in Python. The pattern string can contain replacement fields surrounded by curly braces `{}`. Anything not contained in braces is considered literal text and copied verbatim into the output. Literal brace character can be escaped by two braces: `{{ '{{' }}` and `{{ '}}' }}`. Field names can be numbers (starting from zero) or empty (then they are implicitly given monotonically increasing numbers). - -**Syntax** - -```sql -format(pattern, s0, s1, …) -``` - -**Example** - -``` sql -SELECT format('{1} {0} {1}', 'World', 'Hello') -``` - -```result -┌─format('{1} {0} {1}', 'World', 'Hello')─┐ -│ Hello World Hello │ -└─────────────────────────────────────────┘ -``` - -With implicit numbers: - -``` sql -SELECT format('{} {}', 'Hello', 'World') -``` - -```result -┌─format('{} {}', 'Hello', 'World')─┐ -│ Hello World │ -└───────────────────────────────────┘ -``` - ## concat Concatenates the given arguments. @@ -567,8 +533,8 @@ Result: ```result ┌─concatWithSeparator('a', '1', '2', '3', '4')─┐ -│ 1a2a3a4 │ -└───────────────────────────────────┘ +│ 1a2a3a4 │ +└──────────────────────────────────────────────┘ ``` ## concatWithSeparatorAssumeInjective @@ -577,26 +543,52 @@ Like `concatWithSeparator` but assumes that `concatWithSeparator(sep, expr1, exp A function is called injective if it returns for different arguments different results. In other words: different arguments never produce identical result. -## substring(s, offset, length) +## substring -Returns a substring with `length` many bytes, starting at the byte at index `offset`. Character indexing starts from 1. +Returns the substring of a string `s` which starts at the specified byte index `offset`. Byte counting starts from 1. If `offset` is 0, an empty string is returned. 
If `offset` is negative, the substring starts `pos` characters from the end of the string, rather than from the beginning. An optional argument `length` specifies the maximum number of bytes the returned substring may have. **Syntax** ```sql -substring(s, offset, length) +substring(s, offset[, length]) ``` Alias: - `substr` - `mid` +**Arguments** + +- `s` — The string to calculate a substring from. [String](../../sql-reference/data-types/string.md), [FixedString](../../sql-reference/data-types/fixedstring.md) or [Enum](../../sql-reference/data-types/enum.md) +- `offset` — The starting position of the substring in `s` . [(U)Int*](../../sql-reference/data-types/int-uint.md). +- `length` — The maximum length of the substring. [(U)Int*](../../sql-reference/data-types/int-uint.md). Optional. + +**Returned value** + +A substring of `s` with `length` many bytes, starting at index `offset`. + +Type: `String`. + +**Example** + +``` sql +SELECT 'database' AS db, substr(db, 5), substr(db, 5, 1) +``` + +Result: + +```result +┌─db───────┬─substring('database', 5)─┬─substring('database', 5, 1)─┐ +│ database │ base │ b │ +└──────────┴──────────────────────────┴─────────────────────────────┘ +``` + ## substringUTF8 Like `substring` but for Unicode code points. Assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined. -## substringIndex(s, delim, count) +## substringIndex Returns the substring of `s` before `count` occurrences of the delimiter `delim`, as in Spark or MySQL. @@ -627,7 +619,7 @@ Result: └──────────────────────────────────────────────┘ ``` -## substringIndexUTF8(s, delim, count) +## substringIndexUTF8 Like `substringIndex` but for Unicode code points. Assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined. @@ -1259,7 +1251,7 @@ This function also replaces numeric character references with Unicode characters **Syntax** ``` sql -decodeHTMComponent(x) +decodeHTMLComponent(x) ``` **Arguments** @@ -1276,7 +1268,7 @@ Type: [String](../../sql-reference/data-types/string.md). ``` sql SELECT decodeHTMLComponent(''CH'); -SELECT decodeHMLComponent('I♥ClickHouse'); +SELECT decodeHTMLComponent('I♥ClickHouse'); ``` Result: diff --git a/docs/en/sql-reference/functions/string-replace-functions.md b/docs/en/sql-reference/functions/string-replace-functions.md index 74d5d747193..c7bd16cad4a 100644 --- a/docs/en/sql-reference/functions/string-replace-functions.md +++ b/docs/en/sql-reference/functions/string-replace-functions.md @@ -132,6 +132,40 @@ For more information, see [RE2](https://github.com/google/re2/blob/master/re2/re regexpQuoteMeta(s) ``` +## format + +Format the `pattern` string with the values (strings, integers, etc.) listed in the arguments, similar to formatting in Python. The pattern string can contain replacement fields surrounded by curly braces `{}`. Anything not contained in braces is considered literal text and copied verbatim into the output. Literal brace character can be escaped by two braces: `{{ '{{' }}` and `{{ '}}' }}`. Field names can be numbers (starting from zero) or empty (then they are implicitly given monotonically increasing numbers). 
+ +**Syntax** + +```sql +format(pattern, s0, s1, …) +``` + +**Example** + +``` sql +SELECT format('{1} {0} {1}', 'World', 'Hello') +``` + +```result +┌─format('{1} {0} {1}', 'World', 'Hello')─┐ +│ Hello World Hello │ +└─────────────────────────────────────────┘ +``` + +With implicit numbers: + +``` sql +SELECT format('{} {}', 'Hello', 'World') +``` + +```result +┌─format('{} {}', 'Hello', 'World')─┐ +│ Hello World │ +└───────────────────────────────────┘ +``` + ## translate Replaces characters in the string `s` using a one-to-one character mapping defined by `from` and `to` strings. `from` and `to` must be constant ASCII strings of the same size. Non-ASCII characters in the original string are not modified. diff --git a/docs/en/sql-reference/functions/time-series-functions.md b/docs/en/sql-reference/functions/time-series-functions.md new file mode 100644 index 00000000000..434432baa48 --- /dev/null +++ b/docs/en/sql-reference/functions/time-series-functions.md @@ -0,0 +1,59 @@ +--- +slug: /en/sql-reference/functions/time-series-functions +sidebar_position: 172 +sidebar_label: Time Series +--- + +# Time Series Functions + +Below functions are used for time series analysis. + +## seriesPeriodDetectFFT + +Finds the period of the given time series data using FFT +FFT - [Fast Fourier transform](https://en.wikipedia.org/wiki/Fast_Fourier_transform) + +**Syntax** + +``` sql +seriesPeriodDetectFFT(series); +``` + +**Arguments** + +- `series` - An array of numeric values + +**Returned value** + +- A real value equal to the period of time series +- Returns NAN when number of data points are less than four. + +Type: [Float64](../../sql-reference/data-types/float.md). + +**Examples** + +Query: + +``` sql +SELECT seriesPeriodDetectFFT([1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6]) AS print_0; +``` + +Result: + +``` text +┌───────────print_0──────┐ +│ 3 │ +└────────────────────────┘ +``` + +``` sql +SELECT seriesPeriodDetectFFT(arrayMap(x -> abs((x % 6) - 3), range(1000))) AS print_0; +``` + +Result: + +``` text +┌─print_0─┐ +│ 6 │ +└─────────┘ +``` diff --git a/docs/en/sql-reference/statements/alter/apply-deleted-mask.md b/docs/en/sql-reference/statements/alter/apply-deleted-mask.md new file mode 100644 index 00000000000..7a11d66e739 --- /dev/null +++ b/docs/en/sql-reference/statements/alter/apply-deleted-mask.md @@ -0,0 +1,22 @@ +--- +slug: /en/sql-reference/statements/alter/apply-deleted-mask +sidebar_position: 46 +sidebar_label: APPLY DELETED MASK +--- + +# Apply mask of deleted rows + +``` sql +ALTER TABLE [db].name [ON CLUSTER cluster] APPLY DELETED MASK [IN PARTITION partition_id] +``` + +The command applies mask created by [lightweight delete](/docs/en/sql-reference/statements/delete) and forcefully removes rows marked as deleted from disk. This command is a heavyweight mutation and it semantically equals to query ```ALTER TABLE [db].name DELETE WHERE _row_exists = 0```. + +:::note +It only works for tables in the [`MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) family (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables). 
+::: + +**See also** + +- [Lightweight deletes](/docs/en/sql-reference/statements/delete) +- [Heavyweight deletes](/docs/en/sql-reference/statements/alter/delete.md) diff --git a/docs/en/sql-reference/statements/alter/index.md b/docs/en/sql-reference/statements/alter/index.md index d28542e0a43..dc6668c7983 100644 --- a/docs/en/sql-reference/statements/alter/index.md +++ b/docs/en/sql-reference/statements/alter/index.md @@ -17,8 +17,9 @@ Most `ALTER TABLE` queries modify table settings or data: - [CONSTRAINT](/docs/en/sql-reference/statements/alter/constraint.md) - [TTL](/docs/en/sql-reference/statements/alter/ttl.md) - [STATISTIC](/docs/en/sql-reference/statements/alter/statistic.md) +- [APPLY DELETED MASK](/docs/en/sql-reference/statements/alter/apply-deleted-mask.md) -:::note +:::note Most `ALTER TABLE` queries are supported only for [\*MergeTree](/docs/en/engines/table-engines/mergetree-family/index.md) tables, as well as [Merge](/docs/en/engines/table-engines/special/merge.md) and [Distributed](/docs/en/engines/table-engines/special/distributed.md). ::: @@ -59,7 +60,7 @@ For all `ALTER` queries, you can use the [alter_sync](/docs/en/operations/settin You can specify how long (in seconds) to wait for inactive replicas to execute all `ALTER` queries with the [replication_wait_for_inactive_replica_timeout](/docs/en/operations/settings/settings.md/#replication-wait-for-inactive-replica-timeout) setting. -:::note +:::note For all `ALTER` queries, if `alter_sync = 2` and some replicas are not active for more than the time, specified in the `replication_wait_for_inactive_replica_timeout` setting, then an exception `UNFINISHED` is thrown. ::: diff --git a/docs/en/sql-reference/statements/alter/view.md b/docs/en/sql-reference/statements/alter/view.md index 5c5bf0355f6..517e64e3e5b 100644 --- a/docs/en/sql-reference/statements/alter/view.md +++ b/docs/en/sql-reference/statements/alter/view.md @@ -6,28 +6,28 @@ sidebar_label: VIEW # ALTER TABLE … MODIFY QUERY Statement -You can modify `SELECT` query that was specified when a [materialized view](../create/view.md#materialized) was created with the `ALTER TABLE … MODIFY QUERY` statement without interrupting ingestion process. +You can modify `SELECT` query that was specified when a [materialized view](../create/view.md#materialized) was created with the `ALTER TABLE … MODIFY QUERY` statement without interrupting ingestion process. -The `allow_experimental_alter_materialized_view_structure` setting must be enabled. +The `allow_experimental_alter_materialized_view_structure` setting must be enabled. This command is created to change materialized view created with `TO [db.]name` clause. It does not change the structure of the underling storage table and it does not change the columns' definition of the materialized view, because of this the application of this command is very limited for materialized views are created without `TO [db.]name` clause. 
**Example with TO table** ```sql -CREATE TABLE events (ts DateTime, event_type String) +CREATE TABLE events (ts DateTime, event_type String) ENGINE = MergeTree ORDER BY (event_type, ts); -CREATE TABLE events_by_day (ts DateTime, event_type String, events_cnt UInt64) +CREATE TABLE events_by_day (ts DateTime, event_type String, events_cnt UInt64) ENGINE = SummingMergeTree ORDER BY (event_type, ts); -CREATE MATERIALIZED VIEW mv TO events_by_day AS +CREATE MATERIALIZED VIEW mv TO events_by_day AS SELECT toStartOfDay(ts) ts, event_type, count() events_cnt FROM events -GROUP BY ts, event_type; +GROUP BY ts, event_type; -INSERT INTO events -SELECT Date '2020-01-01' + interval number * 900 second, +INSERT INTO events +SELECT Date '2020-01-01' + interval number * 900 second, ['imp', 'click'][number%2+1] FROM numbers(100); @@ -43,23 +43,23 @@ ORDER BY ts, event_type; │ 2020-01-02 00:00:00 │ imp │ 2 │ └─────────────────────┴────────────┴─────────────────┘ --- Let's add the new measurment `cost` +-- Let's add the new measurment `cost` -- and the new dimension `browser`. -ALTER TABLE events +ALTER TABLE events ADD COLUMN browser String, ADD COLUMN cost Float64; -- Column do not have to match in a materialized view and TO -- (destination table), so the next alter does not break insertion. -ALTER TABLE events_by_day +ALTER TABLE events_by_day ADD COLUMN cost Float64, ADD COLUMN browser String after event_type, MODIFY ORDER BY (event_type, ts, browser); -INSERT INTO events -SELECT Date '2020-01-02' + interval number * 900 second, +INSERT INTO events +SELECT Date '2020-01-02' + interval number * 900 second, ['imp', 'click'][number%2+1], ['firefox', 'safary', 'chrome'][number%3+1], 10/(number+1)%33 @@ -82,16 +82,16 @@ ORDER BY ts, event_type; └─────────────────────┴────────────┴─────────┴────────────┴──────┘ SET allow_experimental_alter_materialized_view_structure=1; - -ALTER TABLE mv MODIFY QUERY + +ALTER TABLE mv MODIFY QUERY SELECT toStartOfDay(ts) ts, event_type, browser, count() events_cnt, sum(cost) cost FROM events GROUP BY ts, event_type, browser; -INSERT INTO events -SELECT Date '2020-01-03' + interval number * 900 second, +INSERT INTO events +SELECT Date '2020-01-03' + interval number * 900 second, ['imp', 'click'][number%2+1], ['firefox', 'safary', 'chrome'][number%3+1], 10/(number+1)%33 @@ -138,7 +138,7 @@ PRIMARY KEY (event_type, ts) ORDER BY (event_type, ts, browser) SETTINGS index_granularity = 8192 --- !!! The columns' definition is unchanged but it does not matter, we are not quering +-- !!! The columns' definition is unchanged but it does not matter, we are not quering -- MATERIALIZED VIEW, we are quering TO (storage) table. -- SELECT section is updated. @@ -169,7 +169,7 @@ The application is very limited because you can only change the `SELECT` section ```sql CREATE TABLE src_table (`a` UInt32) ENGINE = MergeTree ORDER BY a; -CREATE MATERIALIZED VIEW mv (`a` UInt32) ENGINE = MergeTree ORDER BY a AS SELECT a FROM src_table; +CREATE MATERIALIZED VIEW mv (`a` UInt32) ENGINE = MergeTree ORDER BY a AS SELECT a FROM src_table; INSERT INTO src_table (a) VALUES (1), (2); SELECT * FROM mv; ``` @@ -199,3 +199,7 @@ SELECT * FROM mv; ## ALTER LIVE VIEW Statement `ALTER LIVE VIEW ... REFRESH` statement refreshes a [Live view](../create/view.md#live-view). See [Force Live View Refresh](../create/view.md#live-view-alter-refresh). + +## ALTER TABLE … MODIFY REFRESH Statement + +`ALTER TABLE ... 
MODIFY REFRESH` statement changes refresh parameters of a [Refreshable Materialized View](../create/view.md#refreshable-materialized-view). See [Changing Refresh Parameters](../create/view.md#changing-refresh-parameters). diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md index 56828745048..f6158acd9a4 100644 --- a/docs/en/sql-reference/statements/create/view.md +++ b/docs/en/sql-reference/statements/create/view.md @@ -37,6 +37,7 @@ SELECT a, b, c FROM (SELECT ...) ``` ## Parameterized View + Parametrized views are similar to normal views, but can be created with parameters which are not resolved immediately. These views can be used with table functions, which specify the name of the view as function name and the parameter values as its arguments. ``` sql @@ -66,7 +67,7 @@ When creating a materialized view with `TO [db].[table]`, you can't also use `PO A materialized view is implemented as follows: when inserting data to the table specified in `SELECT`, part of the inserted data is converted by this `SELECT` query, and the result is inserted in the view. -:::note +:::note Materialized views in ClickHouse use **column names** instead of column order during insertion into destination table. If some column names are not present in the `SELECT` query result, ClickHouse uses a default value, even if the column is not [Nullable](../../data-types/nullable.md). A safe practice would be to add aliases for every column when using Materialized views. Materialized views in ClickHouse are implemented more like insert triggers. If there’s some aggregation in the view query, it’s applied only to the batch of freshly inserted data. Any changes to existing data of source table (like update, delete, drop partition, etc.) does not change the materialized view. @@ -96,9 +97,116 @@ This feature is deprecated and will be removed in the future. For your convenience, the old documentation is located [here](https://pastila.nl/?00f32652/fdf07272a7b54bda7e13b919264e449f.md) +## Refreshable Materialized View {#refreshable-materialized-view} + +```sql +CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name +REFRESH EVERY|AFTER interval [OFFSET interval] +RANDOMIZE FOR interval +DEPENDS ON [db.]name [, [db.]name [, ...]] +[TO[db.]name] [(columns)] [ENGINE = engine] [EMPTY] +AS SELECT ... +``` +where `interval` is a sequence of simple intervals: +```sql +number SECOND|MINUTE|HOUR|DAY|WEEK|MONTH|YEAR +``` + +Periodically runs the corresponding query and stores its result in a table, atomically replacing the table's previous contents. + +Differences from regular non-refreshable materialized views: + * No insert trigger. I.e. when new data is inserted into the table specified in SELECT, it's *not* automatically pushed to the refreshable materialized view. The periodic refresh runs the entire query and replaces the entire table. + * No restrictions on the SELECT query. Table functions (e.g. `url()`), views, UNION, JOIN, are all allowed. + +:::note +Refreshable materialized views are a work in progress. Setting `allow_experimental_refreshable_materialized_view = 1` is required for creating one. Current limitations: + * not compatible with Replicated database or table engines, + * require [Atomic database engine](../../../engines/database-engines/atomic.md), + * no retries for failed refresh - we just skip to the next scheduled refresh time, + * no limit on number of concurrent refreshes. 
+:::
+
+### Refresh Schedule
+
+Example refresh schedules:
+```sql
+REFRESH EVERY 1 DAY -- every day, at midnight (UTC)
+REFRESH EVERY 1 MONTH -- on 1st day of every month, at midnight
+REFRESH EVERY 1 MONTH OFFSET 5 DAY 2 HOUR -- on 6th day of every month, at 2:00 am
+REFRESH EVERY 2 WEEK OFFSET 5 DAY 15 HOUR 10 MINUTE -- every other Saturday, at 3:10 pm
+REFRESH EVERY 30 MINUTE -- at 00:00, 00:30, 01:00, 01:30, etc
+REFRESH AFTER 30 MINUTE -- 30 minutes after the previous refresh completes, no alignment with time of day
+-- REFRESH AFTER 1 HOUR OFFSET 1 MINUTE -- syntax error, OFFSET is not allowed with AFTER
+```
+
+`RANDOMIZE FOR` randomly adjusts the time of each refresh, e.g.:
+```sql
+REFRESH EVERY 1 DAY OFFSET 2 HOUR RANDOMIZE FOR 1 HOUR -- every day at a random time between 01:30 and 02:30
+```
+
+At most one refresh may be running at a time, for a given view. E.g. if a view with `REFRESH EVERY 1 MINUTE` takes 2 minutes to refresh, it'll just be refreshing every 2 minutes. If it then becomes faster and starts refreshing in 10 seconds, it'll go back to refreshing every minute. (In particular, it won't refresh every 10 seconds to catch up with a backlog of missed refreshes - there's no such backlog.)
+
+Additionally, a refresh is started immediately after the materialized view is created, unless `EMPTY` is specified in the `CREATE` query. If `EMPTY` is specified, the first refresh happens according to schedule.
+
+### Dependencies {#refresh-dependencies}
+
+`DEPENDS ON` synchronizes refreshes of different tables. By way of example, suppose there's a chain of two refreshable materialized views:
+```sql
+CREATE MATERIALIZED VIEW source REFRESH EVERY 1 DAY AS SELECT * FROM url(...)
+CREATE MATERIALIZED VIEW destination REFRESH EVERY 1 DAY AS SELECT ... FROM source
+```
+Without `DEPENDS ON`, both views will start a refresh at midnight, and `destination` typically will see yesterday's data in `source`. If we add a dependency:
+```
+CREATE MATERIALIZED VIEW destination REFRESH EVERY 1 DAY DEPENDS ON source AS SELECT ... FROM source
+```
+then `destination`'s refresh will start only after `source`'s refresh has finished for that day, so `destination` will be based on fresh data.
+
+Alternatively, the same result can be achieved with:
+```
+CREATE MATERIALIZED VIEW destination REFRESH AFTER 1 HOUR DEPENDS ON source AS SELECT ... FROM source
+```
+where `1 HOUR` can be any duration less than `source`'s refresh period. The dependent table won't be refreshed more frequently than any of its dependencies. This is a valid way to set up a chain of refreshable views without specifying the real refresh period more than once.
+
+A few more examples:
+ * `REFRESH EVERY 1 DAY OFFSET 10 MINUTE` (`destination`) depends on `REFRESH EVERY 1 DAY` (`source`)
+ If `source` refresh takes more than 10 minutes, `destination` will wait for it. + * `REFRESH EVERY 1 DAY OFFSET 1 HOUR` depends on `REFRESH EVERY 1 DAY OFFSET 23 HOUR`
+ Similar to the above, even though the corresponding refreshes happen on different calendar days. + `destination`'s refresh on day X+1 will wait for `source`'s refresh on day X (if it takes more than 2 hours). + * `REFRESH EVERY 2 HOUR` depends on `REFRESH EVERY 1 HOUR`
+ The 2 HOUR refresh happens after the 1 HOUR refresh for every other hour, e.g. after the midnight + refresh, then after the 2am refresh, etc. + * `REFRESH EVERY 1 MINUTE` depends on `REFRESH EVERY 2 HOUR`
+ `REFRESH AFTER 1 MINUTE` depends on `REFRESH EVERY 2 HOUR`
+ `REFRESH AFTER 1 MINUTE` depends on `REFRESH AFTER 2 HOUR`
+ `destination` is refreshed once after every `source` refresh, i.e. every 2 hours. The `1 MINUTE` is effectively ignored. + * `REFRESH AFTER 1 HOUR` depends on `REFRESH AFTER 1 HOUR`
+ Currently this is not recommended. + +:::note +`DEPENDS ON` only works between refreshable materialized views. Listing a regular table in the `DEPENDS ON` list will prevent the view from ever refreshing (dependencies can be removed with `ALTER`, see below). +::: + +### Changing Refresh Parameters {#changing-refresh-parameters} + +To change refresh parameters: +``` +ALTER TABLE [db.]name MODIFY REFRESH EVERY|AFTER ... [RANDOMIZE FOR ...] [DEPENDS ON ...] +``` + +:::note +This replaces refresh schedule *and* dependencies. If the table had a `DEPENDS ON`, doing a `MODIFY REFRESH` without `DEPENDS ON` will remove the dependencies. +::: + +### Other operations + +The status of all refreshable materialized views is available in table [`system.view_refreshes`](../../../operations/system-tables/view_refreshes.md). In particular, it contains refresh progress (if running), last and next refresh time, exception message if a refresh failed. + +To manually stop, start, trigger, or cancel refreshes use [`SYSTEM STOP|START|REFRESH|CANCEL VIEW`](../system.md#refreshable-materialized-views). + ## Window View [Experimental] -:::info +:::info This is an experimental feature that may change in backwards-incompatible ways in the future releases. Enable usage of window views and `WATCH` query using [allow_experimental_window_view](../../../operations/settings/settings.md#allow-experimental-window-view) setting. Input the command `set allow_experimental_window_view = 1`. ::: diff --git a/docs/en/sql-reference/statements/insert-into.md b/docs/en/sql-reference/statements/insert-into.md index e0cc98c2351..f9d93305071 100644 --- a/docs/en/sql-reference/statements/insert-into.md +++ b/docs/en/sql-reference/statements/insert-into.md @@ -11,7 +11,7 @@ Inserts data into a table. **Syntax** ``` sql -INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ... +INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] [SETTINGS ...] VALUES (v11, v12, v13), (v21, v22, v23), ... ``` You can specify a list of columns to insert using the `(c1, c2, c3)`. You can also use an expression with column [matcher](../../sql-reference/statements/select/index.md#asterisk) such as `*` and/or [modifiers](../../sql-reference/statements/select/index.md#select-modifiers) such as [APPLY](../../sql-reference/statements/select/index.md#apply-modifier), [EXCEPT](../../sql-reference/statements/select/index.md#except-modifier), [REPLACE](../../sql-reference/statements/select/index.md#replace-modifier). @@ -126,7 +126,7 @@ To insert a default value instead of `NULL` into a column with not nullable data **Syntax** ``` sql -INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] FROM INFILE file_name [COMPRESSION type] FORMAT format_name +INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] FROM INFILE file_name [COMPRESSION type] [SETTINGS ...] [FORMAT format_name] ``` Use the syntax above to insert data from a file, or files, stored on the **client** side. `file_name` and `type` are string literals. Input file [format](../../interfaces/formats.md) must be set in the `FORMAT` clause. diff --git a/docs/en/sql-reference/statements/optimize.md b/docs/en/sql-reference/statements/optimize.md index 07b5a196096..b5fc0a23745 100644 --- a/docs/en/sql-reference/statements/optimize.md +++ b/docs/en/sql-reference/statements/optimize.md @@ -17,7 +17,7 @@ This query tries to initialize an unscheduled merge of data parts for tables. 
No OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE [BY expression]] ``` -The `OPTIMIZE` query is supported for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family, the [MaterializedView](../../engines/table-engines/special/materializedview.md) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines aren’t supported. +The `OPTIMIZE` query is supported for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family (including [materialized views](../../sql-reference/statements/create/view.md#materialized-view)) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines aren’t supported. When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all replicas (if the [alter_sync](../../operations/settings/settings.md#alter-sync) setting is set to `2`) or on current replica (if the [alter_sync](../../operations/settings/settings.md#alter-sync) setting is set to `1`). diff --git a/docs/en/sql-reference/statements/select/from.md b/docs/en/sql-reference/statements/select/from.md index a4f449ad321..06742ff74e2 100644 --- a/docs/en/sql-reference/statements/select/from.md +++ b/docs/en/sql-reference/statements/select/from.md @@ -34,7 +34,7 @@ Queries that use `FINAL` are executed slightly slower than similar queries that - Data is merged during query execution. - Queries with `FINAL` read primary key columns in addition to the columns specified in the query. -**In most cases, avoid using `FINAL`.** The common approach is to use different queries that assume the background processes of the `MergeTree` engine haven’t happened yet and deal with it by applying aggregation (for example, to discard duplicates). +`FINAL` requires additional compute and memory resources because the processing that would normally occur at merge time must occur in memory at query time. However, using `FINAL` is sometimes necessary to produce accurate results, and it is less expensive than running `OPTIMIZE` to force a merge. It is also sometimes possible to use different queries that assume the background processes of the `MergeTree` engine haven’t happened yet and to handle that by applying aggregation (for example, to discard duplicates). If your queries need `FINAL` to return correct results, it is fine to use it, but be aware of the additional processing it requires. `FINAL` can be applied automatically using [FINAL](../../../operations/settings/settings.md#final) setting to all tables in a query using a session or a user profile. diff --git a/docs/en/sql-reference/statements/select/into-outfile.md b/docs/en/sql-reference/statements/select/into-outfile.md index 352af16042a..5b7196f13e3 100644 --- a/docs/en/sql-reference/statements/select/into-outfile.md +++ b/docs/en/sql-reference/statements/select/into-outfile.md @@ -12,7 +12,7 @@ Compressed files are supported. Compression type is detected by the extension of **Syntax** ```sql -SELECT INTO OUTFILE file_name [AND STDOUT] [APPEND] [COMPRESSION type [LEVEL level]] +SELECT INTO OUTFILE file_name [AND STDOUT] [APPEND | TRUNCATE] [COMPRESSION type [LEVEL level]] ``` `file_name` and `type` are string literals. Supported compression types are: `'none'`, `'gzip'`, `'deflate'`, `'br'`, `'xz'`, `'zstd'`, `'lz4'`, `'bz2'`. 
@@ -26,6 +26,7 @@ SELECT INTO OUTFILE file_name [AND STDOUT] [APPEND] [COMPRESSION typ - The default [output format](../../../interfaces/formats.md) is `TabSeparated` (like in the command-line client batch mode). Use [FORMAT](format.md) clause to change it. - If `AND STDOUT` is mentioned in the query then the output that is written to the file is also displayed on standard output. If used with compression, the plaintext is displayed on standard output. - If `APPEND` is mentioned in the query then the output is appended to an existing file. If compression is used, append cannot be used. +- When writing to a file that already exists, `APPEND` or `TRUNCATE` must be used. **Example** diff --git a/docs/en/sql-reference/statements/select/join.md b/docs/en/sql-reference/statements/select/join.md index 281a1d0436c..0529be06b5d 100644 --- a/docs/en/sql-reference/statements/select/join.md +++ b/docs/en/sql-reference/statements/select/join.md @@ -43,22 +43,23 @@ Additional join types available in ClickHouse: - `LEFT ANTI JOIN` and `RIGHT ANTI JOIN`, a blacklist on “join keys”, without producing a cartesian product. - `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types. - `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below. +- `PASTE JOIN`, performs a horizontal concatenation of two tables. :::note -When [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm) is set to `partial_merge`, `RIGHT JOIN` and `FULL JOIN` are supported only with `ALL` strictness (`SEMI`, `ANTI`, `ANY`, and `ASOF` are not supported). +When [join_algorithm](../../../operations/settings/settings.md#join_algorithm) is set to `partial_merge`, `RIGHT JOIN` and `FULL JOIN` are supported only with `ALL` strictness (`SEMI`, `ANTI`, `ANY`, and `ASOF` are not supported). ::: ## Settings -The default join type can be overridden using [join_default_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting. +The default join type can be overridden using [join_default_strictness](../../../operations/settings/settings.md#join_default_strictness) setting. The behavior of ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting. **See also** -- [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm) -- [join_any_take_last_row](../../../operations/settings/settings.md#settings-join_any_take_last_row) +- [join_algorithm](../../../operations/settings/settings.md#join_algorithm) +- [join_any_take_last_row](../../../operations/settings/settings.md#join_any_take_last_row) - [join_use_nulls](../../../operations/settings/settings.md#join_use_nulls) - [partial_merge_join_optimizations](../../../operations/settings/settings.md#partial_merge_join_optimizations) - [partial_merge_join_rows_in_right_blocks](../../../operations/settings/settings.md#partial_merge_join_rows_in_right_blocks) @@ -269,6 +270,33 @@ For example, consider the following tables: `ASOF` join is **not** supported in the [Join](../../../engines/table-engines/special/join.md) table engine. ::: +## PASTE JOIN Usage + +The result of `PASTE JOIN` is a table that contains all columns from left subquery followed by all columns from the right subquery. 
+The rows are matched based on their positions in the original tables (the order of rows should be defined). +If the subqueries return a different number of rows, extra rows will be cut. + +Example: +```SQL +SELECT * +FROM +( + SELECT number AS a + FROM numbers(2) +) AS t1 +PASTE JOIN +( + SELECT number AS a + FROM numbers(2) + ORDER BY a DESC +) AS t2 + +┌─a─┬─t2.a─┐ +│ 0 │ 1 │ +│ 1 │ 0 │ +└───┴──────┘ +``` + ## Distributed JOIN There are two ways to execute join involving distributed tables: @@ -352,7 +380,7 @@ If you need a `JOIN` for joining with dimension tables (these are relatively sma ### Memory Limitations -By default, ClickHouse uses the [hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm. ClickHouse takes the right_table and creates a hash table for it in RAM. If `join_algorithm = 'auto'` is enabled, then after some threshold of memory consumption, ClickHouse falls back to [merge](https://en.wikipedia.org/wiki/Sort-merge_join) join algorithm. For `JOIN` algorithms description see the [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm) setting. +By default, ClickHouse uses the [hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm. ClickHouse takes the right_table and creates a hash table for it in RAM. If `join_algorithm = 'auto'` is enabled, then after some threshold of memory consumption, ClickHouse falls back to [merge](https://en.wikipedia.org/wiki/Sort-merge_join) join algorithm. For `JOIN` algorithms description see the [join_algorithm](../../../operations/settings/settings.md#join_algorithm) setting. If you need to restrict `JOIN` operation memory consumption use the following settings: diff --git a/docs/en/sql-reference/statements/select/order-by.md b/docs/en/sql-reference/statements/select/order-by.md index 53bdc9041a1..d6432a7b4f8 100644 --- a/docs/en/sql-reference/statements/select/order-by.md +++ b/docs/en/sql-reference/statements/select/order-by.md @@ -5,12 +5,22 @@ sidebar_label: ORDER BY # ORDER BY Clause -The `ORDER BY` clause contains a list of expressions, which can each be attributed with `DESC` (descending) or `ASC` (ascending) modifier which determine the sorting direction. If the direction is not specified, `ASC` is assumed, so it’s usually omitted. The sorting direction applies to a single expression, not to the entire list. Example: `ORDER BY Visits DESC, SearchPhrase`. Sorting is case-sensitive. +The `ORDER BY` clause contains -If you want to sort by column numbers instead of column names, enable the setting [enable_positional_arguments](../../../operations/settings/settings.md#enable-positional-arguments). +- a list of expressions, e.g. `ORDER BY visits, search_phrase`, +- a list of numbers referring to columns in the `SELECT` clause, e.g. `ORDER BY 2, 1`, or +- `ALL` which means all columns of the `SELECT` clause, e.g. `ORDER BY ALL`. -Rows that have identical values for the list of sorting expressions are output in an arbitrary order, which can also be non-deterministic (different each time). -If the ORDER BY clause is omitted, the order of the rows is also undefined, and may be non-deterministic as well. +To disable sorting by column numbers, set setting [enable_positional_arguments](../../../operations/settings/settings.md#enable-positional-arguments) = 0. +To disable sorting by `ALL`, set setting [enable_order_by_all](../../../operations/settings/settings.md#enable-order-by-all) = 0. 
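For illustration, a minimal sketch of the three forms against a hypothetical `hits(search_phrase String, visits UInt64)` table (the first two queries are equivalent here):

```sql
SELECT search_phrase, visits FROM hits ORDER BY visits DESC, search_phrase; -- by expressions
SELECT search_phrase, visits FROM hits ORDER BY 2 DESC, 1;                  -- by positions in the SELECT list
SELECT search_phrase, visits FROM hits ORDER BY ALL;                        -- by all SELECT columns, left to right
```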
+ +Each expression in the `ORDER BY` clause can be qualified with a `DESC` (descending) or `ASC` (ascending) modifier which determines the sorting direction. +Unless an explicit sort order is specified, `ASC` is used by default. +The sorting direction applies to a single expression, not to the entire list, e.g. `ORDER BY Visits DESC, SearchPhrase`. +Also, sorting is performed case-sensitively. + +Rows with identical values for all sort expressions are returned in an arbitrary and non-deterministic order. +If the `ORDER BY` clause is omitted in a `SELECT` statement, the row order is also arbitrary and non-deterministic. ## Sorting of Special Values @@ -265,8 +275,9 @@ Consider disabling `optimize_read_in_order` manually, when running queries that Optimization is supported in the following table engines: -- [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) -- [Merge](../../../engines/table-engines/special/merge.md), [Buffer](../../../engines/table-engines/special/buffer.md), and [MaterializedView](../../../engines/table-engines/special/materializedview.md) table engines over `MergeTree`-engine tables +- [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) (including [materialized views](../../../sql-reference/statements/create/view.md#materialized-view)), +- [Merge](../../../engines/table-engines/special/merge.md), +- [Buffer](../../../engines/table-engines/special/buffer.md) In `MaterializedView`-engine tables the optimization works with views like `SELECT ... FROM merge_tree_table ORDER BY pk`. But it is not supported in the queries like `SELECT ... FROM view ORDER BY pk` if the view query does not have the `ORDER BY` clause. diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index 695801983b7..0fdbbeac235 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -449,7 +449,7 @@ SYSTEM SYNC FILE CACHE [ON CLUSTER cluster_name] ``` -### SYSTEM STOP LISTEN +## SYSTEM STOP LISTEN Closes the socket and gracefully terminates the existing connections to the server on the specified port with the specified protocol. @@ -464,7 +464,7 @@ SYSTEM STOP LISTEN [ON CLUSTER cluster_name] [QUERIES ALL | QUERIES DEFAULT | QU - If `QUERIES DEFAULT [EXCEPT .. [,..]]` modifier is specified, all default protocols are stopped, unless specified with `EXCEPT` clause. - If `QUERIES CUSTOM [EXCEPT .. [,..]]` modifier is specified, all custom protocols are stopped, unless specified with `EXCEPT` clause. -### SYSTEM START LISTEN +## SYSTEM START LISTEN Allows new connections to be established on the specified protocols. @@ -473,3 +473,47 @@ However, if the server on the specified port and protocol was not stopped using ```sql SYSTEM START LISTEN [ON CLUSTER cluster_name] [QUERIES ALL | QUERIES DEFAULT | QUERIES CUSTOM | TCP | TCP WITH PROXY | TCP SECURE | HTTP | HTTPS | MYSQL | GRPC | POSTGRESQL | PROMETHEUS | CUSTOM 'protocol'] ``` + +## Managing Refreshable Materialized Views {#refreshable-materialized-views} + +Commands to control background tasks performed by [Refreshable Materialized Views](../../sql-reference/statements/create/view.md#refreshable-materialized-view). + +Keep an eye on [`system.view_refreshes`](../../operations/system-tables/view_refreshes.md) while using them. + +### SYSTEM REFRESH VIEW + +Trigger an immediate out-of-schedule refresh of a given view. 
+ +```sql +SYSTEM REFRESH VIEW [db.]name +``` + +### SYSTEM STOP VIEW, SYSTEM STOP VIEWS + +Disable periodic refreshing of the given view or all refreshable views. If a refresh is in progress, cancel it too. + +```sql +SYSTEM STOP VIEW [db.]name +``` +```sql +SYSTEM STOP VIEWS +``` + +### SYSTEM START VIEW, SYSTEM START VIEWS + +Enable periodic refreshing for the given view or all refreshable views. No immediate refresh is triggered. + +```sql +SYSTEM START VIEW [db.]name +``` +```sql +SYSTEM START VIEWS +``` + +### SYSTEM CANCEL VIEW + +If there's a refresh in progress for the given view, interrupt and cancel it. Otherwise do nothing. + +```sql +SYSTEM CANCEL VIEW [db.]name +``` diff --git a/docs/en/sql-reference/syntax.md b/docs/en/sql-reference/syntax.md index f5651c2dcb6..6dcb3e75e48 100644 --- a/docs/en/sql-reference/syntax.md +++ b/docs/en/sql-reference/syntax.md @@ -16,7 +16,7 @@ INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def') The `INSERT INTO t VALUES` fragment is parsed by the full parser, and the data `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` is parsed by the fast stream parser. You can also turn on the full parser for the data by using the [input_format_values_interpret_expressions](../operations/settings/settings-formats.md#input_format_values_interpret_expressions) setting. When `input_format_values_interpret_expressions = 1`, ClickHouse first tries to parse values with the fast stream parser. If it fails, ClickHouse tries to use the full parser for the data, treating it like an SQL [expression](#expressions). -Data can have any format. When a query is received, the server calculates no more than [max_query_size](../operations/settings/settings.md#settings-max_query_size) bytes of the request in RAM (by default, 1 MB), and the rest is stream parsed. +Data can have any format. When a query is received, the server calculates no more than [max_query_size](../operations/settings/settings.md#max_query_size) bytes of the request in RAM (by default, 1 MB), and the rest is stream parsed. It allows for avoiding issues with large `INSERT` queries. When using the `Values` format in an `INSERT` query, it may seem that data is parsed the same as expressions in a `SELECT` query, but this is not true. The `Values` format is much more limited. diff --git a/docs/en/sql-reference/table-functions/cluster.md b/docs/en/sql-reference/table-functions/cluster.md index a083c6b89a6..ad92ab39183 100644 --- a/docs/en/sql-reference/table-functions/cluster.md +++ b/docs/en/sql-reference/table-functions/cluster.md @@ -55,5 +55,5 @@ Connection settings like `host`, `port`, `user`, `password`, `compression`, `sec **See Also** -- [skip_unavailable_shards](../../operations/settings/settings.md#settings-skip_unavailable_shards) -- [load_balancing](../../operations/settings/settings.md#settings-load_balancing) +- [skip_unavailable_shards](../../operations/settings/settings.md#skip_unavailable_shards) +- [load_balancing](../../operations/settings/settings.md#load_balancing) diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index 98498eb8823..3a63811add6 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -128,17 +128,17 @@ Reading data from `table.csv`, located in `archive1.zip` or/and `archive2.zip`: SELECT * FROM file('user_files/archives/archive{1..2}.zip :: table.csv'); ``` -## Globbing {#globs_in_path} +## Globs in path {#globs_in_path} Paths may use globbing. 
Files must match the whole path pattern, not only the suffix or prefix. - `*` — Represents arbitrarily many characters except `/` but including the empty string. - `?` — Represents an arbitrary single character. -- `{some_string,another_string,yet_another_one}` — Represents any of alternative strings `'some_string', 'another_string', 'yet_another_one'`. The strings may contain `/`. +- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. The strings can contain the `/` symbol. - `{N..M}` — Represents any number `>= N` and `<= M`. - `**` - Represents all files inside a folder recursively. -Constructions with `{}` are similar to the [remote](remote.md) table function. +Constructions with `{}` are similar to the [remote](remote.md) and [hdfs](hdfs.md) table functions. **Example** @@ -199,11 +199,11 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3 ## Settings {#settings} -- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default. +- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-empty_if-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default. - [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default. - [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default. - [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows to skip empty files while reading. Disabled by default. -- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - method of reading data from storage file, one of: read, pread, mmap (only for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local. +- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-empty_if-not-exists) - method of reading data from storage file, one of: read, pread, mmap (only for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local. **See Also** diff --git a/docs/en/sql-reference/table-functions/fuzzJSON.md b/docs/en/sql-reference/table-functions/fuzzJSON.md index 74ccb0bcb8a..a64f35691f6 100644 --- a/docs/en/sql-reference/table-functions/fuzzJSON.md +++ b/docs/en/sql-reference/table-functions/fuzzJSON.md @@ -19,6 +19,7 @@ fuzzJSON({ named_collection [option=value [,..]] | json_str[, random_seed] }) - `json_str` (String) - The source string representing structured data in JSON format. - `random_seed` (UInt64) - Manual random seed for producing stable results. - `reuse_output` (boolean) - Reuse the output from a fuzzing process as input for the next fuzzer. + - `malform_output` (boolean) - Generate a string that cannot be parsed as a JSON object. - `max_output_length` (UInt64) - Maximum allowable length of the generated or perturbed JSON string. - `probability` (Float64) - The probability to fuzz a JSON field (a key-value pair). Must be within [0, 1] range. - `max_nesting_level` (UInt64) - The maximum allowed depth of nested structures within the JSON data. 
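The usage examples below reference a named collection `json_nc`; a hedged sketch of how such a collection might be defined (the collection name and JSON string are assumptions, and named collections must be enabled on the server):

```sql
-- define a reusable source JSON for fuzzJSON (hypothetical collection name)
CREATE NAMED COLLECTION json_nc AS json_str = '{"name" : "FuzzJSON"}';

-- fuzz the JSON from the collection, overriding options per call
SELECT * FROM fuzzJSON(json_nc, random_seed = 1337) LIMIT 3;
```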
@@ -84,3 +85,13 @@ SELECT * FROM fuzzJSON('{"id":1}', 1234) LIMIT 3; {"BRjE":16137826149911306846} {"XjKE":15076727133550123563} ``` + +``` sql +SELECT * FROM fuzzJSON(json_nc, json_str='{"name" : "FuzzJSON"}', random_seed=1337, malform_output=true) LIMIT 3; +``` + +``` text +U"name":"FuzzJSON*"SpByjZKtr2VAyHCO"falseh +{"name"keFuzzJSON, "g6vVO7TCIk":jTt^ +{"DBhz":YFuzzJSON5} +``` diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index 31780e30e8e..92f904b8841 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -41,14 +41,14 @@ LIMIT 2 ## Globs in path {#globs_in_path} -Multiple path components can have globs. For being processed file should exists and matches to the whole path pattern (not only suffix or prefix). +Paths may use globbing. Files must match the whole path pattern, not only the suffix or prefix. -- `*` — Substitutes any number of any characters except `/` including empty string. -- `?` — Substitutes any single character. +- `*` — Represents arbitrarily many characters except `/` but including the empty string. +- `?` — Represents an arbitrary single character. - `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. The strings can contain the `/` symbol. -- `{N..M}` — Substitutes any number in range from N to M including both borders. +- `{N..M}` — Represents any number `>= N` and `<= M`. -Constructions with `{}` are similar to the [remote](../../sql-reference/table-functions/remote.md)) table function. +Constructions with `{}` are similar to the [remote](remote.md) and [file](file.md) table functions. **Example** @@ -100,7 +100,7 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin ## Storage Settings {#storage-settings} -- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default. +- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default. - [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default. - [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default. - [ignore_access_denied_multidirectory_globs](/docs/en/operations/settings/settings.md#ignore_access_denied_multidirectory_globs) - allows to ignore permission denied errors for multi-directory globs. diff --git a/docs/en/sql-reference/table-functions/remote.md b/docs/en/sql-reference/table-functions/remote.md index 3ca177050d3..228f4a4c7e1 100644 --- a/docs/en/sql-reference/table-functions/remote.md +++ b/docs/en/sql-reference/table-functions/remote.md @@ -165,5 +165,5 @@ The following pattern types are supported. - `{0n..0m}` - A range of numbers with leading zeroes. This pattern preserves leading zeroes in indices. For instance, `example{01..03}-1` generates `example01-1`, `example02-1` and `example03-1`. - `{a|b}` - Any number of variants separated by a `|`. The pattern specifies replicas. For instance, `example01-{1|2}` generates replicas `example01-1` and `example01-2`. -The query will be sent to the first healthy replica. 
However, for `remote` the replicas are iterated in the order currently set in the [load_balancing](../../operations/settings/settings.md#settings-load_balancing) setting. +The query will be sent to the first healthy replica. However, for `remote` the replicas are iterated in the order currently set in the [load_balancing](../../operations/settings/settings.md#load_balancing) setting. The number of generated addresses is limited by [table_function_remote_max_addresses](../../operations/settings/settings.md#table_function_remote_max_addresses) setting. diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index dc11259c626..8065f066666 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -16,7 +16,7 @@ When using the `s3 table function` with [`INSERT INTO...SELECT`](../../sql-refer **Syntax** ``` sql -s3(path [, NOSIGN | aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression]) +s3(path [, NOSIGN | aws_access_key_id, aws_secret_access_key [,session_token]] [,format] [,structure] [,compression]) ``` :::tip GCS @@ -38,6 +38,8 @@ For GCS, substitute your HMAC key and HMAC secret where you see `aws_access_key_ ::: - `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed. +- `access_key_id`, `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional. +- `session_token` - Session token to use with the given keys. Optional when passing keys. - `format` — The [format](../../interfaces/formats.md#formats) of the file. - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. - `compression` — Parameter is optional. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension. @@ -236,7 +238,7 @@ LIMIT 5; ## Storage Settings {#storage-settings} -- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default. +- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default. - [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default. - [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default. diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index 799eb31446a..080c9860519 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -10,14 +10,15 @@ Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) and Google **Syntax** ``` sql -s3Cluster(cluster_name, source, [,access_key_id, secret_access_key] [,format] [,structure]) +s3Cluster(cluster_name, source, [,access_key_id, secret_access_key, [session_token]] [,format] [,structure]) ``` **Arguments** - `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers. - `source` — URL to a file or a bunch of files. 
Supports following wildcards in readonly mode: `*`, `**`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path). -- `access_key_id` and `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional. +- `access_key_id`, `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional. +- `session_token` — Session token to use with the given keys. Optional when passing keys. - `format` — The [format](../../interfaces/formats.md#formats) of the file. - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. diff --git a/docs/ru/development/style.md b/docs/ru/development/style.md index 49c4aade4e9..cd1297504af 100644 --- a/docs/ru/development/style.md +++ b/docs/ru/development/style.md @@ -493,7 +493,7 @@ catch (const DB::Exception & e) ``` cpp if (0 != close(fd)) - throwFromErrno("Cannot close file " + file_name, ErrorCodes::CANNOT_CLOSE_FILE); + throw ErrnoException(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file {}", file_name); ``` `assert` не используются. diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index ebb692bb270..9f223157ea7 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -369,6 +369,9 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT | [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | | [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | | [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [has](../../../sql-reference/functions/array-functions.md#function-has) | ✗ | ✗ | ✔ | ✔ | ✔ | +| [hasAny](../../../sql-reference/functions/array-functions.md#function-hasAny) | ✗ | ✗ | ✔ | ✔ | ✔ | +| [hasAll](../../../sql-reference/functions/array-functions.md#function-hasAll) | ✗ | ✗ | ✗ | ✗ | ✔ | | hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | Функции с постоянным аргументом, который меньше, чем размер ngram не могут использовать индекс `ngrambf_v1` для оптимизации запроса. 
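To make the newly documented rows concrete, a minimal sketch (table and column names are hypothetical) of a `bloom_filter` data-skipping index that `has`/`hasAny` can use:

```sql
CREATE TABLE tags_local
(
    id   UInt64,
    tags Array(String),
    INDEX tags_bf tags TYPE bloom_filter GRANULARITY 4
)
ENGINE = MergeTree
ORDER BY id;

-- has()/hasAny() can use the bloom_filter index to skip granules with no matching tags
SELECT count() FROM tags_local WHERE hasAny(tags, ['clickhouse', 'olap']);
```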
diff --git a/docs/ru/getting-started/example-datasets/criteo.md b/docs/ru/getting-started/example-datasets/criteo.md index 5ba55795632..4818e9e69d4 100644 --- a/docs/ru/getting-started/example-datasets/criteo.md +++ b/docs/ru/getting-started/example-datasets/criteo.md @@ -1,14 +1,14 @@ --- slug: /ru/getting-started/example-datasets/criteo sidebar_position: 18 -sidebar_label: "Терабайт логов кликов от Criteo" +sidebar_label: "Терабайтный журнал посещений сайта от Criteo" --- -# Терабайт логов кликов от Criteo {#terabait-logov-klikov-ot-criteo} +# Терабайтный журнал посещений сайта от Criteo {#terabaitnyi-zhurnal} Скачайте данные с http://labs.criteo.com/downloads/download-terabyte-click-logs/ -Создайте таблицу для импорта лога: +Создайте таблицу для импорта журнала: ``` sql CREATE TABLE criteo_log (date Date, clicked UInt8, int1 Int32, int2 Int32, int3 Int32, int4 Int32, int5 Int32, int6 Int32, int7 Int32, int8 Int32, int9 Int32, int10 Int32, int11 Int32, int12 Int32, int13 Int32, cat1 String, cat2 String, cat3 String, cat4 String, cat5 String, cat6 String, cat7 String, cat8 String, cat9 String, cat10 String, cat11 String, cat12 String, cat13 String, cat14 String, cat15 String, cat16 String, cat17 String, cat18 String, cat19 String, cat20 String, cat21 String, cat22 String, cat23 String, cat24 String, cat25 String, cat26 String) ENGINE = Log @@ -69,7 +69,7 @@ CREATE TABLE criteo ) ENGINE = MergeTree(date, intHash32(icat1), (date, intHash32(icat1)), 8192) ``` -Преобразуем данные из сырого лога и положим во вторую таблицу: +Преобразуйте импортированные данные, разложив их по таблице сконвертированных данных: ``` sql INSERT INTO criteo SELECT date, clicked, int1, int2, int3, int4, int5, int6, int7, int8, int9, int10, int11, int12, int13, reinterpretAsUInt32(unhex(cat1)) AS icat1, reinterpretAsUInt32(unhex(cat2)) AS icat2, reinterpretAsUInt32(unhex(cat3)) AS icat3, reinterpretAsUInt32(unhex(cat4)) AS icat4, reinterpretAsUInt32(unhex(cat5)) AS icat5, reinterpretAsUInt32(unhex(cat6)) AS icat6, reinterpretAsUInt32(unhex(cat7)) AS icat7, reinterpretAsUInt32(unhex(cat8)) AS icat8, reinterpretAsUInt32(unhex(cat9)) AS icat9, reinterpretAsUInt32(unhex(cat10)) AS icat10, reinterpretAsUInt32(unhex(cat11)) AS icat11, reinterpretAsUInt32(unhex(cat12)) AS icat12, reinterpretAsUInt32(unhex(cat13)) AS icat13, reinterpretAsUInt32(unhex(cat14)) AS icat14, reinterpretAsUInt32(unhex(cat15)) AS icat15, reinterpretAsUInt32(unhex(cat16)) AS icat16, reinterpretAsUInt32(unhex(cat17)) AS icat17, reinterpretAsUInt32(unhex(cat18)) AS icat18, reinterpretAsUInt32(unhex(cat19)) AS icat19, reinterpretAsUInt32(unhex(cat20)) AS icat20, reinterpretAsUInt32(unhex(cat21)) AS icat21, reinterpretAsUInt32(unhex(cat22)) AS icat22, reinterpretAsUInt32(unhex(cat23)) AS icat23, reinterpretAsUInt32(unhex(cat24)) AS icat24, reinterpretAsUInt32(unhex(cat25)) AS icat25, reinterpretAsUInt32(unhex(cat26)) AS icat26 FROM criteo_log; diff --git a/docs/ru/getting-started/example-datasets/github-events.mdx b/docs/ru/getting-started/example-datasets/github-events.mdx index c6e58a9f5a4..84f445074af 100644 --- a/docs/ru/getting-started/example-datasets/github-events.mdx +++ b/docs/ru/getting-started/example-datasets/github-events.mdx @@ -1,9 +1,9 @@ --- slug: /ru/getting-started/example-datasets/github-events sidebar_label: GitHub Events -title: "GitHub Events Dataset" +title: "Набор данных о событиях на GitHub" --- -import Content from '@site/docs/en/getting-started/example-datasets/github-events.md'; +Набор данных о событиях на GitHub с 2011 года 
по 6 декабря 2020 года содержит 3,1 млрд записей. Объём исходных данных — 75 ГБ, для загрузки в Clickhouse потребуется около 200 ГБ свободного пространства хранения (при использовании метода сжатия lz4). - +Полное описание набора, инструкции по загрузке и запросы к нему опубликованы на https://ghe.clickhouse.tech/ diff --git a/docs/ru/operations/utilities/clickhouse-format.md b/docs/ru/operations/utilities/clickhouse-format.md index af66930b368..9c4b7304940 100644 --- a/docs/ru/operations/utilities/clickhouse-format.md +++ b/docs/ru/operations/utilities/clickhouse-format.md @@ -1,115 +1,115 @@ ---- +--- slug: /ru/operations/utilities/clickhouse-format -sidebar_position: 65 -sidebar_label: clickhouse-format ---- - -# clickhouse-format {#clickhouse-format} - -Позволяет форматировать входящие запросы. - -Ключи: - -- `--help` или`-h` — выводит описание ключей. -- `--query` — форматирует запрос любой длины и сложности. -- `--hilite` — добавляет подсветку синтаксиса с экранированием символов. -- `--oneline` — форматирование в одну строку. -- `--quiet` или `-q` — проверяет синтаксис без вывода результата. -- `--multiquery` or `-n` — поддерживает несколько запросов в одной строке. -- `--obfuscate` — обфусцирует вместо форматирования. -- `--seed <строка>` — задает строку, которая определяет результат обфускации. -- `--backslash` — добавляет обратный слеш в конце каждой строки отформатированного запроса. Удобно использовать если многострочный запрос скопирован из интернета или другого источника и его нужно выполнить из командной строки. - -## Примеры {#examples} - -1. Форматирование запроса: - -```bash -$ clickhouse-format --query "select number from numbers(10) where number%2 order by number desc;" -``` - -Результат: - -```text -SELECT number -FROM numbers(10) -WHERE number % 2 -ORDER BY number DESC -``` - -2. Подсветка синтаксиса и форматирование в одну строку: - -```bash -$ clickhouse-format --oneline --hilite <<< "SELECT sum(number) FROM numbers(5);" -``` - -Результат: - -```sql -SELECT sum(number) FROM numbers(5) -``` - -3. Несколько запросов в одной строке: - -```bash -$ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);" -``` - -Результат: - -```text -SELECT * -FROM -( - SELECT 1 AS x - UNION ALL - SELECT 1 - UNION DISTINCT - SELECT 3 -) -; -``` - -4. Обфускация: - -```bash -$ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;" -``` - -Результат: - -```text -SELECT treasury_mammoth_hazelnut BETWEEN nutmeg AND span, CASE WHEN chive >= 116 THEN switching ELSE ANYTHING END; -``` - -Тот же запрос с другой инициализацией обфускатора: - -```bash -$ clickhouse-format --seed World --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;" -``` - -Результат: - -```text -SELECT horse_tape_summer BETWEEN folklore AND moccasins, CASE WHEN intestine >= 116 THEN nonconformist ELSE FORESTRY END; -``` - -5. Добавление обратного слеша: - -```bash -$ clickhouse-format --backslash <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);" -``` - -Результат: - -```text -SELECT * \ -FROM \ -( \ - SELECT 1 AS x \ - UNION ALL \ - SELECT 1 \ - UNION DISTINCT \ - SELECT 3 \ -) -``` +sidebar_position: 65 +sidebar_label: clickhouse-format +--- + +# clickhouse-format {#clickhouse-format} + +Позволяет форматировать входящие запросы. + +Ключи: + +- `--help` или`-h` — выводит описание ключей. 
+- `--query` — форматирует запрос любой длины и сложности. +- `--hilite` — добавляет подсветку синтаксиса с экранированием символов. +- `--oneline` — форматирование в одну строку. +- `--quiet` или `-q` — проверяет синтаксис без вывода результата. +- `--multiquery` or `-n` — поддерживает несколько запросов в одной строке. +- `--obfuscate` — обфусцирует вместо форматирования. +- `--seed <строка>` — задает строку, которая определяет результат обфускации. +- `--backslash` — добавляет обратный слеш в конце каждой строки отформатированного запроса. Удобно использовать если многострочный запрос скопирован из интернета или другого источника и его нужно выполнить из командной строки. + +## Примеры {#examples} + +1. Форматирование запроса: + +```bash +$ clickhouse-format --query "select number from numbers(10) where number%2 order by number desc;" +``` + +Результат: + +```text +SELECT number +FROM numbers(10) +WHERE number % 2 +ORDER BY number DESC +``` + +2. Подсветка синтаксиса и форматирование в одну строку: + +```bash +$ clickhouse-format --oneline --hilite <<< "SELECT sum(number) FROM numbers(5);" +``` + +Результат: + +```sql +SELECT sum(number) FROM numbers(5) +``` + +3. Несколько запросов в одной строке: + +```bash +$ clickhouse-format -n <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);" +``` + +Результат: + +```text +SELECT * +FROM +( + SELECT 1 AS x + UNION ALL + SELECT 1 + UNION DISTINCT + SELECT 3 +) +; +``` + +4. Обфускация: + +```bash +$ clickhouse-format --seed Hello --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;" +``` + +Результат: + +```text +SELECT treasury_mammoth_hazelnut BETWEEN nutmeg AND span, CASE WHEN chive >= 116 THEN switching ELSE ANYTHING END; +``` + +Тот же запрос с другой инициализацией обфускатора: + +```bash +$ clickhouse-format --seed World --obfuscate <<< "SELECT cost_first_screen BETWEEN a AND b, CASE WHEN x >= 123 THEN y ELSE NULL END;" +``` + +Результат: + +```text +SELECT horse_tape_summer BETWEEN folklore AND moccasins, CASE WHEN intestine >= 116 THEN nonconformist ELSE FORESTRY END; +``` + +5. Добавление обратного слеша: + +```bash +$ clickhouse-format --backslash <<< "SELECT * FROM (SELECT 1 AS x UNION ALL SELECT 1 UNION DISTINCT SELECT 3);" +``` + +Результат: + +```text +SELECT * \ +FROM \ +( \ + SELECT 1 AS x \ + UNION ALL \ + SELECT 1 \ + UNION DISTINCT \ + SELECT 3 \ +) +``` diff --git a/docs/ru/sql-reference/aggregate-functions/reference/sparkbar.md b/docs/ru/sql-reference/aggregate-functions/reference/sparkbar.md index 958a4bd3504..3b36ee04095 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/sparkbar.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/sparkbar.md @@ -1,62 +1,62 @@ ---- -slug: /ru/sql-reference/aggregate-functions/reference/sparkbar -sidebar_position: 311 -sidebar_label: sparkbar ---- - -# sparkbar {#sparkbar} - -Функция строит гистограмму частот по заданным значениям `x` и частоте повторения этих значений `y` на интервале `[min_x, max_x]`. Повторения для всех `x`, попавших в один бакет, усредняются, поэтому данные должны быть предварительно агрегированы. Отрицательные повторения игнорируются. - -Если интервал для построения не указан, то в качестве нижней границы интервала будет взято минимальное значение `x`, а в качестве верхней границы — максимальное значение `x`. -Значения `x` вне указанного интервала игнорируются. 
- - -**Синтаксис** - -``` sql -sparkbar(width[, min_x, max_x])(x, y) -``` - -**Параметры** - -- `width` — Количество столбцов гистограммы. Тип: [Integer](../../../sql-reference/data-types/int-uint.md). - -- `min_x` — Начало интервала. Необязательный параметр. -- `max_x` — Конец интервала. Необязательный параметр. - -**Аргументы** - -- `x` — Поле со значениями. -- `y` — Поле с частотой повторения значений. - - -**Возвращаемые значения** - -- Гистограмма частот. - -**Пример** - -Запрос: - -``` sql -CREATE TABLE spark_bar_data (`value` Int64, `event_date` Date) ENGINE = MergeTree ORDER BY event_date; - -INSERT INTO spark_bar_data VALUES (1,'2020-01-01'), (3,'2020-01-02'), (4,'2020-01-02'), (-3,'2020-01-02'), (5,'2020-01-03'), (2,'2020-01-04'), (3,'2020-01-05'), (7,'2020-01-06'), (6,'2020-01-07'), (8,'2020-01-08'), (2,'2020-01-11'); - -SELECT sparkbar(9)(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date); - -SELECT sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date); -``` - -Результат: - -``` text -┌─sparkbar(9)(event_date, cnt)─┐ -│ ▂▅▂▃▆█ ▂ │ -└──────────────────────────────┘ - -┌─sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date, cnt)─┐ -│ ▂▅▂▃▇▆█ │ -└──────────────────────────────────────────────────────────────────────────┘ -``` +--- +slug: /ru/sql-reference/aggregate-functions/reference/sparkbar +sidebar_position: 311 +sidebar_label: sparkbar +--- + +# sparkbar {#sparkbar} + +Функция строит гистограмму частот по заданным значениям `x` и частоте повторения этих значений `y` на интервале `[min_x, max_x]`. Повторения для всех `x`, попавших в один бакет, усредняются, поэтому данные должны быть предварительно агрегированы. Отрицательные повторения игнорируются. + +Если интервал для построения не указан, то в качестве нижней границы интервала будет взято минимальное значение `x`, а в качестве верхней границы — максимальное значение `x`. +Значения `x` вне указанного интервала игнорируются. + + +**Синтаксис** + +``` sql +sparkbar(width[, min_x, max_x])(x, y) +``` + +**Параметры** + +- `width` — Количество столбцов гистограммы. Тип: [Integer](../../../sql-reference/data-types/int-uint.md). + +- `min_x` — Начало интервала. Необязательный параметр. +- `max_x` — Конец интервала. Необязательный параметр. + +**Аргументы** + +- `x` — Поле со значениями. +- `y` — Поле с частотой повторения значений. + + +**Возвращаемые значения** + +- Гистограмма частот. 
+ +**Пример** + +Запрос: + +``` sql +CREATE TABLE spark_bar_data (`value` Int64, `event_date` Date) ENGINE = MergeTree ORDER BY event_date; + +INSERT INTO spark_bar_data VALUES (1,'2020-01-01'), (3,'2020-01-02'), (4,'2020-01-02'), (-3,'2020-01-02'), (5,'2020-01-03'), (2,'2020-01-04'), (3,'2020-01-05'), (7,'2020-01-06'), (6,'2020-01-07'), (8,'2020-01-08'), (2,'2020-01-11'); + +SELECT sparkbar(9)(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date); + +SELECT sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date); +``` + +Результат: + +``` text +┌─sparkbar(9)(event_date, cnt)─┐ +│ ▂▅▂▃▆█ ▂ │ +└──────────────────────────────┘ + +┌─sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date, cnt)─┐ +│ ▂▅▂▃▇▆█ │ +└──────────────────────────────────────────────────────────────────────────┘ +``` diff --git a/docs/ru/sql-reference/table-functions/file.md b/docs/ru/sql-reference/table-functions/file.md index 7c709619679..5331cf00728 100644 --- a/docs/ru/sql-reference/table-functions/file.md +++ b/docs/ru/sql-reference/table-functions/file.md @@ -76,14 +76,16 @@ SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 U ## Шаблоны поиска в компонентах пути {#globs-in-path} -При описании пути к файлу могут использоваться шаблоны поиска. Обрабатываются только те файлы, у которых путь и название соответствуют шаблону полностью (а не только префикс или суффикс). +Путь к файлу может содержать шаблоны в режиме доступа только для чтения. +Шаблоны могут содержаться в разных частях пути. +Обрабатываться будут те и только те файлы, которые существуют в файловой системе и удовлетворяют всему шаблону пути. - `*` — заменяет любое количество любых символов кроме `/`, включая отсутствие символов. - `?` — заменяет ровно один любой символ. - `{some_string,another_string,yet_another_one}` — заменяет любую из строк `'some_string', 'another_string', 'yet_another_one'`. Эти строки также могут содержать символ `/`. - `{N..M}` — заменяет любое число в интервале от `N` до `M` включительно (может содержать ведущие нули). -Конструкция с `{}` аналогична табличной функции [remote](remote.md). +Конструкция с `{}` аналогична табличным функциям [remote](remote.md), [hdfs](hdfs.md). **Пример** diff --git a/docs/ru/sql-reference/table-functions/hdfs.md b/docs/ru/sql-reference/table-functions/hdfs.md index b70de5e3a4f..6dcb1a21791 100644 --- a/docs/ru/sql-reference/table-functions/hdfs.md +++ b/docs/ru/sql-reference/table-functions/hdfs.md @@ -14,7 +14,7 @@ hdfs(URI, format, structure) **Входные параметры** -- `URI` — URI файла в HDFS. Путь к файлу поддерживает следующие шаблоны в режиме доступа только для чтения `*`, `?`, `{abc,def}` и `{N..M}`, где `N`, `M` — числа, \``'abc', 'def'` — строки. +- `URI` — URI файла в HDFS. - `format` — [формат](../../interfaces/formats.md#formats) файла. - `structure` — структура таблицы. Формат `'column1_name column1_type, column2_name column2_type, ...'`. @@ -41,19 +41,22 @@ LIMIT 2 ## Шаблоны поиска в компонентах пути {#globs-in-path} -- `*` — Заменяет любое количество любых символов кроме `/`, включая отсутствие символов. +Путь к файлу может содержать шаблоны в режиме доступа только для чтения. +Шаблоны могут содержаться в разных частях пути. +Обрабатываться будут те и только те файлы, которые существуют в файловой системе и удовлетворяют всему шаблону пути. 
+ + +- `*` — Заменяет любое количество любых символов (кроме `/`), включая отсутствие символов. - `?` — Заменяет ровно один любой символ. - `{some_string,another_string,yet_another_one}` — Заменяет любую из строк `'some_string', 'another_string', 'yet_another_one'`. Эти строки также могут содержать символ `/`. - `{N..M}` — Заменяет любое число в интервале от `N` до `M` включительно (может содержать ведущие нули). -Конструкция с `{}` аналогична табличной функции [remote](remote.md). +Конструкция с `{}` аналогична табличной функции [remote](remote.md), [file](file.md). :::danger Предупреждение -Если ваш список файлов содержит интервал с ведущими нулями, используйте конструкцию с фигурными скобками для каждой цифры по отдельности или используйте `?`. +Если ваш список файлов содержит интервал с ведущими нулями, используйте отдельную конструкцию с фигурными скобками для каждой цифры или используйте `?`. ::: -Шаблоны могут содержаться в разных частях пути. Обрабатываться будут ровно те файлы, которые и удовлетворяют всему шаблону пути, и существуют в файловой системе. - ## Виртуальные столбцы {#virtualnye-stolbtsy} - `_path` — Путь к файлу. diff --git a/docs/ru/sql-reference/table-functions/s3.md b/docs/ru/sql-reference/table-functions/s3.md index 7deef68f47f..fe40cb0c507 100644 --- a/docs/ru/sql-reference/table-functions/s3.md +++ b/docs/ru/sql-reference/table-functions/s3.md @@ -11,7 +11,7 @@ sidebar_label: s3 **Синтаксис** ``` sql -s3(path [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression]) +s3(path [,access_key_id, secret_access_key [,session_token]] [,format] [,structure] [,compression]) ``` **Aргументы** diff --git a/docs/ru/sql-reference/table-functions/s3Cluster.md b/docs/ru/sql-reference/table-functions/s3Cluster.md index b8f34d805ff..b382bf5e384 100644 --- a/docs/ru/sql-reference/table-functions/s3Cluster.md +++ b/docs/ru/sql-reference/table-functions/s3Cluster.md @@ -11,14 +11,14 @@ sidebar_label: s3Cluster **Синтаксис** ``` sql -s3Cluster(cluster_name, source, [,access_key_id, secret_access_key] [,format] [,structure]) +s3Cluster(cluster_name, source, [,access_key_id, secret_access_key [,session_token]] [,format] [,structure]) ``` **Аргументы** - `cluster_name` — имя кластера, используемое для создания набора адресов и параметров подключения к удаленным и локальным серверам. - `source` — URL файла или нескольких файлов. Поддерживает следующие символы подстановки: `*`, `?`, `{'abc','def'}` и `{N..M}`, где `N`, `M` — числа, `abc`, `def` — строки. Подробнее смотрите в разделе [Символы подстановки](../../engines/table-engines/integrations/s3.md#wildcards-in-path). -- `access_key_id` и `secret_access_key` — ключи, указывающие на учетные данные для использования с точкой приема запроса. Необязательные параметры. +- `access_key_id`, `secret_access_key` и `session_token` — ключи, указывающие на учетные данные для использования с точкой приема запроса. Необязательные параметры. - `format` — [формат](../../interfaces/formats.md#formats) файла. - `structure` — структура таблицы. Формат `'column1_name column1_type, column2_name column2_type, ...'`. 
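To make the new `session_token` argument concrete, a hedged sketch following the documented argument order; the cluster name, bucket URL and credentials below are placeholders:

```sql
SELECT count()
FROM s3Cluster(
    'default',                                                 -- cluster name
    'https://example-bucket.s3.amazonaws.com/data/*.parquet',  -- source
    'EXAMPLE_ACCESS_KEY_ID',
    'example_secret_access_key',
    'example_session_token',                                   -- temporary (STS) session token
    'Parquet'
);
```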
diff --git a/docs/zh/development/style.md b/docs/zh/development/style.md index 977b4dce92a..c0a08291e02 100644 --- a/docs/zh/development/style.md +++ b/docs/zh/development/style.md @@ -485,7 +485,7 @@ catch (const DB::Exception & e) ``` cpp if (0 != close(fd)) - throwFromErrno("Cannot close file " + file_name, ErrorCodes::CANNOT_CLOSE_FILE); + throw ErrnoException(ErrorCodes::CANNOT_CLOSE_FILE, "Cannot close file {}", file_name); ``` `不要使用断言`。 diff --git a/docs/zh/engines/table-engines/mergetree-family/mergetree.md b/docs/zh/engines/table-engines/mergetree-family/mergetree.md index ef46afbcbd5..bfa69338657 100644 --- a/docs/zh/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/mergetree.md @@ -1,819 +1,822 @@ ---- -slug: /zh/engines/table-engines/mergetree-family/mergetree ---- -# MergeTree {#table_engines-mergetree} - -Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及该系列(`*MergeTree`)中的其他引擎。 - -`MergeTree` 系列的引擎被设计用于插入极大量的数据到一张表当中。数据可以以数据片段的形式一个接着一个的快速写入,数据片段在后台按照一定的规则进行合并。相比在插入时不断修改(重写)已存储的数据,这种策略会高效很多。 - -主要特点: - -- 存储的数据按主键排序。 - - 这使得您能够创建一个小型的稀疏索引来加快数据检索。 - -- 如果指定了 [分区键](custom-partitioning-key.md) 的话,可以使用分区。 - - 在相同数据集和相同结果集的情况下 ClickHouse 中某些带分区的操作会比普通操作更快。查询中指定了分区键时 ClickHouse 会自动截取分区数据。这也有效增加了查询性能。 - -- 支持数据副本。 - - `ReplicatedMergeTree` 系列的表提供了数据副本功能。更多信息,请参阅 [数据副本](replication.md) 一节。 - -- 支持数据采样。 - - 需要的话,您可以给表设置一个采样方法。 - -:::info -[合并](../special/merge.md#merge) 引擎并不属于 `*MergeTree` 系列。 -::: - -## 建表 {#table_engine-mergetree-creating-a-table} - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], - ... - INDEX index_name1 expr1 TYPE type1(...) GRANULARITY value1, - INDEX index_name2 expr2 TYPE type2(...) GRANULARITY value2 -) ENGINE = MergeTree() -ORDER BY expr -[PARTITION BY expr] -[PRIMARY KEY expr] -[SAMPLE BY expr] -[TTL expr [DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'], ...] -[SETTINGS name=value, ...] -``` - -对于以上参数的描述,可参考 [CREATE 语句 的描述](../../../engines/table-engines/mergetree-family/mergetree.md) 。 - - - -**子句** - -- `ENGINE` - 引擎名和参数。 `ENGINE = MergeTree()`. `MergeTree` 引擎没有参数。 - -- `ORDER BY` — 排序键。 - - 可以是一组列的元组或任意的表达式。 例如: `ORDER BY (CounterID, EventDate)` 。 - - 如果没有使用 `PRIMARY KEY` 显式指定的主键,ClickHouse 会使用排序键作为主键。 - - 如果不需要排序,可以使用 `ORDER BY tuple()`. 
参考 [选择主键](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#selecting-the-primary-key) - -- `PARTITION BY` — [分区键](custom-partitioning-key.md) ,可选项。 - - 大多数情况下,不需要使用分区键。即使需要使用,也不需要使用比月更细粒度的分区键。分区不会加快查询(这与 ORDER BY 表达式不同)。永远也别使用过细粒度的分区键。不要使用客户端指定分区标识符或分区字段名称来对数据进行分区(而是将分区字段标识或名称作为 ORDER BY 表达式的第一列来指定分区)。 - - 要按月分区,可以使用表达式 `toYYYYMM(date_column)` ,这里的 `date_column` 是一个 [Date](../../../engines/table-engines/mergetree-family/mergetree.md) 类型的列。分区名的格式会是 `"YYYYMM"` 。 - -- `PRIMARY KEY` - 如果要 [选择与排序键不同的主键](#choosing-a-primary-key-that-differs-from-the-sorting-key),在这里指定,可选项。 - - 默认情况下主键跟排序键(由 `ORDER BY` 子句指定)相同。 - 因此,大部分情况下不需要再专门指定一个 `PRIMARY KEY` 子句。 - -- `SAMPLE BY` - 用于抽样的表达式,可选项。 - - 如果要用抽样表达式,主键中必须包含这个表达式。例如: - `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))` 。 - -- `TTL` - 指定行存储的持续时间并定义数据片段在硬盘和卷上的移动逻辑的规则列表,可选项。 - - 表达式中必须存在至少一个 `Date` 或 `DateTime` 类型的列,比如: - - `TTL date + INTERVAl 1 DAY` - - 规则的类型 `DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'`指定了当满足条件(到达指定时间)时所要执行的动作:移除过期的行,还是将数据片段(如果数据片段中的所有行都满足表达式的话)移动到指定的磁盘(`TO DISK 'xxx'`) 或 卷(`TO VOLUME 'xxx'`)。默认的规则是移除(`DELETE`)。可以在列表中指定多个规则,但最多只能有一个`DELETE`的规则。 - - 更多细节,请查看 [表和列的 TTL](#table_engine-mergetree-ttl) - -- `SETTINGS` — 控制 `MergeTree` 行为的额外参数,可选项: - - - `index_granularity` — 索引粒度。索引中相邻的『标记』间的数据行数。默认值8192 。参考[数据存储](#mergetree-data-storage)。 - - `index_granularity_bytes` — 索引粒度,以字节为单位,默认值: 10Mb。如果想要仅按数据行数限制索引粒度, 请设置为0(不建议)。 - - `min_index_granularity_bytes` - 允许的最小数据粒度,默认值:1024b。该选项用于防止误操作,添加了一个非常低索引粒度的表。参考[数据存储](#mergetree-data-storage) - - `enable_mixed_granularity_parts` — 是否启用通过 `index_granularity_bytes` 控制索引粒度的大小。在19.11版本之前, 只有 `index_granularity` 配置能够用于限制索引粒度的大小。当从具有很大的行(几十上百兆字节)的表中查询数据时候,`index_granularity_bytes` 配置能够提升ClickHouse的性能。如果您的表里有很大的行,可以开启这项配置来提升`SELECT` 查询的性能。 - - `use_minimalistic_part_header_in_zookeeper` — ZooKeeper中数据片段存储方式 。如果`use_minimalistic_part_header_in_zookeeper=1` ,ZooKeeper 会存储更少的数据。更多信息参考[服务配置参数]([Server Settings | ClickHouse Documentation](https://clickhouse.com/docs/zh/operations/server-configuration-parameters/settings/))这章中的 [设置描述](../../../operations/server-configuration-parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) 。 - - `min_merge_bytes_to_use_direct_io` — 使用直接 I/O 来操作磁盘的合并操作时要求的最小数据量。合并数据片段时,ClickHouse 会计算要被合并的所有数据的总存储空间。如果大小超过了 `min_merge_bytes_to_use_direct_io` 设置的字节数,则 ClickHouse 将使用直接 I/O 接口(`O_DIRECT` 选项)对磁盘读写。如果设置 `min_merge_bytes_to_use_direct_io = 0` ,则会禁用直接 I/O。默认值:`10 * 1024 * 1024 * 1024` 字节。 - - - `merge_with_ttl_timeout` — TTL合并频率的最小间隔时间,单位:秒。默认值: 86400 (1 天)。 - - `write_final_mark` — 是否启用在数据片段尾部写入最终索引标记。默认值: 1(不要关闭)。 - - `merge_max_block_size` — 在块中进行合并操作时的最大行数限制。默认值:8192 - - `storage_policy` — 存储策略。 参见 [使用具有多个块的设备进行数据存储](#table_engine-mergetree-multiple-volumes). 
- - `min_bytes_for_wide_part`,`min_rows_for_wide_part` 在数据片段中可以使用`Wide`格式进行存储的最小字节数/行数。您可以不设置、只设置一个,或全都设置。参考:[数据存储](#mergetree-data-storage) - - `max_parts_in_total` - 所有分区中最大块的数量(意义不明) - - `max_compress_block_size` - 在数据压缩写入表前,未压缩数据块的最大大小。您可以在全局设置中设置该值(参见[max_compress_block_size](https://clickhouse.com/docs/zh/operations/settings/settings/#max-compress-block-size))。建表时指定该值会覆盖全局设置。 - - `min_compress_block_size` - 在数据压缩写入表前,未压缩数据块的最小大小。您可以在全局设置中设置该值(参见[min_compress_block_size](https://clickhouse.com/docs/zh/operations/settings/settings/#min-compress-block-size))。建表时指定该值会覆盖全局设置。 - - `max_partitions_to_read` - 一次查询中可访问的分区最大数。您可以在全局设置中设置该值(参见[max_partitions_to_read](https://clickhouse.com/docs/zh/operations/settings/settings/#max_partitions_to_read))。 - -**示例配置** - -``` sql -ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192 -``` - -在这个例子中,我们设置了按月进行分区。 - -同时我们设置了一个按用户 ID 哈希的抽样表达式。这使得您可以对该表中每个 `CounterID` 和 `EventDate` 的数据伪随机分布。如果您在查询时指定了 [SAMPLE](../../../engines/table-engines/mergetree-family/mergetree.md#select-sample-clause) 子句。 ClickHouse会返回对于用户子集的一个均匀的伪随机数据采样。 - -`index_granularity` 可省略因为 8192 是默认设置 。 - -
-已弃用的建表方法 - -:::attention "注意" -不要在新版项目中使用该方法,可能的话,请将旧项目切换到上述方法。 -::: - -``` sql -CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2], - ... -) ENGINE [=] MergeTree(date-column [, sampling_expression], (primary, key), index_granularity) -``` - -**MergeTree() 参数** - -- `date-column` — 类型为 [日期](../../../engines/table-engines/mergetree-family/mergetree.md) 的列名。ClickHouse 会自动依据这个列按月创建分区。分区名格式为 `"YYYYMM"` 。 -- `sampling_expression` — 采样表达式。 -- `(primary, key)` — 主键。类型 — [元组()](../../../engines/table-engines/mergetree-family/mergetree.md) -- `index_granularity` — 索引粒度。即索引中相邻『标记』间的数据行数。设为 8192 可以适用大部分场景。 - -**示例** - - MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192) - -对于主要的配置方法,这里 `MergeTree` 引擎跟前面的例子一样,可以以同样的方式配置。 -
- -## 数据存储 {#mergetree-data-storage} - -表由按主键排序的数据片段(DATA PART)组成。 - -当数据被插入到表中时,会创建多个数据片段并按主键的字典序排序。例如,主键是 `(CounterID, Date)` 时,片段中数据首先按 `CounterID` 排序,具有相同 `CounterID` 的部分按 `Date` 排序。 - -不同分区的数据会被分成不同的片段,ClickHouse 在后台合并数据片段以便更高效存储。不同分区的数据片段不会进行合并。合并机制并不保证具有相同主键的行全都合并到同一个数据片段中。 - -数据片段可以以 `Wide` 或 `Compact` 格式存储。在 `Wide` 格式下,每一列都会在文件系统中存储为单独的文件,在 `Compact` 格式下所有列都存储在一个文件中。`Compact` 格式可以提高插入量少插入频率频繁时的性能。 - -数据存储格式由 `min_bytes_for_wide_part` 和 `min_rows_for_wide_part` 表引擎参数控制。如果数据片段中的字节数或行数少于相应的设置值,数据片段会以 `Compact` 格式存储,否则会以 `Wide` 格式存储。 - -每个数据片段被逻辑的分割成颗粒(granules)。颗粒是 ClickHouse 中进行数据查询时的最小不可分割数据集。ClickHouse 不会对行或值进行拆分,所以每个颗粒总是包含整数个行。每个颗粒的第一行通过该行的主键值进行标记, -ClickHouse 会为每个数据片段创建一个索引文件来存储这些标记。对于每列,无论它是否包含在主键当中,ClickHouse 都会存储类似标记。这些标记让您可以在列文件中直接找到数据。 - -颗粒的大小通过表引擎参数 `index_granularity` 和 `index_granularity_bytes` 控制。颗粒的行数的在 `[1, index_granularity]` 范围中,这取决于行的大小。如果单行的大小超过了 `index_granularity_bytes` 设置的值,那么一个颗粒的大小会超过 `index_granularity_bytes`。在这种情况下,颗粒的大小等于该行的大小。 - -## 主键和索引在查询中的表现 {#primary-keys-and-indexes-in-queries} - -我们以 `(CounterID, Date)` 以主键。排序好的索引的图示会是下面这样: - -``` text - 全部数据 : [-------------------------------------------------------------------------] - CounterID: [aaaaaaaaaaaaaaaaaabbbbcdeeeeeeeeeeeeefgggggggghhhhhhhhhiiiiiiiiikllllllll] - Date: [1111111222222233331233211111222222333211111112122222223111112223311122333] - 标记: | | | | | | | | | | | - a,1 a,2 a,3 b,3 e,2 e,3 g,1 h,2 i,1 i,3 l,3 - 标记号: 0 1 2 3 4 5 6 7 8 9 10 -``` - -如果指定查询如下: - -- `CounterID in ('a', 'h')`,服务器会读取标记号在 `[0, 3)` 和 `[6, 8)` 区间中的数据。 -- `CounterID IN ('a', 'h') AND Date = 3`,服务器会读取标记号在 `[1, 3)` 和 `[7, 8)` 区间中的数据。 -- `Date = 3`,服务器会读取标记号在 `[1, 10]` 区间中的数据。 - -上面例子可以看出使用索引通常会比全表描述要高效。 - -稀疏索引会引起额外的数据读取。当读取主键单个区间范围的数据时,每个数据块中最多会多读 `index_granularity * 2` 行额外的数据。 - -稀疏索引使得您可以处理极大量的行,因为大多数情况下,这些索引常驻于内存。 - -ClickHouse 不要求主键唯一,所以您可以插入多条具有相同主键的行。 - -您可以在`PRIMARY KEY`与`ORDER BY`条件中使用`可为空的`类型的表达式,但强烈建议不要这么做。为了启用这项功能,请打开[allow_nullable_key](../../../operations/settings/index.md#allow-nullable-key),[NULLS_LAST](../../../sql-reference/statements/select/order-by.md#sorting-of-special-values)规则也适用于`ORDER BY`条件中有NULL值的情况下。 - -### 主键的选择 {#zhu-jian-de-xuan-ze} - -主键中列的数量并没有明确的限制。依据数据结构,您可以在主键包含多些或少些列。这样可以: - - - 改善索引的性能。 - - - 如果当前主键是 `(a, b)` ,在下列情况下添加另一个 `c` 列会提升性能: - - - 查询会使用 `c` 列作为条件 - - 很长的数据范围( `index_granularity` 的数倍)里 `(a, b)` 都是相同的值,并且这样的情况很普遍。换言之,就是加入另一列后,可以让您的查询略过很长的数据范围。 - - - 改善数据压缩。 - - ClickHouse 以主键排序片段数据,所以,数据的一致性越高,压缩越好。 - - - 在[CollapsingMergeTree](collapsingmergetree.md#table_engine-collapsingmergetree) 和 [SummingMergeTree](summingmergetree.md) 引擎里进行数据合并时会提供额外的处理逻辑。 - - 在这种情况下,指定与主键不同的 *排序键* 也是有意义的。 - -长的主键会对插入性能和内存消耗有负面影响,但主键中额外的列并不影响 `SELECT` 查询的性能。 - -可以使用 `ORDER BY tuple()` 语法创建没有主键的表。在这种情况下 ClickHouse 根据数据插入的顺序存储。如果在使用 `INSERT ... 
SELECT` 时希望保持数据的排序,请设置 [max_insert_threads = 1](../../../operations/settings/settings.md#settings-max-insert-threads)。 - -想要根据初始顺序进行数据查询,使用 [单线程查询](../../../operations/settings/settings.md#settings-max_threads) - -### 选择与排序键不同的主键 {#choosing-a-primary-key-that-differs-from-the-sorting-key} - -Clickhouse可以做到指定一个跟排序键不一样的主键,此时排序键用于在数据片段中进行排序,主键用于在索引文件中进行标记的写入。这种情况下,主键表达式元组必须是排序键表达式元组的前缀(即主键为(a,b),排序列必须为(a,b,******))。 - -当使用 [SummingMergeTree](summingmergetree.md) 和 [AggregatingMergeTree](aggregatingmergetree.md) 引擎时,这个特性非常有用。通常在使用这类引擎时,表里的列分两种:*维度* 和 *度量* 。典型的查询会通过任意的 `GROUP BY` 对度量列进行聚合并通过维度列进行过滤。由于 SummingMergeTree 和 AggregatingMergeTree 会对排序键相同的行进行聚合,所以把所有的维度放进排序键是很自然的做法。但这将导致排序键中包含大量的列,并且排序键会伴随着新添加的维度不断的更新。 - -在这种情况下合理的做法是,只保留少量的列在主键当中用于提升扫描效率,将维度列添加到排序键中。 - -对排序键进行 [ALTER](../../../sql-reference/statements/alter.md) 是轻量级的操作,因为当一个新列同时被加入到表里和排序键里时,已存在的数据片段并不需要修改。由于旧的排序键是新排序键的前缀,并且新添加的列中没有数据,因此在表修改时的数据对于新旧的排序键来说都是有序的。 - -### 索引和分区在查询中的应用 {#use-of-indexes-and-partitions-in-queries} - -对于 `SELECT` 查询,ClickHouse 分析是否可以使用索引。如果 `WHERE/PREWHERE` 子句具有下面这些表达式(作为完整WHERE条件的一部分或全部)则可以使用索引:进行相等/不相等的比较;对主键列或分区列进行`IN`运算、有固定前缀的`LIKE`运算(如name like 'test%')、函数运算(部分函数适用),还有对上述表达式进行逻辑运算。 - - -因此,在索引键的一个或多个区间上快速地执行查询是可能的。下面例子中,指定标签;指定标签和日期范围;指定标签和日期;指定多个标签和日期范围等执行查询,都会非常快。 - -当引擎配置如下时: - -``` sql - ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate) SETTINGS index_granularity=8192 -``` - -这种情况下,这些查询: - -``` sql -SELECT count() FROM table WHERE EventDate = toDate(now()) AND CounterID = 34 -SELECT count() FROM table WHERE EventDate = toDate(now()) AND (CounterID = 34 OR CounterID = 42) -SELECT count() FROM table WHERE ((EventDate >= toDate('2014-01-01') AND EventDate <= toDate('2014-01-31')) OR EventDate = toDate('2014-05-01')) AND CounterID IN (101500, 731962, 160656) AND (CounterID = 101500 OR EventDate != toDate('2014-05-01')) -``` - -ClickHouse 会依据主键索引剪掉不符合的数据,依据按月分区的分区键剪掉那些不包含符合数据的分区。 - -上文的查询显示,即使索引用于复杂表达式,因为读表操作经过优化,所以使用索引不会比完整扫描慢。 - -下面这个例子中,不会使用索引。 - -``` sql -SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%' -``` - -要检查 ClickHouse 执行一个查询时能否使用索引,可设置 [force_index_by_date](../../../operations/settings/settings.md#settings-force_index_by_date) 和 [force_primary_key](../../../operations/settings/settings.md) 。 - -使用按月分区的分区列允许只读取包含适当日期区间的数据块,这种情况下,数据块会包含很多天(最多整月)的数据。在块中,数据按主键排序,主键第一列可能不包含日期。因此,仅使用日期而没有用主键字段作为条件的查询将会导致需要读取超过这个指定日期以外的数据。 - -### 部分单调主键的使用 - -考虑这样的场景,比如一个月中的天数。它们在一个月的范围内形成一个[单调序列](https://zh.wikipedia.org/wiki/单调函数) ,但如果扩展到更大的时间范围它们就不再单调了。这就是一个部分单调序列。如果用户使用部分单调的主键创建表,ClickHouse同样会创建一个稀疏索引。当用户从这类表中查询数据时,ClickHouse 会对查询条件进行分析。如果用户希望获取两个索引标记之间的数据并且这两个标记在一个月以内,ClickHouse 可以在这种特殊情况下使用到索引,因为它可以计算出查询参数与索引标记之间的距离。 - -如果查询参数范围内的主键不是单调序列,那么 ClickHouse 无法使用索引。在这种情况下,ClickHouse 会进行全表扫描。 - -ClickHouse 在任何主键代表一个部分单调序列的情况下都会使用这个逻辑。 - -### 跳数索引 {#tiao-shu-suo-yin-fen-duan-hui-zong-suo-yin-shi-yan-xing-de} - -此索引在 `CREATE` 语句的列部分里定义。 - -``` sql -INDEX index_name expr TYPE type(...) GRANULARITY granularity_value -``` - -`*MergeTree` 系列的表可以指定跳数索引。 -跳数索引是指数据片段按照粒度(建表时指定的`index_granularity`)分割成小块后,将上述SQL的granularity_value数量的小块组合成一个大的块,对这些大块写入索引信息,这样有助于使用`where`筛选时跳过大量不必要的数据,减少`SELECT`需要读取的数据量。 - -**示例** - -``` sql -CREATE TABLE table_name -( - u64 UInt64, - i32 Int32, - s String, - ... - INDEX a (u64 * i32, s) TYPE minmax GRANULARITY 3, - INDEX b (u64 * length(s)) TYPE set(1000) GRANULARITY 4 -) ENGINE = MergeTree() -... 
-``` - -上例中的索引能让 ClickHouse 执行下面这些查询时减少读取数据量。 - -``` sql -SELECT count() FROM table WHERE s < 'z' -SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 -``` - -#### 可用的索引类型 {#table_engine-mergetree-data_skipping-indexes} - -- `minmax` - 存储指定表达式的极值(如果表达式是 `tuple` ,则存储 `tuple` 中每个元素的极值),这些信息用于跳过数据块,类似主键。 - -- `set(max_rows)` - 存储指定表达式的不重复值(不超过 `max_rows` 个,`max_rows=0` 则表示『无限制』)。这些信息可用于检查数据块是否满足 `WHERE` 条件。 - -- `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` - 存储一个包含数据块中所有 n元短语(ngram) 的 [布隆过滤器](https://en.wikipedia.org/wiki/Bloom_filter) 。只可用在字符串上。 - 可用于优化 `equals` , `like` 和 `in` 表达式的性能。 - - `n` – 短语长度。 - - `size_of_bloom_filter_in_bytes` – 布隆过滤器大小,字节为单位。(因为压缩得好,可以指定比较大的值,如 256 或 512)。 - - `number_of_hash_functions` – 布隆过滤器中使用的哈希函数的个数。 - - `random_seed` – 哈希函数的随机种子。 - -- `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` - 跟 `ngrambf_v1` 类似,但是存储的是token而不是ngrams。Token是由非字母数字的符号分割的序列。 - -- `bloom_filter(bloom_filter([false_positive])` – 为指定的列存储布隆过滤器 - - 可选参数`false_positive`用来指定从布隆过滤器收到错误响应的几率。取值范围是 (0,1),默认值:0.025 - - 支持的数据类型:`Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`。 - - 以下函数会用到这个索引: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions.md), [notIn](../../../sql-reference/functions/in-functions.md), [has](../../../sql-reference/functions/array-functions.md) - -``` sql -INDEX sample_index (u64 * length(s)) TYPE minmax GRANULARITY 4 -INDEX sample_index2 (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARITY 4 -INDEX sample_index3 (lower(str), str) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4 -``` - -#### 函数支持 {#functions-support} - -WHERE 子句中的条件可以包含对某列数据进行运算的函数表达式,如果列是索引的一部分,ClickHouse会在执行函数时尝试使用索引。不同的函数对索引的支持是不同的。 - -`set` 索引会对所有函数生效,其他索引对函数的生效情况见下表 - -| 函数 (操作符) / 索引 | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | -| ------------------------------------------------------------ | ----------- | ------ | ---------- | ---------- | ------------ | -| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#equals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | -| [endsWith](../../../sql-reference/functions/string-functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | -| [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | -| [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [less (\<)](../../../sql-reference/functions/comparison-functions.md#less) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#greater) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greaterOrEquals 
(\>=)](../../../sql-reference/functions/comparison-functions.md#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | -| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | - -常量参数小于 ngram 大小的函数不能使用 `ngrambf_v1` 进行查询优化。 - -:::note -布隆过滤器可能会包含不符合条件的匹配,所以 `ngrambf_v1`, `tokenbf_v1` 和 `bloom_filter` 索引不能用于结果返回为假的函数,例如: - -- 可以用来优化的场景 - - `s LIKE '%test%'` - - `NOT s NOT LIKE '%test%'` - - `s = 1` - - `NOT s != 1` - - `startsWith(s, 'test')` -- 不能用来优化的场景 - - `NOT s LIKE '%test%'` - - `s NOT LIKE '%test%'` - - `NOT s = 1` - - `s != 1` - - `NOT startsWith(s, 'test')` -::: - -## 并发数据访问 {#concurrent-data-access} - -对于表的并发访问,我们使用多版本机制。换言之,当一张表同时被读和更新时,数据从当前查询到的一组片段中读取。没有冗长的的锁。插入不会阻碍读取。 - -对表的读操作是自动并行的。 - -## 列和表的 TTL {#table_engine-mergetree-ttl} - -TTL用于设置值的生命周期,它既可以为整张表设置,也可以为每个列字段单独设置。表级别的 TTL 还会指定数据在磁盘和卷上自动转移的逻辑。 - -TTL 表达式的计算结果必须是 [日期](../../../engines/table-engines/mergetree-family/mergetree.md) 或 [日期时间](../../../engines/table-engines/mergetree-family/mergetree.md) 类型的字段。 - -示例: - -``` sql -TTL time_column -TTL time_column + interval -``` - -要定义`interval`, 需要使用 [时间间隔](../../../engines/table-engines/mergetree-family/mergetree.md#operators-datetime) 操作符。 - -``` sql -TTL date_time + INTERVAL 1 MONTH -TTL date_time + INTERVAL 15 HOUR -``` - -### 列 TTL {#mergetree-column-ttl} - -当列中的值过期时, ClickHouse会将它们替换成该列数据类型的默认值。如果数据片段中列的所有值均已过期,则ClickHouse 会从文件系统中的数据片段中删除此列。 - -`TTL`子句不能被用于主键字段。 - -**示例:** - -创建表时指定 `TTL` - -``` sql -CREATE TABLE example_table -( - d DateTime, - a Int TTL d + INTERVAL 1 MONTH, - b Int TTL d + INTERVAL 1 MONTH, - c String -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(d) -ORDER BY d; -``` - -为表中已存在的列字段添加 `TTL` - -``` sql -ALTER TABLE example_table - MODIFY COLUMN - c String TTL d + INTERVAL 1 DAY; -``` - -修改列字段的 `TTL` - -``` sql -ALTER TABLE example_table - MODIFY COLUMN - c String TTL d + INTERVAL 1 MONTH; -``` - -### 表 TTL {#mergetree-table-ttl} - -表可以设置一个用于移除过期行的表达式,以及多个用于在磁盘或卷上自动转移数据片段的表达式。当表中的行过期时,ClickHouse 会删除所有对应的行。对于数据片段的转移特性,必须所有的行都满足转移条件。 - -``` sql -TTL expr - [DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'][, DELETE|TO DISK 'aaa'|TO VOLUME 'bbb'] ... - [WHERE conditions] - [GROUP BY key_expr [SET v1 = aggr_func(v1) [, v2 = aggr_func(v2) ...]] ] - -``` - -TTL 规则的类型紧跟在每个 TTL 表达式后面,它会影响满足表达式时(到达指定时间时)应当执行的操作: - -- `DELETE` - 删除过期的行(默认操作); -- `TO DISK 'aaa'` - 将数据片段移动到磁盘 `aaa`; -- `TO VOLUME 'bbb'` - 将数据片段移动到卷 `bbb`. 
-- `GROUP BY` - 聚合过期的行 - -使用`WHERE`从句,您可以指定哪些过期的行会被删除或聚合(不适用于移动)。`GROUP BY`表达式必须是表主键的前缀。如果某列不是`GROUP BY`表达式的一部分,也没有在SET从句显示引用,结果行中相应列的值是随机的(就好像使用了`any`函数)。 - -**示例**: - -创建时指定 TTL - -``` sql -CREATE TABLE example_table -( - d DateTime, - a Int -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(d) -ORDER BY d -TTL d + INTERVAL 1 MONTH DELETE, - d + INTERVAL 1 WEEK TO VOLUME 'aaa', - d + INTERVAL 2 WEEK TO DISK 'bbb'; -``` - -修改表的 `TTL` - -``` sql -ALTER TABLE example_table - MODIFY TTL d + INTERVAL 1 DAY; -``` - -创建一张表,设置一个月后数据过期,这些过期的行中日期为星期一的删除: - -``` sql -CREATE TABLE table_with_where -( - d DateTime, - a Int -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(d) -ORDER BY d -TTL d + INTERVAL 1 MONTH DELETE WHERE toDayOfWeek(d) = 1; -``` - -创建一张表,设置过期的列会被聚合。列`x`包含每组行中的最大值,`y`为最小值,`d`为可能任意值。 - -``` sql -CREATE TABLE table_for_aggregation -( - d DateTime, - k1 Int, - k2 Int, - x Int, - y Int -) -ENGINE = MergeTree -ORDER BY (k1, k2) -TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y); -``` - -**删除数据** - -ClickHouse 在数据片段合并时会删除掉过期的数据。 - -当ClickHouse发现数据过期时, 它将会执行一个计划外的合并。要控制这类合并的频率, 您可以设置 `merge_with_ttl_timeout`。如果该值被设置的太低, 它将引发大量计划外的合并,这可能会消耗大量资源。 - -如果在两次合并的时间间隔中执行 `SELECT` 查询, 则可能会得到过期的数据。为了避免这种情况,可以在 `SELECT` 之前使用 [OPTIMIZE](../../../engines/table-engines/mergetree-family/mergetree.md#misc_operations-optimize) 。 - -## 使用多个块设备进行数据存储 {#table_engine-mergetree-multiple-volumes} - -### 介绍 {#introduction} - -MergeTree 系列表引擎可以将数据存储在多个块设备上。这对某些可以潜在被划分为“冷”“热”的表来说是很有用的。最新数据被定期的查询但只需要很小的空间。相反,详尽的历史数据很少被用到。如果有多块磁盘可用,那么“热”的数据可以放置在快速的磁盘上(比如 NVMe 固态硬盘或内存),“冷”的数据可以放在相对较慢的磁盘上(比如机械硬盘)。 - -数据片段是 `MergeTree` 引擎表的最小可移动单元。属于同一个数据片段的数据被存储在同一块磁盘上。数据片段会在后台自动的在磁盘间移动,也可以通过 [ALTER](../../../sql-reference/statements/alter.md#alter_move-partition) 查询来移动。 - -### 术语 {#terms} - -- 磁盘 — 挂载到文件系统的块设备 -- 默认磁盘 — 在服务器设置中通过 [path](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-path) 参数指定的数据存储 -- 卷 — 相同磁盘的顺序列表 (类似于 [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)) -- 存储策略 — 卷的集合及他们之间的数据移动规则 - - 以上名称的信息在Clickhouse中系统表[system.storage_policies](https://clickhouse.com/docs/zh/operations/system-tables/storage_policies/#system_tables-storage_policies)和[system.disks](https://clickhouse.com/docs/zh/operations/system-tables/disks/#system_tables-disks)体现。为了应用存储策略,可以在建表时使用`storage_policy`设置。 - -### 配置 {#table_engine-mergetree-multiple-volumes_configure} - -磁盘、卷和存储策略应当在主配置文件 `config.xml` 或 `config.d` 目录中的独立文件中的 `` 标签内定义。 - -配置结构: - -``` xml - - - - /mnt/fast_ssd/clickhouse/ - - - /mnt/hdd1/clickhouse/ - 10485760 - - - /mnt/hdd2/clickhouse/ - 10485760 - - - ... - - - ... - -``` - -标签: - -- `` — 磁盘名,名称必须与其他磁盘不同. -- `path` — 服务器将用来存储数据 (`data` 和 `shadow` 目录) 的路径, 应当以 ‘/’ 结尾. -- `keep_free_space_bytes` — 需要保留的剩余磁盘空间. - -磁盘定义的顺序无关紧要。 - -存储策略配置: - -``` xml - - ... - - - - - disk_name_from_disks_configuration - 1073741824 - - - - - - - 0.2 - - - - - - - - ... - -``` - -标签: - -- `policy_name_N` — 策略名称,不能重复。 -- `volume_name_N` — 卷名称,不能重复。 -- `disk` — 卷中的磁盘。 -- `max_data_part_size_bytes` — 卷中的磁盘可以存储的数据片段的最大大小。 -- `move_factor` — 当可用空间少于这个因子时,数据将自动的向下一个卷(如果有的话)移动 (默认值为 0.1)。 - -配置示例: - -``` xml - - ... - - - - - disk1 - disk2 - - - - - - - - fast_ssd - 1073741824 - - - disk1 - - - 0.2 - - - - -
- jbod1 -
- - external - -
-
-
- ... -
-``` - -在给出的例子中, `hdd_in_order` 策略实现了 [循环制](https://zh.wikipedia.org/wiki/循环制) 方法。因此这个策略只定义了一个卷(`single`),数据片段会以循环的顺序全部存储到它的磁盘上。当有多个类似的磁盘挂载到系统上,但没有配置 RAID 时,这种策略非常有用。请注意一个每个独立的磁盘驱动都并不可靠,您可能需要用3份或更多的复制份数来补偿它。 - -如果在系统中有不同类型的磁盘可用,可以使用 `moving_from_ssd_to_hdd`。`hot` 卷由 SSD 磁盘(`fast_ssd`)组成,这个卷上可以存储的数据片段的最大大小为 1GB。所有大于 1GB 的数据片段都会被直接存储到 `cold` 卷上,`cold` 卷包含一个名为 `disk1` 的 HDD 磁盘。 -同样,一旦 `fast_ssd` 被填充超过 80%,数据会通过后台进程向 `disk1` 进行转移。 - -存储策略中卷的枚举顺序是很重要的。因为当一个卷被充满时,数据会向下一个卷转移。磁盘的枚举顺序同样重要,因为数据是依次存储在磁盘上的。 - -在创建表时,可以应用存储策略: - -``` sql -CREATE TABLE table_with_non_default_policy ( - EventDate Date, - OrderID UInt64, - BannerID UInt64, - SearchPhrase String -) ENGINE = MergeTree -ORDER BY (OrderID, BannerID) -PARTITION BY toYYYYMM(EventDate) -SETTINGS storage_policy = 'moving_from_ssd_to_hdd' -``` - -`default` 存储策略意味着只使用一个卷,这个卷只包含一个在 `` 中定义的磁盘。您可以使用[ALTER TABLE ... MODIFY SETTING]来修改存储策略,新的存储策略应该包含所有以前的磁盘和卷,并使用相同的名称。 - -可以通过 [background_move_pool_size](../../../operations/server-configuration-parameters/settings.md#background_move_pool_size) 设置调整执行后台任务的线程数。 - -### 详细说明 {#details} - -对于 `MergeTree` 表,数据通过以下不同的方式写入到磁盘当中: - -- 插入(`INSERT`查询) -- 后台合并和[数据变异](../../../sql-reference/statements/alter.md#alter-mutations) -- 从另一个副本下载 -- [ALTER TABLE … FREEZE PARTITION](../../../sql-reference/statements/alter.md#alter_freeze-partition) 冻结分区 - -除了数据变异和冻结分区以外的情况下,数据按照以下逻辑存储到卷或磁盘上: - -1. 首个卷(按定义顺序)拥有足够的磁盘空间存储数据片段(`unreserved_space > current_part_size`)并且允许存储给定数据片段的大小(`max_data_part_size_bytes > current_part_size`) -2. 在这个数据卷内,紧挨着先前存储数据的那块磁盘之后的磁盘,拥有比数据片段大的剩余空间。(`unreserved_space - keep_free_space_bytes > current_part_size`) - -更进一步,数据变异和分区冻结使用的是 [硬链接](https://en.wikipedia.org/wiki/Hard_link)。不同磁盘之间的硬链接是不支持的,所以在这种情况下数据片段都会被存储到原来的那一块磁盘上。 - -在后台,数据片段基于剩余空间(`move_factor`参数)根据卷在配置文件中定义的顺序进行转移。数据永远不会从最后一个移出也不会从第一个移入。可以通过系统表 [system.part_log](../../../operations/system-tables/part_log.md#system_tables-part-log) (字段 `type = MOVE_PART`) 和 [system.parts](../../../operations/system-tables/parts.md#system_tables-parts) (字段 `path` 和 `disk`) 来监控后台的移动情况。具体细节可以通过服务器日志查看。 - -用户可以通过 [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../../sql-reference/statements/alter.md#alter_move-partition) 强制移动一个数据片段或分区到另外一个卷,所有后台移动的限制都会被考虑在内。这个查询会自行启动,无需等待后台操作完成。如果没有足够的可用空间或任何必须条件没有被满足,用户会收到报错信息。 - -数据移动不会妨碍到数据复制。也就是说,同一张表的不同副本可以指定不同的存储策略。 - -在后台合并和数据变异之后,旧的数据片段会在一定时间后被移除 (`old_parts_lifetime`)。在这期间,他们不能被移动到其他的卷或磁盘。也就是说,直到数据片段被完全移除,它们仍然会被磁盘占用空间计算在内。 - -## 使用S3进行数据存储 {#using-s3-data-storage} - -`MergeTree`系列表引擎允许使用[S3](https://aws.amazon.com/s3/)存储数据,需要修改磁盘类型为`S3`。 - -示例配置: - -``` xml - - ... - - - s3 - https://storage.yandexcloud.net/my-bucket/root-path/ - your_access_key_id - your_secret_access_key - - your_base64_encoded_customer_key - - http://proxy1 - http://proxy2 - - 10000 - 5000 - 10 - 4 - 1000 - /var/lib/clickhouse/disks/s3/ - false - - - ... 
- -``` - -必须的参数: - -- `endpoint` - S3的结点URL,以`path`或`virtual hosted`[格式](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html)书写。 -- `access_key_id` - S3的Access Key ID。 -- `secret_access_key` - S3的Secret Access Key。 - -可选参数: - -- `region` - S3的区域名称 -- `use_environment_credentials` - 从环境变量AWS_ACCESS_KEY_ID、AWS_SECRET_ACCESS_KEY和AWS_SESSION_TOKEN中读取认证参数。默认值为`false`。 -- `use_insecure_imds_request` - 如果设置为`true`,S3客户端在认证时会使用不安全的IMDS请求。默认值为`false`。 -- `proxy` - 访问S3结点URL时代理设置。每一个`uri`项的值都应该是合法的代理URL。 -- `connect_timeout_ms` - Socket连接超时时间,默认值为`10000`,即10秒。 -- `request_timeout_ms` - 请求超时时间,默认值为`5000`,即5秒。 -- `retry_attempts` - 请求失败后的重试次数,默认值为10。 -- `single_read_retries` - 读过程中连接丢失后重试次数,默认值为4。 -- `min_bytes_for_seek` - 使用查找操作,而不是顺序读操作的最小字节数,默认值为1000。 -- `metadata_path` - 本地存放S3元数据文件的路径,默认值为`/var/lib/clickhouse/disks//` -- `skip_access_check` - 如果为`true`,Clickhouse启动时不检查磁盘是否可用。默认为`false`。 -- `server_side_encryption_customer_key_base64` - 如果指定该项的值,请求时会加上为了访问SSE-C加密数据而必须的头信息。 - -S3磁盘也可以设置冷热存储: -```xml - - ... - - - s3 - https://storage.yandexcloud.net/my-bucket/root-path/ - your_access_key_id - your_secret_access_key - - - - - -
- s3 -
-
-
- - -
- default -
- - s3 - -
- 0.2 -
-
- ... -
-``` - -指定了`cold`选项后,本地磁盘剩余空间如果小于`move_factor * disk_size`,或有TTL设置时,数据就会定时迁移至S3了。 - -## 虚拟列 {#virtual-columns} - -- `_part` - 分区名称。 -- `_part_index` - 作为请求的结果,按顺序排列的分区数。 -- `_partition_id` — 分区名称。 -- `_part_uuid` - 唯一部分标识符(如果 MergeTree 设置`assign_part_uuids` 已启用)。 -- `_partition_value` — `partition by` 表达式的值(元组)。 -- `_sample_factor` - 采样因子(来自请求)。 +--- +slug: /zh/engines/table-engines/mergetree-family/mergetree +--- +# MergeTree {#table_engines-mergetree} + +Clickhouse 中最强大的表引擎当属 `MergeTree` (合并树)引擎及该系列(`*MergeTree`)中的其他引擎。 + +`MergeTree` 系列的引擎被设计用于插入极大量的数据到一张表当中。数据可以以数据片段的形式一个接着一个的快速写入,数据片段在后台按照一定的规则进行合并。相比在插入时不断修改(重写)已存储的数据,这种策略会高效很多。 + +主要特点: + +- 存储的数据按主键排序。 + + 这使得您能够创建一个小型的稀疏索引来加快数据检索。 + +- 如果指定了 [分区键](custom-partitioning-key.md) 的话,可以使用分区。 + + 在相同数据集和相同结果集的情况下 ClickHouse 中某些带分区的操作会比普通操作更快。查询中指定了分区键时 ClickHouse 会自动截取分区数据。这也有效增加了查询性能。 + +- 支持数据副本。 + + `ReplicatedMergeTree` 系列的表提供了数据副本功能。更多信息,请参阅 [数据副本](replication.md) 一节。 + +- 支持数据采样。 + + 需要的话,您可以给表设置一个采样方法。 + +:::info +[合并](../special/merge.md#merge) 引擎并不属于 `*MergeTree` 系列。 +::: + +## 建表 {#table_engine-mergetree-creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], + ... + INDEX index_name1 expr1 TYPE type1(...) GRANULARITY value1, + INDEX index_name2 expr2 TYPE type2(...) GRANULARITY value2 +) ENGINE = MergeTree() +ORDER BY expr +[PARTITION BY expr] +[PRIMARY KEY expr] +[SAMPLE BY expr] +[TTL expr [DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'], ...] +[SETTINGS name=value, ...] +``` + +对于以上参数的描述,可参考 [CREATE 语句 的描述](../../../engines/table-engines/mergetree-family/mergetree.md) 。 + + + +**子句** + +- `ENGINE` - 引擎名和参数。 `ENGINE = MergeTree()`. `MergeTree` 引擎没有参数。 + +- `ORDER BY` — 排序键。 + + 可以是一组列的元组或任意的表达式。 例如: `ORDER BY (CounterID, EventDate)` 。 + + 如果没有使用 `PRIMARY KEY` 显式指定的主键,ClickHouse 会使用排序键作为主键。 + + 如果不需要排序,可以使用 `ORDER BY tuple()`. 
参考 [选择主键](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#selecting-the-primary-key) + +- `PARTITION BY` — [分区键](custom-partitioning-key.md) ,可选项。 + + 大多数情况下,不需要使用分区键。即使需要使用,也不需要使用比月更细粒度的分区键。分区不会加快查询(这与 ORDER BY 表达式不同)。永远也别使用过细粒度的分区键。不要使用客户端指定分区标识符或分区字段名称来对数据进行分区(而是将分区字段标识或名称作为 ORDER BY 表达式的第一列来指定分区)。 + + 要按月分区,可以使用表达式 `toYYYYMM(date_column)` ,这里的 `date_column` 是一个 [Date](../../../engines/table-engines/mergetree-family/mergetree.md) 类型的列。分区名的格式会是 `"YYYYMM"` 。 + +- `PRIMARY KEY` - 如果要 [选择与排序键不同的主键](#choosing-a-primary-key-that-differs-from-the-sorting-key),在这里指定,可选项。 + + 默认情况下主键跟排序键(由 `ORDER BY` 子句指定)相同。 + 因此,大部分情况下不需要再专门指定一个 `PRIMARY KEY` 子句。 + +- `SAMPLE BY` - 用于抽样的表达式,可选项。 + + 如果要用抽样表达式,主键中必须包含这个表达式。例如: + `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))` 。 + +- `TTL` - 指定行存储的持续时间并定义数据片段在硬盘和卷上的移动逻辑的规则列表,可选项。 + + 表达式中必须存在至少一个 `Date` 或 `DateTime` 类型的列,比如: + + `TTL date + INTERVAl 1 DAY` + + 规则的类型 `DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'`指定了当满足条件(到达指定时间)时所要执行的动作:移除过期的行,还是将数据片段(如果数据片段中的所有行都满足表达式的话)移动到指定的磁盘(`TO DISK 'xxx'`) 或 卷(`TO VOLUME 'xxx'`)。默认的规则是移除(`DELETE`)。可以在列表中指定多个规则,但最多只能有一个`DELETE`的规则。 + + 更多细节,请查看 [表和列的 TTL](#table_engine-mergetree-ttl) + +- `SETTINGS` — 控制 `MergeTree` 行为的额外参数,可选项: + + - `index_granularity` — 索引粒度。索引中相邻的『标记』间的数据行数。默认值8192 。参考[数据存储](#mergetree-data-storage)。 + - `index_granularity_bytes` — 索引粒度,以字节为单位,默认值: 10Mb。如果想要仅按数据行数限制索引粒度, 请设置为0(不建议)。 + - `min_index_granularity_bytes` - 允许的最小数据粒度,默认值:1024b。该选项用于防止误操作,添加了一个非常低索引粒度的表。参考[数据存储](#mergetree-data-storage) + - `enable_mixed_granularity_parts` — 是否启用通过 `index_granularity_bytes` 控制索引粒度的大小。在19.11版本之前, 只有 `index_granularity` 配置能够用于限制索引粒度的大小。当从具有很大的行(几十上百兆字节)的表中查询数据时候,`index_granularity_bytes` 配置能够提升ClickHouse的性能。如果您的表里有很大的行,可以开启这项配置来提升`SELECT` 查询的性能。 + - `use_minimalistic_part_header_in_zookeeper` — ZooKeeper中数据片段存储方式 。如果`use_minimalistic_part_header_in_zookeeper=1` ,ZooKeeper 会存储更少的数据。更多信息参考[服务配置参数]([Server Settings | ClickHouse Documentation](https://clickhouse.com/docs/zh/operations/server-configuration-parameters/settings/))这章中的 [设置描述](../../../operations/server-configuration-parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) 。 + - `min_merge_bytes_to_use_direct_io` — 使用直接 I/O 来操作磁盘的合并操作时要求的最小数据量。合并数据片段时,ClickHouse 会计算要被合并的所有数据的总存储空间。如果大小超过了 `min_merge_bytes_to_use_direct_io` 设置的字节数,则 ClickHouse 将使用直接 I/O 接口(`O_DIRECT` 选项)对磁盘读写。如果设置 `min_merge_bytes_to_use_direct_io = 0` ,则会禁用直接 I/O。默认值:`10 * 1024 * 1024 * 1024` 字节。 + + - `merge_with_ttl_timeout` — TTL合并频率的最小间隔时间,单位:秒。默认值: 86400 (1 天)。 + - `write_final_mark` — 是否启用在数据片段尾部写入最终索引标记。默认值: 1(不要关闭)。 + - `merge_max_block_size` — 在块中进行合并操作时的最大行数限制。默认值:8192 + - `storage_policy` — 存储策略。 参见 [使用具有多个块的设备进行数据存储](#table_engine-mergetree-multiple-volumes). 
+    - `min_bytes_for_wide_part`,`min_rows_for_wide_part` 在数据片段中可以使用`Wide`格式进行存储的最小字节数/行数。您可以不设置、只设置一个,或全都设置。参考:[数据存储](#mergetree-data-storage)
+    - `max_parts_in_total` - 所有分区中活跃数据片段总数的上限,超过该值后继续插入会抛出 `Too many parts` 异常。
+    - `max_compress_block_size` - 在数据压缩写入表前,未压缩数据块的最大大小。您可以在全局设置中设置该值(参见[max_compress_block_size](https://clickhouse.com/docs/zh/operations/settings/settings/#max-compress-block-size))。建表时指定该值会覆盖全局设置。
+    - `min_compress_block_size` - 在数据压缩写入表前,未压缩数据块的最小大小。您可以在全局设置中设置该值(参见[min_compress_block_size](https://clickhouse.com/docs/zh/operations/settings/settings/#min-compress-block-size))。建表时指定该值会覆盖全局设置。
+    - `max_partitions_to_read` - 一次查询中可访问的分区最大数。您可以在全局设置中设置该值(参见[max_partitions_to_read](https://clickhouse.com/docs/zh/operations/settings/settings/#max_partitions_to_read))。
+
+**示例配置**
+
+``` sql
+ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192
+```
+
+在这个例子中,我们设置了按月进行分区。
+
+同时我们设置了一个按用户 ID 哈希的抽样表达式。这使得该表中每个 `CounterID` 和 `EventDate` 组合的数据可以伪随机地分布。如果您在查询时指定了 [SAMPLE](../../../engines/table-engines/mergetree-family/mergetree.md#select-sample-clause) 子句,ClickHouse 会为用户子集返回一个均匀的伪随机数据采样。
+
+`index_granularity` 可以省略,因为 8192 是默认值。
+
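+下面再给出一个把上述各子句组合在一起的完整建表语句示意(表名 `visits_sketch` 与列定义均为假设,仅用于说明各子句的写法,请按实际表结构调整):
+
+``` sql
+CREATE TABLE visits_sketch
+(
+    CounterID UInt32,
+    EventDate Date,
+    UserID UInt64,
+    URL String
+)
+ENGINE = MergeTree()
+PARTITION BY toYYYYMM(EventDate)
+ORDER BY (CounterID, EventDate, intHash32(UserID))
+SAMPLE BY intHash32(UserID)
+TTL EventDate + INTERVAL 3 MONTH DELETE
+SETTINGS index_granularity = 8192;
+```
+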
+<details markdown="1">
+
+<summary>已弃用的建表方法</summary>
+
+:::attention "注意"
+不要在新版项目中使用该方法,可能的话,请将旧项目切换到上述方法。
+:::
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
+    ...
+) ENGINE [=] MergeTree(date-column [, sampling_expression], (primary, key), index_granularity)
+```
+
+**MergeTree() 参数**
+
+- `date-column` — 类型为 [日期](../../../engines/table-engines/mergetree-family/mergetree.md) 的列名。ClickHouse 会自动依据这个列按月创建分区。分区名格式为 `"YYYYMM"` 。
+- `sampling_expression` — 采样表达式。
+- `(primary, key)` — 主键。类型 — [元组()](../../../engines/table-engines/mergetree-family/mergetree.md)
+- `index_granularity` — 索引粒度。即索引中相邻『标记』间的数据行数。设为 8192 可以适用大部分场景。
+
+**示例**
+
+    MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192)
+
+与上文介绍的主要建表方法一样,这里的 `MergeTree` 引擎也可以按照前面例子中的方式进行配置。
+
+</details>
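+
+作为对照,上述已弃用写法中的示例 `MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192)`,按文中给出的参数对应关系,大致等价于当前语法下的如下写法(示意):
+
+``` sql
+ENGINE = MergeTree()
+PARTITION BY toYYYYMM(EventDate)
+ORDER BY (CounterID, EventDate, intHash32(UserID))
+SAMPLE BY intHash32(UserID)
+SETTINGS index_granularity = 8192
+```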
+ +## 数据存储 {#mergetree-data-storage} + +表由按主键排序的数据片段(DATA PART)组成。 + +当数据被插入到表中时,会创建多个数据片段并按主键的字典序排序。例如,主键是 `(CounterID, Date)` 时,片段中数据首先按 `CounterID` 排序,具有相同 `CounterID` 的部分按 `Date` 排序。 + +不同分区的数据会被分成不同的片段,ClickHouse 在后台合并数据片段以便更高效存储。不同分区的数据片段不会进行合并。合并机制并不保证具有相同主键的行全都合并到同一个数据片段中。 + +数据片段可以以 `Wide` 或 `Compact` 格式存储。在 `Wide` 格式下,每一列都会在文件系统中存储为单独的文件,在 `Compact` 格式下所有列都存储在一个文件中。`Compact` 格式可以提高插入量少插入频率频繁时的性能。 + +数据存储格式由 `min_bytes_for_wide_part` 和 `min_rows_for_wide_part` 表引擎参数控制。如果数据片段中的字节数或行数少于相应的设置值,数据片段会以 `Compact` 格式存储,否则会以 `Wide` 格式存储。 + +每个数据片段被逻辑的分割成颗粒(granules)。颗粒是 ClickHouse 中进行数据查询时的最小不可分割数据集。ClickHouse 不会对行或值进行拆分,所以每个颗粒总是包含整数个行。每个颗粒的第一行通过该行的主键值进行标记, +ClickHouse 会为每个数据片段创建一个索引文件来存储这些标记。对于每列,无论它是否包含在主键当中,ClickHouse 都会存储类似标记。这些标记让您可以在列文件中直接找到数据。 + +颗粒的大小通过表引擎参数 `index_granularity` 和 `index_granularity_bytes` 控制。颗粒的行数的在 `[1, index_granularity]` 范围中,这取决于行的大小。如果单行的大小超过了 `index_granularity_bytes` 设置的值,那么一个颗粒的大小会超过 `index_granularity_bytes`。在这种情况下,颗粒的大小等于该行的大小。 + +## 主键和索引在查询中的表现 {#primary-keys-and-indexes-in-queries} + +我们以 `(CounterID, Date)` 以主键。排序好的索引的图示会是下面这样: + +``` text + 全部数据 : [-------------------------------------------------------------------------] + CounterID: [aaaaaaaaaaaaaaaaaabbbbcdeeeeeeeeeeeeefgggggggghhhhhhhhhiiiiiiiiikllllllll] + Date: [1111111222222233331233211111222222333211111112122222223111112223311122333] + 标记: | | | | | | | | | | | + a,1 a,2 a,3 b,3 e,2 e,3 g,1 h,2 i,1 i,3 l,3 + 标记号: 0 1 2 3 4 5 6 7 8 9 10 +``` + +如果指定查询如下: + +- `CounterID in ('a', 'h')`,服务器会读取标记号在 `[0, 3)` 和 `[6, 8)` 区间中的数据。 +- `CounterID IN ('a', 'h') AND Date = 3`,服务器会读取标记号在 `[1, 3)` 和 `[7, 8)` 区间中的数据。 +- `Date = 3`,服务器会读取标记号在 `[1, 10]` 区间中的数据。 + +上面例子可以看出使用索引通常会比全表描述要高效。 + +稀疏索引会引起额外的数据读取。当读取主键单个区间范围的数据时,每个数据块中最多会多读 `index_granularity * 2` 行额外的数据。 + +稀疏索引使得您可以处理极大量的行,因为大多数情况下,这些索引常驻于内存。 + +ClickHouse 不要求主键唯一,所以您可以插入多条具有相同主键的行。 + +您可以在`PRIMARY KEY`与`ORDER BY`条件中使用`可为空的`类型的表达式,但强烈建议不要这么做。为了启用这项功能,请打开[allow_nullable_key](../../../operations/settings/index.md#allow-nullable-key),[NULLS_LAST](../../../sql-reference/statements/select/order-by.md#sorting-of-special-values)规则也适用于`ORDER BY`条件中有NULL值的情况下。 + +### 主键的选择 {#zhu-jian-de-xuan-ze} + +主键中列的数量并没有明确的限制。依据数据结构,您可以在主键包含多些或少些列。这样可以: + + - 改善索引的性能。 + + - 如果当前主键是 `(a, b)` ,在下列情况下添加另一个 `c` 列会提升性能: + + - 查询会使用 `c` 列作为条件 + - 很长的数据范围( `index_granularity` 的数倍)里 `(a, b)` 都是相同的值,并且这样的情况很普遍。换言之,就是加入另一列后,可以让您的查询略过很长的数据范围。 + + - 改善数据压缩。 + + ClickHouse 以主键排序片段数据,所以,数据的一致性越高,压缩越好。 + + - 在[CollapsingMergeTree](collapsingmergetree.md#table_engine-collapsingmergetree) 和 [SummingMergeTree](summingmergetree.md) 引擎里进行数据合并时会提供额外的处理逻辑。 + + 在这种情况下,指定与主键不同的 *排序键* 也是有意义的。 + +长的主键会对插入性能和内存消耗有负面影响,但主键中额外的列并不影响 `SELECT` 查询的性能。 + +可以使用 `ORDER BY tuple()` 语法创建没有主键的表。在这种情况下 ClickHouse 根据数据插入的顺序存储。如果在使用 `INSERT ... 
SELECT` 时希望保持数据的排序,请设置 [max_insert_threads = 1](../../../operations/settings/settings.md#settings-max-insert-threads)。 + +想要根据初始顺序进行数据查询,使用 [单线程查询](../../../operations/settings/settings.md#settings-max_threads) + +### 选择与排序键不同的主键 {#choosing-a-primary-key-that-differs-from-the-sorting-key} + +Clickhouse可以做到指定一个跟排序键不一样的主键,此时排序键用于在数据片段中进行排序,主键用于在索引文件中进行标记的写入。这种情况下,主键表达式元组必须是排序键表达式元组的前缀(即主键为(a,b),排序列必须为(a,b,******))。 + +当使用 [SummingMergeTree](summingmergetree.md) 和 [AggregatingMergeTree](aggregatingmergetree.md) 引擎时,这个特性非常有用。通常在使用这类引擎时,表里的列分两种:*维度* 和 *度量* 。典型的查询会通过任意的 `GROUP BY` 对度量列进行聚合并通过维度列进行过滤。由于 SummingMergeTree 和 AggregatingMergeTree 会对排序键相同的行进行聚合,所以把所有的维度放进排序键是很自然的做法。但这将导致排序键中包含大量的列,并且排序键会伴随着新添加的维度不断的更新。 + +在这种情况下合理的做法是,只保留少量的列在主键当中用于提升扫描效率,将维度列添加到排序键中。 + +对排序键进行 [ALTER](../../../sql-reference/statements/alter.md) 是轻量级的操作,因为当一个新列同时被加入到表里和排序键里时,已存在的数据片段并不需要修改。由于旧的排序键是新排序键的前缀,并且新添加的列中没有数据,因此在表修改时的数据对于新旧的排序键来说都是有序的。 + +### 索引和分区在查询中的应用 {#use-of-indexes-and-partitions-in-queries} + +对于 `SELECT` 查询,ClickHouse 分析是否可以使用索引。如果 `WHERE/PREWHERE` 子句具有下面这些表达式(作为完整WHERE条件的一部分或全部)则可以使用索引:进行相等/不相等的比较;对主键列或分区列进行`IN`运算、有固定前缀的`LIKE`运算(如name like 'test%')、函数运算(部分函数适用),还有对上述表达式进行逻辑运算。 + + +因此,在索引键的一个或多个区间上快速地执行查询是可能的。下面例子中,指定标签;指定标签和日期范围;指定标签和日期;指定多个标签和日期范围等执行查询,都会非常快。 + +当引擎配置如下时: + +``` sql + ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate) SETTINGS index_granularity=8192 +``` + +这种情况下,这些查询: + +``` sql +SELECT count() FROM table WHERE EventDate = toDate(now()) AND CounterID = 34 +SELECT count() FROM table WHERE EventDate = toDate(now()) AND (CounterID = 34 OR CounterID = 42) +SELECT count() FROM table WHERE ((EventDate >= toDate('2014-01-01') AND EventDate <= toDate('2014-01-31')) OR EventDate = toDate('2014-05-01')) AND CounterID IN (101500, 731962, 160656) AND (CounterID = 101500 OR EventDate != toDate('2014-05-01')) +``` + +ClickHouse 会依据主键索引剪掉不符合的数据,依据按月分区的分区键剪掉那些不包含符合数据的分区。 + +上文的查询显示,即使索引用于复杂表达式,因为读表操作经过优化,所以使用索引不会比完整扫描慢。 + +下面这个例子中,不会使用索引。 + +``` sql +SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%' +``` + +要检查 ClickHouse 执行一个查询时能否使用索引,可设置 [force_index_by_date](../../../operations/settings/settings.md#settings-force_index_by_date) 和 [force_primary_key](../../../operations/settings/settings.md) 。 + +使用按月分区的分区列允许只读取包含适当日期区间的数据块,这种情况下,数据块会包含很多天(最多整月)的数据。在块中,数据按主键排序,主键第一列可能不包含日期。因此,仅使用日期而没有用主键字段作为条件的查询将会导致需要读取超过这个指定日期以外的数据。 + +### 部分单调主键的使用 + +考虑这样的场景,比如一个月中的天数。它们在一个月的范围内形成一个[单调序列](https://zh.wikipedia.org/wiki/单调函数) ,但如果扩展到更大的时间范围它们就不再单调了。这就是一个部分单调序列。如果用户使用部分单调的主键创建表,ClickHouse同样会创建一个稀疏索引。当用户从这类表中查询数据时,ClickHouse 会对查询条件进行分析。如果用户希望获取两个索引标记之间的数据并且这两个标记在一个月以内,ClickHouse 可以在这种特殊情况下使用到索引,因为它可以计算出查询参数与索引标记之间的距离。 + +如果查询参数范围内的主键不是单调序列,那么 ClickHouse 无法使用索引。在这种情况下,ClickHouse 会进行全表扫描。 + +ClickHouse 在任何主键代表一个部分单调序列的情况下都会使用这个逻辑。 + +### 跳数索引 {#tiao-shu-suo-yin-fen-duan-hui-zong-suo-yin-shi-yan-xing-de} + +此索引在 `CREATE` 语句的列部分里定义。 + +``` sql +INDEX index_name expr TYPE type(...) GRANULARITY granularity_value +``` + +`*MergeTree` 系列的表可以指定跳数索引。 +跳数索引是指数据片段按照粒度(建表时指定的`index_granularity`)分割成小块后,将上述SQL的granularity_value数量的小块组合成一个大的块,对这些大块写入索引信息,这样有助于使用`where`筛选时跳过大量不必要的数据,减少`SELECT`需要读取的数据量。 + +**示例** + +``` sql +CREATE TABLE table_name +( + u64 UInt64, + i32 Int32, + s String, + ... + INDEX a (u64 * i32, s) TYPE minmax GRANULARITY 3, + INDEX b (u64 * length(s)) TYPE set(1000) GRANULARITY 4 +) ENGINE = MergeTree() +... 
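+
+-- 补充示意:跳数索引也可以在建表之后通过 ALTER 添加(这里沿用上面假设的 table_name 与列 s);
+-- 通过 ALTER 新增的索引默认只对之后写入的数据生效,可再执行 MATERIALIZE INDEX 为已有数据补建索引。
+ALTER TABLE table_name ADD INDEX c (lower(s)) TYPE tokenbf_v1(512, 3, 0) GRANULARITY 4;
+ALTER TABLE table_name MATERIALIZE INDEX c;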
+``` + +上例中的索引能让 ClickHouse 执行下面这些查询时减少读取数据量。 + +``` sql +SELECT count() FROM table WHERE s < 'z' +SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 +``` + +#### 可用的索引类型 {#table_engine-mergetree-data_skipping-indexes} + +- `minmax` + 存储指定表达式的极值(如果表达式是 `tuple` ,则存储 `tuple` 中每个元素的极值),这些信息用于跳过数据块,类似主键。 + +- `set(max_rows)` + 存储指定表达式的不重复值(不超过 `max_rows` 个,`max_rows=0` 则表示『无限制』)。这些信息可用于检查数据块是否满足 `WHERE` 条件。 + +- `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` + 存储一个包含数据块中所有 n元短语(ngram) 的 [布隆过滤器](https://en.wikipedia.org/wiki/Bloom_filter) 。只可用在字符串上。 + 可用于优化 `equals` , `like` 和 `in` 表达式的性能。 + - `n` – 短语长度。 + - `size_of_bloom_filter_in_bytes` – 布隆过滤器大小,字节为单位。(因为压缩得好,可以指定比较大的值,如 256 或 512)。 + - `number_of_hash_functions` – 布隆过滤器中使用的哈希函数的个数。 + - `random_seed` – 哈希函数的随机种子。 + +- `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` + 跟 `ngrambf_v1` 类似,但是存储的是token而不是ngrams。Token是由非字母数字的符号分割的序列。 + +- `bloom_filter(bloom_filter([false_positive])` – 为指定的列存储布隆过滤器 + + 可选参数`false_positive`用来指定从布隆过滤器收到错误响应的几率。取值范围是 (0,1),默认值:0.025 + + 支持的数据类型:`Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`。 + + 以下函数会用到这个索引: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions.md), [notIn](../../../sql-reference/functions/in-functions.md), [has](../../../sql-reference/functions/array-functions.md) + +``` sql +INDEX sample_index (u64 * length(s)) TYPE minmax GRANULARITY 4 +INDEX sample_index2 (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARITY 4 +INDEX sample_index3 (lower(str), str) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4 +``` + +#### 函数支持 {#functions-support} + +WHERE 子句中的条件可以包含对某列数据进行运算的函数表达式,如果列是索引的一部分,ClickHouse会在执行函数时尝试使用索引。不同的函数对索引的支持是不同的。 + +`set` 索引会对所有函数生效,其他索引对函数的生效情况见下表 + +| 函数 (操作符) / 索引 | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | +| ------------------------------------------------------------ | ----------- | ------ | ---------- | ---------- | ------------ | +| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#equals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | +| [endsWith](../../../sql-reference/functions/string-functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | +| [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | +| [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [less (\<)](../../../sql-reference/functions/comparison-functions.md#less) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#greater) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greaterOrEquals 
(\>=)](../../../sql-reference/functions/comparison-functions.md#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [has](../../../sql-reference/functions/array-functions.md#function-has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ | +| [hasAny](../../../sql-reference/functions/array-functions.md#function-hasAny) | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ | +| [hasAll](../../../sql-reference/functions/array-functions.md#function-hasAll) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ | +| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | + +常量参数小于 ngram 大小的函数不能使用 `ngrambf_v1` 进行查询优化。 + +:::note +布隆过滤器可能会包含不符合条件的匹配,所以 `ngrambf_v1`, `tokenbf_v1` 和 `bloom_filter` 索引不能用于结果返回为假的函数,例如: + +- 可以用来优化的场景 + - `s LIKE '%test%'` + - `NOT s NOT LIKE '%test%'` + - `s = 1` + - `NOT s != 1` + - `startsWith(s, 'test')` +- 不能用来优化的场景 + - `NOT s LIKE '%test%'` + - `s NOT LIKE '%test%'` + - `NOT s = 1` + - `s != 1` + - `NOT startsWith(s, 'test')` +::: + +## 并发数据访问 {#concurrent-data-access} + +对于表的并发访问,我们使用多版本机制。换言之,当一张表同时被读和更新时,数据从当前查询到的一组片段中读取。没有冗长的的锁。插入不会阻碍读取。 + +对表的读操作是自动并行的。 + +## 列和表的 TTL {#table_engine-mergetree-ttl} + +TTL用于设置值的生命周期,它既可以为整张表设置,也可以为每个列字段单独设置。表级别的 TTL 还会指定数据在磁盘和卷上自动转移的逻辑。 + +TTL 表达式的计算结果必须是 [日期](../../../engines/table-engines/mergetree-family/mergetree.md) 或 [日期时间](../../../engines/table-engines/mergetree-family/mergetree.md) 类型的字段。 + +示例: + +``` sql +TTL time_column +TTL time_column + interval +``` + +要定义`interval`, 需要使用 [时间间隔](../../../engines/table-engines/mergetree-family/mergetree.md#operators-datetime) 操作符。 + +``` sql +TTL date_time + INTERVAL 1 MONTH +TTL date_time + INTERVAL 15 HOUR +``` + +### 列 TTL {#mergetree-column-ttl} + +当列中的值过期时, ClickHouse会将它们替换成该列数据类型的默认值。如果数据片段中列的所有值均已过期,则ClickHouse 会从文件系统中的数据片段中删除此列。 + +`TTL`子句不能被用于主键字段。 + +**示例:** + +创建表时指定 `TTL` + +``` sql +CREATE TABLE example_table +( + d DateTime, + a Int TTL d + INTERVAL 1 MONTH, + b Int TTL d + INTERVAL 1 MONTH, + c String +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(d) +ORDER BY d; +``` + +为表中已存在的列字段添加 `TTL` + +``` sql +ALTER TABLE example_table + MODIFY COLUMN + c String TTL d + INTERVAL 1 DAY; +``` + +修改列字段的 `TTL` + +``` sql +ALTER TABLE example_table + MODIFY COLUMN + c String TTL d + INTERVAL 1 MONTH; +``` + +### 表 TTL {#mergetree-table-ttl} + +表可以设置一个用于移除过期行的表达式,以及多个用于在磁盘或卷上自动转移数据片段的表达式。当表中的行过期时,ClickHouse 会删除所有对应的行。对于数据片段的转移特性,必须所有的行都满足转移条件。 + +``` sql +TTL expr + [DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'][, DELETE|TO DISK 'aaa'|TO VOLUME 'bbb'] ... + [WHERE conditions] + [GROUP BY key_expr [SET v1 = aggr_func(v1) [, v2 = aggr_func(v2) ...]] ] + +``` + +TTL 规则的类型紧跟在每个 TTL 表达式后面,它会影响满足表达式时(到达指定时间时)应当执行的操作: + +- `DELETE` - 删除过期的行(默认操作); +- `TO DISK 'aaa'` - 将数据片段移动到磁盘 `aaa`; +- `TO VOLUME 'bbb'` - 将数据片段移动到卷 `bbb`. 
+- `GROUP BY` - 聚合过期的行 + +使用`WHERE`从句,您可以指定哪些过期的行会被删除或聚合(不适用于移动)。`GROUP BY`表达式必须是表主键的前缀。如果某列不是`GROUP BY`表达式的一部分,也没有在SET从句显示引用,结果行中相应列的值是随机的(就好像使用了`any`函数)。 + +**示例**: + +创建时指定 TTL + +``` sql +CREATE TABLE example_table +( + d DateTime, + a Int +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(d) +ORDER BY d +TTL d + INTERVAL 1 MONTH DELETE, + d + INTERVAL 1 WEEK TO VOLUME 'aaa', + d + INTERVAL 2 WEEK TO DISK 'bbb'; +``` + +修改表的 `TTL` + +``` sql +ALTER TABLE example_table + MODIFY TTL d + INTERVAL 1 DAY; +``` + +创建一张表,设置一个月后数据过期,这些过期的行中日期为星期一的删除: + +``` sql +CREATE TABLE table_with_where +( + d DateTime, + a Int +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(d) +ORDER BY d +TTL d + INTERVAL 1 MONTH DELETE WHERE toDayOfWeek(d) = 1; +``` + +创建一张表,设置过期的列会被聚合。列`x`包含每组行中的最大值,`y`为最小值,`d`为可能任意值。 + +``` sql +CREATE TABLE table_for_aggregation +( + d DateTime, + k1 Int, + k2 Int, + x Int, + y Int +) +ENGINE = MergeTree +ORDER BY (k1, k2) +TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y); +``` + +**删除数据** + +ClickHouse 在数据片段合并时会删除掉过期的数据。 + +当ClickHouse发现数据过期时, 它将会执行一个计划外的合并。要控制这类合并的频率, 您可以设置 `merge_with_ttl_timeout`。如果该值被设置的太低, 它将引发大量计划外的合并,这可能会消耗大量资源。 + +如果在两次合并的时间间隔中执行 `SELECT` 查询, 则可能会得到过期的数据。为了避免这种情况,可以在 `SELECT` 之前使用 [OPTIMIZE](../../../engines/table-engines/mergetree-family/mergetree.md#misc_operations-optimize) 。 + +## 使用多个块设备进行数据存储 {#table_engine-mergetree-multiple-volumes} + +### 介绍 {#introduction} + +MergeTree 系列表引擎可以将数据存储在多个块设备上。这对某些可以潜在被划分为“冷”“热”的表来说是很有用的。最新数据被定期的查询但只需要很小的空间。相反,详尽的历史数据很少被用到。如果有多块磁盘可用,那么“热”的数据可以放置在快速的磁盘上(比如 NVMe 固态硬盘或内存),“冷”的数据可以放在相对较慢的磁盘上(比如机械硬盘)。 + +数据片段是 `MergeTree` 引擎表的最小可移动单元。属于同一个数据片段的数据被存储在同一块磁盘上。数据片段会在后台自动的在磁盘间移动,也可以通过 [ALTER](../../../sql-reference/statements/alter.md#alter_move-partition) 查询来移动。 + +### 术语 {#terms} + +- 磁盘 — 挂载到文件系统的块设备 +- 默认磁盘 — 在服务器设置中通过 [path](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-path) 参数指定的数据存储 +- 卷 — 相同磁盘的顺序列表 (类似于 [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)) +- 存储策略 — 卷的集合及他们之间的数据移动规则 + + 以上名称的信息在Clickhouse中系统表[system.storage_policies](https://clickhouse.com/docs/zh/operations/system-tables/storage_policies/#system_tables-storage_policies)和[system.disks](https://clickhouse.com/docs/zh/operations/system-tables/disks/#system_tables-disks)体现。为了应用存储策略,可以在建表时使用`storage_policy`设置。 + +### 配置 {#table_engine-mergetree-multiple-volumes_configure} + +磁盘、卷和存储策略应当在主配置文件 `config.xml` 或 `config.d` 目录中的独立文件中的 `` 标签内定义。 + +配置结构: + +``` xml + + + + /mnt/fast_ssd/clickhouse/ + + + /mnt/hdd1/clickhouse/ + 10485760 + + + /mnt/hdd2/clickhouse/ + 10485760 + + + ... + + + ... + +``` + +标签: + +- `` — 磁盘名,名称必须与其他磁盘不同. +- `path` — 服务器将用来存储数据 (`data` 和 `shadow` 目录) 的路径, 应当以 ‘/’ 结尾. +- `keep_free_space_bytes` — 需要保留的剩余磁盘空间. + +磁盘定义的顺序无关紧要。 + +存储策略配置: + +``` xml + + ... + + + + + disk_name_from_disks_configuration + 1073741824 + + + + + + + 0.2 + + + + + + + + ... + +``` + +标签: + +- `policy_name_N` — 策略名称,不能重复。 +- `volume_name_N` — 卷名称,不能重复。 +- `disk` — 卷中的磁盘。 +- `max_data_part_size_bytes` — 卷中的磁盘可以存储的数据片段的最大大小。 +- `move_factor` — 当可用空间少于这个因子时,数据将自动的向下一个卷(如果有的话)移动 (默认值为 0.1)。 + +配置示例: + +``` xml + + ... + + + + + disk1 + disk2 + + + + + + + + fast_ssd + 1073741824 + + + disk1 + + + 0.2 + + + + +
+ jbod1 +
+ + external + +
+
+
+ ... +
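+
+    <!-- 结构示意(节选):按上文『标签』一节对 policy_name_N、volume_name_N、disk 的说明,
+         一个只含单个卷的策略完整写出来大致如下,这里沿用正文中的策略名 hdd_in_order、卷名 single
+         与磁盘名 disk1、disk2,仅供参考: -->
+    <policies>
+        <hdd_in_order>
+            <volumes>
+                <single>
+                    <disk>disk1</disk>
+                    <disk>disk2</disk>
+                </single>
+            </volumes>
+        </hdd_in_order>
+    </policies>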
+``` + +在给出的例子中, `hdd_in_order` 策略实现了 [循环制](https://zh.wikipedia.org/wiki/循环制) 方法。因此这个策略只定义了一个卷(`single`),数据片段会以循环的顺序全部存储到它的磁盘上。当有多个类似的磁盘挂载到系统上,但没有配置 RAID 时,这种策略非常有用。请注意一个每个独立的磁盘驱动都并不可靠,您可能需要用3份或更多的复制份数来补偿它。 + +如果在系统中有不同类型的磁盘可用,可以使用 `moving_from_ssd_to_hdd`。`hot` 卷由 SSD 磁盘(`fast_ssd`)组成,这个卷上可以存储的数据片段的最大大小为 1GB。所有大于 1GB 的数据片段都会被直接存储到 `cold` 卷上,`cold` 卷包含一个名为 `disk1` 的 HDD 磁盘。 +同样,一旦 `fast_ssd` 被填充超过 80%,数据会通过后台进程向 `disk1` 进行转移。 + +存储策略中卷的枚举顺序是很重要的。因为当一个卷被充满时,数据会向下一个卷转移。磁盘的枚举顺序同样重要,因为数据是依次存储在磁盘上的。 + +在创建表时,可以应用存储策略: + +``` sql +CREATE TABLE table_with_non_default_policy ( + EventDate Date, + OrderID UInt64, + BannerID UInt64, + SearchPhrase String +) ENGINE = MergeTree +ORDER BY (OrderID, BannerID) +PARTITION BY toYYYYMM(EventDate) +SETTINGS storage_policy = 'moving_from_ssd_to_hdd' +``` + +`default` 存储策略意味着只使用一个卷,这个卷只包含一个在 `` 中定义的磁盘。您可以使用[ALTER TABLE ... MODIFY SETTING]来修改存储策略,新的存储策略应该包含所有以前的磁盘和卷,并使用相同的名称。 + +可以通过 [background_move_pool_size](../../../operations/server-configuration-parameters/settings.md#background_move_pool_size) 设置调整执行后台任务的线程数。 + +### 详细说明 {#details} + +对于 `MergeTree` 表,数据通过以下不同的方式写入到磁盘当中: + +- 插入(`INSERT`查询) +- 后台合并和[数据变异](../../../sql-reference/statements/alter.md#alter-mutations) +- 从另一个副本下载 +- [ALTER TABLE … FREEZE PARTITION](../../../sql-reference/statements/alter.md#alter_freeze-partition) 冻结分区 + +除了数据变异和冻结分区以外的情况下,数据按照以下逻辑存储到卷或磁盘上: + +1. 首个卷(按定义顺序)拥有足够的磁盘空间存储数据片段(`unreserved_space > current_part_size`)并且允许存储给定数据片段的大小(`max_data_part_size_bytes > current_part_size`) +2. 在这个数据卷内,紧挨着先前存储数据的那块磁盘之后的磁盘,拥有比数据片段大的剩余空间。(`unreserved_space - keep_free_space_bytes > current_part_size`) + +更进一步,数据变异和分区冻结使用的是 [硬链接](https://en.wikipedia.org/wiki/Hard_link)。不同磁盘之间的硬链接是不支持的,所以在这种情况下数据片段都会被存储到原来的那一块磁盘上。 + +在后台,数据片段基于剩余空间(`move_factor`参数)根据卷在配置文件中定义的顺序进行转移。数据永远不会从最后一个移出也不会从第一个移入。可以通过系统表 [system.part_log](../../../operations/system-tables/part_log.md#system_tables-part-log) (字段 `type = MOVE_PART`) 和 [system.parts](../../../operations/system-tables/parts.md#system_tables-parts) (字段 `path` 和 `disk`) 来监控后台的移动情况。具体细节可以通过服务器日志查看。 + +用户可以通过 [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../../sql-reference/statements/alter.md#alter_move-partition) 强制移动一个数据片段或分区到另外一个卷,所有后台移动的限制都会被考虑在内。这个查询会自行启动,无需等待后台操作完成。如果没有足够的可用空间或任何必须条件没有被满足,用户会收到报错信息。 + +数据移动不会妨碍到数据复制。也就是说,同一张表的不同副本可以指定不同的存储策略。 + +在后台合并和数据变异之后,旧的数据片段会在一定时间后被移除 (`old_parts_lifetime`)。在这期间,他们不能被移动到其他的卷或磁盘。也就是说,直到数据片段被完全移除,它们仍然会被磁盘占用空间计算在内。 + +## 使用S3进行数据存储 {#using-s3-data-storage} + +`MergeTree`系列表引擎允许使用[S3](https://aws.amazon.com/s3/)存储数据,需要修改磁盘类型为`S3`。 + +示例配置: + +``` xml + + ... + + + s3 + https://storage.yandexcloud.net/my-bucket/root-path/ + your_access_key_id + your_secret_access_key + + your_base64_encoded_customer_key + + http://proxy1 + http://proxy2 + + 10000 + 5000 + 10 + 4 + 1000 + /var/lib/clickhouse/disks/s3/ + false + + + ... 
+ +``` + +必须的参数: + +- `endpoint` - S3的结点URL,以`path`或`virtual hosted`[格式](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html)书写。 +- `access_key_id` - S3的Access Key ID。 +- `secret_access_key` - S3的Secret Access Key。 + +可选参数: + +- `region` - S3的区域名称 +- `use_environment_credentials` - 从环境变量AWS_ACCESS_KEY_ID、AWS_SECRET_ACCESS_KEY和AWS_SESSION_TOKEN中读取认证参数。默认值为`false`。 +- `use_insecure_imds_request` - 如果设置为`true`,S3客户端在认证时会使用不安全的IMDS请求。默认值为`false`。 +- `proxy` - 访问S3结点URL时代理设置。每一个`uri`项的值都应该是合法的代理URL。 +- `connect_timeout_ms` - Socket连接超时时间,默认值为`10000`,即10秒。 +- `request_timeout_ms` - 请求超时时间,默认值为`5000`,即5秒。 +- `retry_attempts` - 请求失败后的重试次数,默认值为10。 +- `single_read_retries` - 读过程中连接丢失后重试次数,默认值为4。 +- `min_bytes_for_seek` - 使用查找操作,而不是顺序读操作的最小字节数,默认值为1000。 +- `metadata_path` - 本地存放S3元数据文件的路径,默认值为`/var/lib/clickhouse/disks//` +- `skip_access_check` - 如果为`true`,Clickhouse启动时不检查磁盘是否可用。默认为`false`。 +- `server_side_encryption_customer_key_base64` - 如果指定该项的值,请求时会加上为了访问SSE-C加密数据而必须的头信息。 + +S3磁盘也可以设置冷热存储: +```xml + + ... + + + s3 + https://storage.yandexcloud.net/my-bucket/root-path/ + your_access_key_id + your_secret_access_key + + + + + +
+ s3 +
+
+
+ + +
+ default +
+ + s3 + +
+ 0.2 +
+
+ ... +
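+
+    <!-- 结构示意(节选):一个把本地 default 磁盘作为第一卷、上面定义的 s3 磁盘作为第二卷的策略;
+         当第一卷剩余空间低于 move_factor 所指定的比例时,数据会向 s3 卷转移。
+         其中策略名 local_then_s3 与卷名 hot、cold 为假设,仅用于说明标签的嵌套关系: -->
+    <policies>
+        <local_then_s3>
+            <volumes>
+                <hot>
+                    <disk>default</disk>
+                </hot>
+                <cold>
+                    <disk>s3</disk>
+                </cold>
+            </volumes>
+            <move_factor>0.2</move_factor>
+        </local_then_s3>
+    </policies>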
+``` + +指定了`cold`选项后,本地磁盘剩余空间如果小于`move_factor * disk_size`,或有TTL设置时,数据就会定时迁移至S3了。 + +## 虚拟列 {#virtual-columns} + +- `_part` - 分区名称。 +- `_part_index` - 作为请求的结果,按顺序排列的分区数。 +- `_partition_id` — 分区名称。 +- `_part_uuid` - 唯一部分标识符(如果 MergeTree 设置`assign_part_uuids` 已启用)。 +- `_partition_value` — `partition by` 表达式的值(元组)。 +- `_sample_factor` - 采样因子(来自请求)。 diff --git a/docs/zh/faq/general/dbms-naming.md b/docs/zh/faq/general/dbms-naming.md index e732c2f054e..f24b3134093 100644 --- a/docs/zh/faq/general/dbms-naming.md +++ b/docs/zh/faq/general/dbms-naming.md @@ -1,18 +1,18 @@ ---- +--- slug: /zh/faq/general/dbms-naming -title: "\u201CClickHouse\u201D 有什么含义?" -toc_hidden: true -sidebar_position: 10 ---- - -# “ClickHouse” 有什么含义? {#what-does-clickhouse-mean} - -它是“**点击**流”和“数据**仓库**”的组合。它来自于Yandex最初的用例。在Metrica网站上,ClickHouse本应该保存人们在互联网上的所有点击记录,现在它仍然在做这项工作。你可以在[ClickHouse history](../../introduction/history.md)页面上阅读更多关于这个用例的信息。 - -这个由两部分组成的意思有两个结果: - -- 唯一正确的写“Click**H**ouse”的方式是用大写H。 -- 如果需要缩写,请使用“**CH**”。由于一些历史原因,缩写CK在中国也很流行,主要是因为中文中最早的一个关于ClickHouse的演讲使用了这种形式。 - -!!! info “有趣的事实” - 多年后ClickHouse闻名于世, 这种命名方法:结合各有深意的两个词被赞扬为最好的数据库命名方式, 卡内基梅隆大学数据库副教授[Andy Pavlo做的研究](https://www.cs.cmu.edu/~pavlo/blog/2020/03/on-naming-a-database-management-system.html) 。ClickHouse与Postgres共同获得“史上最佳数据库名”奖。 +title: "\u201CClickHouse\u201D 有什么含义?" +toc_hidden: true +sidebar_position: 10 +--- + +# “ClickHouse” 有什么含义? {#what-does-clickhouse-mean} + +它是“**点击**流”和“数据**仓库**”的组合。它来自于Yandex最初的用例。在Metrica网站上,ClickHouse本应该保存人们在互联网上的所有点击记录,现在它仍然在做这项工作。你可以在[ClickHouse history](../../introduction/history.md)页面上阅读更多关于这个用例的信息。 + +这个由两部分组成的意思有两个结果: + +- 唯一正确的写“Click**H**ouse”的方式是用大写H。 +- 如果需要缩写,请使用“**CH**”。由于一些历史原因,缩写CK在中国也很流行,主要是因为中文中最早的一个关于ClickHouse的演讲使用了这种形式。 + +!!! info “有趣的事实” + 多年后ClickHouse闻名于世, 这种命名方法:结合各有深意的两个词被赞扬为最好的数据库命名方式, 卡内基梅隆大学数据库副教授[Andy Pavlo做的研究](https://www.cs.cmu.edu/~pavlo/blog/2020/03/on-naming-a-database-management-system.html) 。ClickHouse与Postgres共同获得“史上最佳数据库名”奖。 diff --git a/docs/zh/faq/general/how-do-i-contribute-code-to-clickhouse.md b/docs/zh/faq/general/how-do-i-contribute-code-to-clickhouse.md index daa7abf525f..16f48baf7ef 100644 --- a/docs/zh/faq/general/how-do-i-contribute-code-to-clickhouse.md +++ b/docs/zh/faq/general/how-do-i-contribute-code-to-clickhouse.md @@ -1,18 +1,18 @@ ---- +--- slug: /zh/faq/general/how-do-i-contribute-code-to-clickhouse -title: 我如何为ClickHouse贡献代码? -toc_hidden: true -sidebar_position: 120 ---- - -# 我如何为ClickHouse贡献代码? {#how-do-i-contribute-code-to-clickhouse} - -ClickHouse是一个开源项目[在GitHub上开发](https://github.com/ClickHouse/ClickHouse)。 - -按照惯例,贡献指南发布在源代码库根目录的 [CONTRIBUTING.md](https://github.com/ClickHouse/ClickHouse/blob/master/CONTRIBUTING.md)文件中。 - -如果你想对ClickHouse提出实质性的改变建议,可以考虑[在GitHub上发布一个问题](https://github.com/ClickHouse/ClickHouse/issues/new/choose),解释一下你想做什么,先与维护人员和社区讨论一下。[此类RFC问题的例子](https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aissue+is%3Aopen+rfc)。 - -如果您的贡献与安全相关,也请查看[我们的安全政策](https://github.com/ClickHouse/ClickHouse/security/policy/)。 - - +title: 我如何为ClickHouse贡献代码? +toc_hidden: true +sidebar_position: 120 +--- + +# 我如何为ClickHouse贡献代码? 
{#how-do-i-contribute-code-to-clickhouse} + +ClickHouse是一个开源项目[在GitHub上开发](https://github.com/ClickHouse/ClickHouse)。 + +按照惯例,贡献指南发布在源代码库根目录的 [CONTRIBUTING.md](https://github.com/ClickHouse/ClickHouse/blob/master/CONTRIBUTING.md)文件中。 + +如果你想对ClickHouse提出实质性的改变建议,可以考虑[在GitHub上发布一个问题](https://github.com/ClickHouse/ClickHouse/issues/new/choose),解释一下你想做什么,先与维护人员和社区讨论一下。[此类RFC问题的例子](https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aissue+is%3Aopen+rfc)。 + +如果您的贡献与安全相关,也请查看[我们的安全政策](https://github.com/ClickHouse/ClickHouse/security/policy/)。 + + diff --git a/docs/zh/faq/integration/index.md b/docs/zh/faq/integration/index.md index 3a3f97761f3..b0ca2d05c05 100644 --- a/docs/zh/faq/integration/index.md +++ b/docs/zh/faq/integration/index.md @@ -1,22 +1,22 @@ ---- -slug: /zh/faq/integration/ -title: 关于集成ClickHouse和其他系统的问题 -toc_hidden_folder: true -sidebar_position: 4 -sidebar_label: Integration ---- - -# 关于集成ClickHouse和其他系统的问题 {#question-about-integrating-clickhouse-and-other-systems} - -问题: - -- [如何从 ClickHouse 导出数据到一个文件?](../../faq/integration/file-export.md) -- [如何导入JSON到ClickHouse?](../../faq/integration/json-import.md) -- [如果我用ODBC链接Oracle数据库出现编码问题该怎么办?](../../faq/integration/oracle-odbc.md) - - - -!!! info "没看到你要找的东西吗?" - 查看[其他faq类别](../../faq/index.md)或浏览左边栏中的主要文档文章。 - +--- +slug: /zh/faq/integration/ +title: 关于集成ClickHouse和其他系统的问题 +toc_hidden_folder: true +sidebar_position: 4 +sidebar_label: Integration +--- + +# 关于集成ClickHouse和其他系统的问题 {#question-about-integrating-clickhouse-and-other-systems} + +问题: + +- [如何从 ClickHouse 导出数据到一个文件?](../../faq/integration/file-export.md) +- [如何导入JSON到ClickHouse?](../../faq/integration/json-import.md) +- [如果我用ODBC链接Oracle数据库出现编码问题该怎么办?](../../faq/integration/oracle-odbc.md) + + + +!!! info "没看到你要找的东西吗?" + 查看[其他faq类别](../../faq/index.md)或浏览左边栏中的主要文档文章。 + {## [原文](https://clickhouse.com/docs/en/faq/integration/) ##} \ No newline at end of file diff --git a/docs/zh/faq/operations/index.md b/docs/zh/faq/operations/index.md index 153eda6199a..1fe84655ada 100644 --- a/docs/zh/faq/operations/index.md +++ b/docs/zh/faq/operations/index.md @@ -1,21 +1,21 @@ ---- -slug: /zh/faq/operations/ -title: 关于操作ClickHouse服务器和集群的问题 -toc_hidden_folder: true -sidebar_position: 3 -sidebar_label: Operations ---- - -# 关于操作ClickHouse服务器和集群的问题 {#question-about-operating-clickhouse-servers-and-clusters} - -问题: - -- [如果想在生产环境部署,需要用哪个版本的 ClickHouse 呢?](../../faq/operations/production.md) -- [是否可能从 ClickHouse 数据表中删除所有旧的数据记录?](../../faq/operations/delete-old-data.md) -- [ClickHouse支持多区域复制吗?](../../faq/operations/multi-region-replication.md) - - -!!! info "没看到你要找的东西吗?" - 查看[其他faq类别](../../faq/index.md)或浏览左边栏中的主要文档文章。 - -{## [原文](https://clickhouse.com/docs/en/faq/production/) ##} +--- +slug: /zh/faq/operations/ +title: 关于操作ClickHouse服务器和集群的问题 +toc_hidden_folder: true +sidebar_position: 3 +sidebar_label: Operations +--- + +# 关于操作ClickHouse服务器和集群的问题 {#question-about-operating-clickhouse-servers-and-clusters} + +问题: + +- [如果想在生产环境部署,需要用哪个版本的 ClickHouse 呢?](../../faq/operations/production.md) +- [是否可能从 ClickHouse 数据表中删除所有旧的数据记录?](../../faq/operations/delete-old-data.md) +- [ClickHouse支持多区域复制吗?](../../faq/operations/multi-region-replication.md) + + +!!! info "没看到你要找的东西吗?" 
+ 查看[其他faq类别](../../faq/index.md)或浏览左边栏中的主要文档文章。 + +{## [原文](https://clickhouse.com/docs/en/faq/production/) ##} diff --git a/docs/zh/faq/operations/multi-region-replication.md b/docs/zh/faq/operations/multi-region-replication.md index 05f856a9ea7..14df8b72eff 100644 --- a/docs/zh/faq/operations/multi-region-replication.md +++ b/docs/zh/faq/operations/multi-region-replication.md @@ -1,15 +1,15 @@ ---- +--- slug: /zh/faq/operations/multi-region-replication -title: ClickHouse支持多区域复制吗? -toc_hidden: true -sidebar_position: 30 ---- - -# ClickHouse支持多区域复制吗? {#does-clickhouse-support-multi-region-replication} - -简短的回答是“是的”。然而,我们建议将所有区域/数据中心之间的延迟保持在两位数字范围内,否则,在通过分布式共识协议时,写性能将受到影响。例如,美国海岸之间的复制可能会很好,但美国和欧洲之间就不行。 - -在配置方面,这与单区域复制没有区别,只是使用位于不同位置的主机作为副本。 - -更多信息,请参见[关于数据复制的完整文章](../../engines/table-engines/mergetree-family/replication.md)。 - +title: ClickHouse支持多区域复制吗? +toc_hidden: true +sidebar_position: 30 +--- + +# ClickHouse支持多区域复制吗? {#does-clickhouse-support-multi-region-replication} + +简短的回答是“是的”。然而,我们建议将所有区域/数据中心之间的延迟保持在两位数字范围内,否则,在通过分布式共识协议时,写性能将受到影响。例如,美国海岸之间的复制可能会很好,但美国和欧洲之间就不行。 + +在配置方面,这与单区域复制没有区别,只是使用位于不同位置的主机作为副本。 + +更多信息,请参见[关于数据复制的完整文章](../../engines/table-engines/mergetree-family/replication.md)。 + diff --git a/docs/zh/sql-reference/statements/select/order-by.md b/docs/zh/sql-reference/statements/select/order-by.md index 01f702a4b1e..3286fc9f9e7 100644 --- a/docs/zh/sql-reference/statements/select/order-by.md +++ b/docs/zh/sql-reference/statements/select/order-by.md @@ -61,6 +61,22 @@ sidebar_label: ORDER BY 我们只建议使用 `COLLATE` 对于少量行的最终排序,因为排序与 `COLLATE` 比正常的按字节排序效率低。 +## ORDER BY ALL + +`ORDER BY ALL` 对所有选定的列进行升序排序。 + +示例: + +``` sql +SELECT a, b, c FROM t ORDER BY ALL +``` + +等同于: + +``` sql +SELECT a, b, c FROM t ORDER BY a, b, c +``` + ## 实现细节 {#implementation-details} 更少的RAM使用,如果一个足够小 [LIMIT](../../../sql-reference/statements/select/limit.md) 除了指定 `ORDER BY`. 
否则,所花费的内存量与用于排序的数据量成正比。 对于分布式查询处理,如果 [GROUP BY](../../../sql-reference/statements/select/group-by.md) 省略排序,在远程服务器上部分完成排序,并将结果合并到请求者服务器上。 这意味着对于分布式排序,要排序的数据量可以大于单个服务器上的内存量。 diff --git a/docs/zh/sql-reference/table-functions/s3.md b/docs/zh/sql-reference/table-functions/s3.md index a62fa9ebb19..f7384a7526e 100644 --- a/docs/zh/sql-reference/table-functions/s3.md +++ b/docs/zh/sql-reference/table-functions/s3.md @@ -11,7 +11,7 @@ sidebar_label: s3 **语法** ``` sql -s3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compression]) +s3(path [,access_key_id, secret_access_key [,session_token]] ,format, structure, [compression]) ``` **参数** diff --git a/programs/bash-completion/completions/ch b/programs/bash-completion/completions/ch new file mode 120000 index 00000000000..7101fd9ed04 --- /dev/null +++ b/programs/bash-completion/completions/ch @@ -0,0 +1 @@ +clickhouse \ No newline at end of file diff --git a/programs/bash-completion/completions/chc b/programs/bash-completion/completions/chc new file mode 100644 index 00000000000..0e34cd4eab2 --- /dev/null +++ b/programs/bash-completion/completions/chc @@ -0,0 +1,2 @@ +[[ -v $_CLICKHOUSE_COMPLETION_LOADED ]] || source "$(dirname "${BASH_SOURCE[0]}")/clickhouse-bootstrap" +_complete_clickhouse_generic chc diff --git a/programs/bash-completion/completions/chl b/programs/bash-completion/completions/chl new file mode 100644 index 00000000000..6d0338bf122 --- /dev/null +++ b/programs/bash-completion/completions/chl @@ -0,0 +1,2 @@ +[[ -v $_CLICKHOUSE_COMPLETION_LOADED ]] || source "$(dirname "${BASH_SOURCE[0]}")/clickhouse-bootstrap" +_complete_clickhouse_generic chl diff --git a/programs/bash-completion/completions/clickhouse b/programs/bash-completion/completions/clickhouse index fc55398dcf1..ff0a60c60be 100644 --- a/programs/bash-completion/completions/clickhouse +++ b/programs/bash-completion/completions/clickhouse @@ -31,3 +31,4 @@ function _complete_for_clickhouse_entrypoint_bin() } _complete_clickhouse_generic clickhouse _complete_for_clickhouse_entrypoint_bin +_complete_clickhouse_generic ch _complete_for_clickhouse_entrypoint_bin diff --git a/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp index d6b8b38d84d..59fc6c0c17f 100644 --- a/programs/benchmark/Benchmark.cpp +++ b/programs/benchmark/Benchmark.cpp @@ -35,7 +35,6 @@ #include #include #include -#include /** A tool for evaluating ClickHouse performance. 
@@ -405,7 +404,7 @@ private: || sigaddset(&sig_set, SIGINT) || pthread_sigmask(SIG_BLOCK, &sig_set, nullptr)) { - throwFromErrno("Cannot block signal.", ErrorCodes::CANNOT_BLOCK_SIGNAL); + throw ErrnoException(ErrorCodes::CANNOT_BLOCK_SIGNAL, "Cannot block signal"); } while (true) diff --git a/programs/copier/ClusterCopierApp.cpp b/programs/copier/ClusterCopierApp.cpp index 8f24d13d379..e3371185aad 100644 --- a/programs/copier/ClusterCopierApp.cpp +++ b/programs/copier/ClusterCopierApp.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -159,6 +160,7 @@ void ClusterCopierApp::mainImpl() registerFunctions(); registerAggregateFunctions(); registerTableFunctions(); + registerDatabases(); registerStorages(); registerDictionaries(); registerDisks(/* global_skip_access_check= */ true); diff --git a/programs/format/Format.cpp b/programs/format/Format.cpp index d7d61bbcd3b..05ba86069d7 100644 --- a/programs/format/Format.cpp +++ b/programs/format/Format.cpp @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -24,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -32,6 +34,9 @@ #pragma GCC diagnostic ignored "-Wunused-function" #pragma GCC diagnostic ignored "-Wmissing-declarations" +extern const char * auto_time_zones[]; + + namespace DB { namespace ErrorCodes @@ -126,6 +131,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv) registerFunctions(); registerAggregateFunctions(); registerTableFunctions(); + registerDatabases(); registerStorages(); registerFormats(); @@ -133,9 +139,25 @@ int mainEntryClickHouseFormat(int argc, char ** argv) auto all_known_storage_names = StorageFactory::instance().getAllRegisteredNames(); auto all_known_data_type_names = DataTypeFactory::instance().getAllRegisteredNames(); + auto all_known_settings = Settings().getAllRegisteredNames(); + auto all_known_merge_tree_settings = MergeTreeSettings().getAllRegisteredNames(); additional_names.insert(all_known_storage_names.begin(), all_known_storage_names.end()); additional_names.insert(all_known_data_type_names.begin(), all_known_data_type_names.end()); + additional_names.insert(all_known_settings.begin(), all_known_settings.end()); + additional_names.insert(all_known_merge_tree_settings.begin(), all_known_merge_tree_settings.end()); + + for (auto * it = auto_time_zones; *it; ++it) + { + String time_zone_name = *it; + + /// Example: Europe/Amsterdam + Strings split; + boost::split(split, time_zone_name, [](char c){ return c == '/'; }); + for (const auto & word : split) + if (!word.empty()) + additional_names.insert(word); + } KnownIdentifierFunc is_known_identifier = [&](std::string_view name) { diff --git a/programs/install/Install.cpp b/programs/install/Install.cpp index 9d4d791263b..52f30098b38 100644 --- a/programs/install/Install.cpp +++ b/programs/install/Install.cpp @@ -328,7 +328,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv) fs::create_symlink(binary_self_canonical_path, main_bin_path); if (0 != chmod(binary_self_canonical_path.string().c_str(), S_IRUSR | S_IRGRP | S_IROTH | S_IXUSR | S_IXGRP | S_IXOTH)) - throwFromErrno(fmt::format("Cannot chmod {}", binary_self_canonical_path.string()), ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot chmod {}", binary_self_canonical_path.string()); } } else @@ -361,7 +361,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv) if (already_installed) { if (0 != chmod(main_bin_path.string().c_str(), S_IRUSR | S_IRGRP | S_IROTH | S_IXUSR | 
S_IXGRP | S_IXOTH)) - throwFromErrno(fmt::format("Cannot chmod {}", main_bin_path.string()), ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot chmod {}", main_bin_path.string()); } else { @@ -395,7 +395,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv) } if (0 != chmod(destination.c_str(), S_IRUSR | S_IRGRP | S_IROTH | S_IXUSR | S_IXGRP | S_IXOTH)) - throwFromErrno(fmt::format("Cannot chmod {}", main_bin_tmp_path.string()), ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot chmod {}", main_bin_tmp_path.string()); } catch (const Exception & e) { @@ -1122,7 +1122,7 @@ namespace return 0; } else - throwFromErrno(fmt::format("Cannot obtain the status of pid {} with `kill`", pid), ErrorCodes::CANNOT_KILL); + throw ErrnoException(ErrorCodes::CANNOT_KILL, "Cannot obtain the status of pid {} with `kill`", pid); } if (!pid) @@ -1143,7 +1143,7 @@ namespace if (0 == kill(pid, signal)) fmt::print("Sent {} signal to process with pid {}.\n", signal_name, pid); else - throwFromErrno(fmt::format("Cannot send {} signal", signal_name), ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot send {} signal", signal_name); size_t try_num = 0; for (; try_num < max_tries; ++try_num) diff --git a/programs/keeper/CMakeLists.txt b/programs/keeper/CMakeLists.txt index cdb1d89b18e..b8f538f821c 100644 --- a/programs/keeper/CMakeLists.txt +++ b/programs/keeper/CMakeLists.txt @@ -68,6 +68,7 @@ if (BUILD_STANDALONE_KEEPER) ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/waitServersToFinish.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/ServerType.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTPRequestHandlerFactoryMain.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/KeeperReadinessHandler.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServer.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/ReadHeaders.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServerConnection.cpp @@ -114,6 +115,7 @@ if (BUILD_STANDALONE_KEEPER) ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/LocalDirectorySyncGuard.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/TemporaryFileOnDisk.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/loadLocalDiskConfig.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/DiskType.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/IObjectStorage.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index e04e669abae..48d26233d94 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -32,6 +33,7 @@ #include #include #include +#include #include "Core/Defines.h" #include "config.h" @@ -289,6 +291,33 @@ try if (!config().has("keeper_server")) throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "Keeper configuration ( section) not found in config"); + auto updateMemorySoftLimitInConfig = [&](Poco::Util::AbstractConfiguration & config) + { + UInt64 memory_soft_limit = 0; + if (config.has("keeper_server.max_memory_usage_soft_limit")) + { + memory_soft_limit = config.getUInt64("keeper_server.max_memory_usage_soft_limit"); + } + + /// if memory soft limit is not set, we will use default value + if (memory_soft_limit == 0) + { + Float64 ratio = 0.9; + if (config.has("keeper_server.max_memory_usage_soft_limit_ratio")) + ratio = 
config.getDouble("keeper_server.max_memory_usage_soft_limit_ratio"); + + size_t physical_server_memory = getMemoryAmount(); + if (ratio > 0 && physical_server_memory > 0) + { + memory_soft_limit = static_cast(physical_server_memory * ratio); + config.setUInt64("keeper_server.max_memory_usage_soft_limit", memory_soft_limit); + } + } + LOG_INFO(log, "keeper_server.max_memory_usage_soft_limit is set to {}", formatReadableSizeWithBinarySuffix(memory_soft_limit)); + }; + + updateMemorySoftLimitInConfig(config()); + std::string path; if (config().has("keeper_server.storage_path")) @@ -328,6 +357,13 @@ try config().getUInt("max_thread_pool_free_size", 1000), config().getUInt("thread_pool_queue_size", 10000) ); + /// Wait for all threads to avoid possible use-after-free (for example logging objects can be already destroyed). + SCOPE_EXIT({ + Stopwatch watch; + LOG_INFO(log, "Waiting for background threads"); + GlobalThreadPool::instance().shutdown(); + LOG_INFO(log, "Background threads finished in {} ms", watch.elapsedMilliseconds()); + }); static ServerErrorHandler error_handler; Poco::ErrorHandler::set(&error_handler); @@ -459,6 +495,29 @@ try std::make_unique( std::move(my_http_context), createPrometheusMainHandlerFactory(*this, config_getter(), async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params)); }); + + /// HTTP control endpoints + port_name = "keeper_server.http_control.port"; + createServer(listen_host, port_name, listen_try, [&](UInt16 port) mutable + { + auto my_http_context = httpContext(); + Poco::Timespan my_keep_alive_timeout(config.getUInt("keep_alive_timeout", 10), 0); + Poco::Net::HTTPServerParams::Ptr my_http_params = new Poco::Net::HTTPServerParams; + my_http_params->setTimeout(my_http_context->getReceiveTimeout()); + my_http_params->setKeepAliveTimeout(my_keep_alive_timeout); + + Poco::Net::ServerSocket socket; + auto address = socketBindListen(socket, listen_host, port); + socket.setReceiveTimeout(my_http_context->getReceiveTimeout()); + socket.setSendTimeout(my_http_context->getSendTimeout()); + servers->emplace_back( + listen_host, + port_name, + "HTTP Control: http://" + address.toString(), + std::make_unique( + std::move(my_http_context), createKeeperHTTPControlMainHandlerFactory(config_getter(), global_context->getKeeperDispatcher(), "KeeperHTTPControlHandler-factory"), server_pool, socket, http_params) + ); + }); } for (auto & server : *servers) @@ -492,6 +551,8 @@ try { updateLevels(*config, logger()); + updateMemorySoftLimitInConfig(*config); + if (config->has("keeper_server")) global_context->updateKeeperConfiguration(*config); diff --git a/programs/library-bridge/LibraryBridgeHandlers.cpp b/programs/library-bridge/LibraryBridgeHandlers.cpp index 9642dd7ee63..7c77e633a44 100644 --- a/programs/library-bridge/LibraryBridgeHandlers.cpp +++ b/programs/library-bridge/LibraryBridgeHandlers.cpp @@ -2,6 +2,7 @@ #include "CatBoostLibraryHandler.h" #include "CatBoostLibraryHandlerFactory.h" +#include "Common/ProfileEvents.h" #include "ExternalDictionaryLibraryHandler.h" #include "ExternalDictionaryLibraryHandlerFactory.h" @@ -44,7 +45,7 @@ namespace response.setStatusAndReason(HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); if (!response.sent()) - *response.send() << message << std::endl; + *response.send() << message << '\n'; LOG_WARNING(&Poco::Logger::get("LibraryBridge"), fmt::runtime(message)); } @@ -96,7 +97,7 @@ ExternalDictionaryLibraryBridgeRequestHandler::ExternalDictionaryLibraryBridgeRe } -void 
ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) +void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/) { LOG_TRACE(log, "Request URI: {}", request.getURI()); HTMLForm params(getContext()->getSettingsRef(), request); @@ -384,7 +385,7 @@ ExternalDictionaryLibraryBridgeExistsHandler::ExternalDictionaryLibraryBridgeExi } -void ExternalDictionaryLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) +void ExternalDictionaryLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/) { try { @@ -423,7 +424,7 @@ CatBoostLibraryBridgeRequestHandler::CatBoostLibraryBridgeRequestHandler( } -void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) +void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/) { LOG_TRACE(log, "Request URI: {}", request.getURI()); HTMLForm params(getContext()->getSettingsRef(), request); @@ -621,7 +622,7 @@ CatBoostLibraryBridgeExistsHandler::CatBoostLibraryBridgeExistsHandler(size_t ke } -void CatBoostLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) +void CatBoostLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/) { try { diff --git a/programs/library-bridge/LibraryBridgeHandlers.h b/programs/library-bridge/LibraryBridgeHandlers.h index 16815e84723..4f08d7a6084 100644 --- a/programs/library-bridge/LibraryBridgeHandlers.h +++ b/programs/library-bridge/LibraryBridgeHandlers.h @@ -20,7 +20,7 @@ class ExternalDictionaryLibraryBridgeRequestHandler : public HTTPRequestHandler, public: ExternalDictionaryLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_); - void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; + void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: static constexpr inline auto FORMAT = "RowBinary"; @@ -36,7 +36,7 @@ class ExternalDictionaryLibraryBridgeExistsHandler : public HTTPRequestHandler, public: ExternalDictionaryLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_); - void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; + void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: const size_t keep_alive_timeout; @@ -65,7 +65,7 @@ class CatBoostLibraryBridgeRequestHandler : public HTTPRequestHandler, WithConte public: CatBoostLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_); - void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; + void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: const size_t keep_alive_timeout; @@ -79,7 +79,7 @@ class CatBoostLibraryBridgeExistsHandler : public HTTPRequestHandler, WithContex public: CatBoostLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr 
context_); - void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; + void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: const size_t keep_alive_timeout; diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index fbb64ea1135..ccd3d84630f 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -43,7 +44,7 @@ #include #include #include -#include +#include #include #include #include @@ -489,6 +490,7 @@ try registerFunctions(); registerAggregateFunctions(); registerTableFunctions(); + registerDatabases(); registerStorages(); registerDictionaries(); registerDisks(/* global_skip_access_check= */ true); @@ -726,12 +728,7 @@ void LocalServer::processConfig() /// We load temporary database first, because projections need it. DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase(); - /** Init dummy default DB - * NOTE: We force using isolated default database to avoid conflicts with default database from server environment - * Otherwise, metadata of temporary File(format, EXPLICIT_PATH) tables will pollute metadata/ directory; - * if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons. - */ - std::string default_database = config().getString("default_database", "_local"); + std::string default_database = config().getString("default_database", "default"); DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context)); global_context->setCurrentDatabase(default_database); @@ -744,7 +741,7 @@ void LocalServer::processConfig() LOG_DEBUG(log, "Loading metadata from {}", path); auto startup_system_tasks = loadMetadataSystem(global_context); - attachSystemTablesLocal(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::SYSTEM_DATABASE)); + attachSystemTablesServer(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::SYSTEM_DATABASE), false); attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA)); attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE)); waitLoad(TablesLoaderForegroundPoolId, startup_system_tasks); @@ -757,13 +754,13 @@ void LocalServer::processConfig() } /// For ClickHouse local if path is not set the loader will be disabled. 
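A quick way to observe the user-visible `processConfig` changes above: the implicit database of clickhouse-local is now named `default` rather than `_local`, and system tables are attached server-style. This is an illustrative sketch assuming a `clickhouse` binary built with this patch; the exact list of databases may differ:

```bash
# The implicit database of clickhouse-local is now called "default".
clickhouse local --query "SELECT currentDatabase()"

# Server-style system tables are attached, so the usual system and
# information_schema databases show up here as well.
clickhouse local --query "SELECT name FROM system.databases ORDER BY name"
```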
- global_context->getUserDefinedSQLObjectsLoader().loadObjects(); + global_context->getUserDefinedSQLObjectsStorage().loadObjects(); LOG_DEBUG(log, "Loaded metadata."); } else if (!config().has("no-system-tables")) { - attachSystemTablesLocal(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::SYSTEM_DATABASE)); + attachSystemTablesServer(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::SYSTEM_DATABASE), false); attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA)); attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE)); } @@ -776,6 +773,7 @@ void LocalServer::processConfig() global_context->setQueryKindInitial(); global_context->setQueryKind(query_kind); + global_context->setQueryParameters(query_parameters); } @@ -822,6 +820,7 @@ void LocalServer::printHelpMessage([[maybe_unused]] const OptionsDescription & o std::cout << getHelpHeader() << "\n"; std::cout << options_description.main_description.value() << "\n"; std::cout << getHelpFooter() << "\n"; + std::cout << "In addition, --param_name=value can be specified for substitution of parameters for parametrized queries.\n"; #endif } @@ -898,7 +897,31 @@ void LocalServer::readArguments(int argc, char ** argv, Arguments & common_argum for (int arg_num = 1; arg_num < argc; ++arg_num) { std::string_view arg = argv[arg_num]; - if (arg == "--multiquery" && (arg_num + 1) < argc && !std::string_view(argv[arg_num + 1]).starts_with('-')) + /// Parameter arg after underline. + if (arg.starts_with("--param_")) + { + auto param_continuation = arg.substr(strlen("--param_")); + auto equal_pos = param_continuation.find_first_of('='); + + if (equal_pos == std::string::npos) + { + /// param_name value + ++arg_num; + if (arg_num >= argc) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter requires value"); + arg = argv[arg_num]; + query_parameters.emplace(String(param_continuation), String(arg)); + } + else + { + if (equal_pos == 0) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter name cannot be empty"); + + /// param_name=value + query_parameters.emplace(param_continuation.substr(0, equal_pos), param_continuation.substr(equal_pos + 1)); + } + } + else if (arg == "--multiquery" && (arg_num + 1) < argc && !std::string_view(argv[arg_num + 1]).starts_with('-')) { /// Transform the abbreviated syntax '--multiquery ' into the full syntax '--multiquery -q ' ++arg_num; diff --git a/programs/main.cpp b/programs/main.cpp index 959984d565d..7d07112de66 100644 --- a/programs/main.cpp +++ b/programs/main.cpp @@ -158,7 +158,6 @@ std::pair clickhouse_applications[] = std::pair clickhouse_short_names[] = { #if ENABLE_CLICKHOUSE_LOCAL - {"ch", "local"}, {"chl", "local"}, #endif #if ENABLE_CLICKHOUSE_CLIENT @@ -502,6 +501,17 @@ int main(int argc_, char ** argv_) } } + /// Interpret binary without argument or with arguments starts with dash + /// ('-') as clickhouse-local for better usability: + /// + /// clickhouse # dumps help + /// clickhouse -q 'select 1' # use local + /// clickhouse # spawn local + /// clickhouse local # spawn local + /// + if (main_func == printHelp && !argv.empty() && (argv.size() == 1 || argv[1][0] == '-')) + main_func = mainEntryClickHouseLocal; + return main_func(static_cast(argv.size()), argv.data()); } #endif diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp index 
2cb5250cdf2..7e09d5e8046 100644 --- a/programs/obfuscator/Obfuscator.cpp +++ b/programs/obfuscator/Obfuscator.cpp @@ -1307,7 +1307,7 @@ try /// stdin must be seekable auto res = lseek(file->getFD(), 0, SEEK_SET); if (-1 == res) - throwFromErrno("Input must be seekable file (it will be read twice).", ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + throw ErrnoException(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, "Input must be seekable file (it will be read twice)"); SingleReadBufferIterator read_buffer_iterator(std::move(file)); schema_columns = readSchemaFromFormat(input_format, {}, read_buffer_iterator, false, context_const); @@ -1336,7 +1336,7 @@ try /// stdin must be seekable auto res = lseek(file_in.getFD(), 0, SEEK_SET); if (-1 == res) - throwFromErrno("Input must be seekable file (it will be read twice).", ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + throw ErrnoException(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, "Input must be seekable file (it will be read twice)"); } Obfuscator obfuscator(header, seed, markov_model_params); diff --git a/programs/odbc-bridge/ColumnInfoHandler.cpp b/programs/odbc-bridge/ColumnInfoHandler.cpp index 434abf0bf14..774883657b7 100644 --- a/programs/odbc-bridge/ColumnInfoHandler.cpp +++ b/programs/odbc-bridge/ColumnInfoHandler.cpp @@ -69,7 +69,7 @@ namespace } -void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) +void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/) { HTMLForm params(getContext()->getSettingsRef(), request, request.getStream()); LOG_TRACE(log, "Request URI: {}", request.getURI()); @@ -78,7 +78,7 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ { response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); if (!response.sent()) - *response.send() << message << std::endl; + *response.send() << message << '\n'; LOG_WARNING(log, fmt::runtime(message)); }; diff --git a/programs/odbc-bridge/ColumnInfoHandler.h b/programs/odbc-bridge/ColumnInfoHandler.h index 3ba8b182ba6..e3087701182 100644 --- a/programs/odbc-bridge/ColumnInfoHandler.h +++ b/programs/odbc-bridge/ColumnInfoHandler.h @@ -23,7 +23,7 @@ public: { } - void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; + void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: Poco::Logger * log; diff --git a/programs/odbc-bridge/IdentifierQuoteHandler.cpp b/programs/odbc-bridge/IdentifierQuoteHandler.cpp index f622995bf15..a23efb112de 100644 --- a/programs/odbc-bridge/IdentifierQuoteHandler.cpp +++ b/programs/odbc-bridge/IdentifierQuoteHandler.cpp @@ -21,7 +21,7 @@ namespace DB { -void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) +void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/) { HTMLForm params(getContext()->getSettingsRef(), request, request.getStream()); LOG_TRACE(log, "Request URI: {}", request.getURI()); @@ -30,7 +30,7 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ { response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); if (!response.sent()) - *response.send() << message << std::endl; + response.send()->writeln(message); LOG_WARNING(log, fmt::runtime(message)); }; diff --git 
a/programs/odbc-bridge/IdentifierQuoteHandler.h b/programs/odbc-bridge/IdentifierQuoteHandler.h index d57bbc0ca8a..ff5c02ca07b 100644 --- a/programs/odbc-bridge/IdentifierQuoteHandler.h +++ b/programs/odbc-bridge/IdentifierQuoteHandler.h @@ -21,7 +21,7 @@ public: { } - void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; + void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: Poco::Logger * log; diff --git a/programs/odbc-bridge/MainHandler.cpp b/programs/odbc-bridge/MainHandler.cpp index 9130b3e0f47..e350afa2b10 100644 --- a/programs/odbc-bridge/MainHandler.cpp +++ b/programs/odbc-bridge/MainHandler.cpp @@ -46,12 +46,12 @@ void ODBCHandler::processError(HTTPServerResponse & response, const std::string { response.setStatusAndReason(HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); if (!response.sent()) - *response.send() << message << std::endl; + *response.send() << message << '\n'; LOG_WARNING(log, fmt::runtime(message)); } -void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) +void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/) { HTMLForm params(getContext()->getSettingsRef(), request); LOG_TRACE(log, "Request URI: {}", request.getURI()); diff --git a/programs/odbc-bridge/MainHandler.h b/programs/odbc-bridge/MainHandler.h index bc0fca8b9a5..7977245ff82 100644 --- a/programs/odbc-bridge/MainHandler.h +++ b/programs/odbc-bridge/MainHandler.h @@ -30,7 +30,7 @@ public: { } - void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; + void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: Poco::Logger * log; diff --git a/programs/odbc-bridge/PingHandler.cpp b/programs/odbc-bridge/PingHandler.cpp index e3ab5e5cd00..80d0e2bf4a9 100644 --- a/programs/odbc-bridge/PingHandler.cpp +++ b/programs/odbc-bridge/PingHandler.cpp @@ -6,7 +6,7 @@ namespace DB { -void PingHandler::handleRequest(HTTPServerRequest & /* request */, HTTPServerResponse & response) +void PingHandler::handleRequest(HTTPServerRequest & /* request */, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/) { try { diff --git a/programs/odbc-bridge/PingHandler.h b/programs/odbc-bridge/PingHandler.h index c969ec55af7..c5447107e0c 100644 --- a/programs/odbc-bridge/PingHandler.h +++ b/programs/odbc-bridge/PingHandler.h @@ -10,7 +10,7 @@ class PingHandler : public HTTPRequestHandler { public: explicit PingHandler(size_t keep_alive_timeout_) : keep_alive_timeout(keep_alive_timeout_) {} - void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; + void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: size_t keep_alive_timeout; diff --git a/programs/odbc-bridge/SchemaAllowedHandler.cpp b/programs/odbc-bridge/SchemaAllowedHandler.cpp index 020359f51fd..c7025ca4311 100644 --- a/programs/odbc-bridge/SchemaAllowedHandler.cpp +++ b/programs/odbc-bridge/SchemaAllowedHandler.cpp @@ -29,7 +29,7 @@ namespace } -void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) +void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/) { 
HTMLForm params(getContext()->getSettingsRef(), request, request.getStream()); LOG_TRACE(log, "Request URI: {}", request.getURI()); @@ -38,7 +38,7 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer { response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR); if (!response.sent()) - *response.send() << message << std::endl; + *response.send() << message << '\n'; LOG_WARNING(log, fmt::runtime(message)); }; diff --git a/programs/odbc-bridge/SchemaAllowedHandler.h b/programs/odbc-bridge/SchemaAllowedHandler.h index cb71a6fb5a2..aa0b04b1d31 100644 --- a/programs/odbc-bridge/SchemaAllowedHandler.h +++ b/programs/odbc-bridge/SchemaAllowedHandler.h @@ -24,7 +24,7 @@ public: { } - void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; + void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: Poco::Logger * log; diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index de0cfb9b9fa..7ad7460f6f8 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -66,11 +67,12 @@ #include #include #include -#include +#include #include #include #include #include +#include #include #include #include @@ -92,6 +94,7 @@ #include #include #include +#include #include #include #include @@ -149,6 +152,18 @@ namespace ProfileEvents { extern const Event MainConfigLoads; extern const Event ServerStartupMilliseconds; + extern const Event InterfaceNativeSendBytes; + extern const Event InterfaceNativeReceiveBytes; + extern const Event InterfaceHTTPSendBytes; + extern const Event InterfaceHTTPReceiveBytes; + extern const Event InterfacePrometheusSendBytes; + extern const Event InterfacePrometheusReceiveBytes; + extern const Event InterfaceInterserverSendBytes; + extern const Event InterfaceInterserverReceiveBytes; + extern const Event InterfaceMySQLSendBytes; + extern const Event InterfaceMySQLReceiveBytes; + extern const Event InterfacePostgreSQLSendBytes; + extern const Event InterfacePostgreSQLReceiveBytes; } namespace fs = std::filesystem; @@ -646,6 +661,7 @@ try registerFunctions(); registerAggregateFunctions(); registerTableFunctions(); + registerDatabases(); registerStorages(); registerDictionaries(); registerDisks(/* global_skip_access_check= */ false); @@ -657,6 +673,11 @@ try CurrentMetrics::set(CurrentMetrics::Revision, ClickHouseRevision::getVersionRevision()); CurrentMetrics::set(CurrentMetrics::VersionInteger, ClickHouseRevision::getVersionInteger()); + Poco::ThreadPool server_pool(3, server_settings.max_connections); + std::mutex servers_lock; + std::vector servers; + std::vector servers_to_start_before_tables; + /** Context contains all that query execution is dependent: * settings, available functions, data types, aggregate functions, databases, ... */ @@ -697,6 +718,68 @@ try server_settings.max_thread_pool_size, server_settings.max_thread_pool_free_size, server_settings.thread_pool_queue_size); + /// Wait for all threads to avoid possible use-after-free (for example logging objects can be already destroyed). 
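The `Interface*SendBytes` / `Interface*ReceiveBytes` events declared above are ordinary profile events, so on a server running this patch they should be readable like any other counter. A sketch (the event registration itself lives outside this hunk, so treating them as regular `system.events` entries is an assumption):

```bash
# Cumulative per-protocol traffic counters since server startup.
clickhouse-client --query "
    SELECT event, value
    FROM system.events
    WHERE event LIKE 'Interface%Bytes'
    ORDER BY event
"
```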
+ SCOPE_EXIT({ + Stopwatch watch; + LOG_INFO(log, "Waiting for background threads"); + GlobalThreadPool::instance().shutdown(); + LOG_INFO(log, "Background threads finished in {} ms", watch.elapsedMilliseconds()); + }); + + /// NOTE: global context should be destroyed *before* GlobalThreadPool::shutdown() + /// Otherwise GlobalThreadPool::shutdown() will hang, since Context holds some threads. + SCOPE_EXIT({ + /** Ask to cancel background jobs all table engines, + * and also query_log. + * It is important to do early, not in destructor of Context, because + * table engines could use Context on destroy. + */ + LOG_INFO(log, "Shutting down storages."); + + global_context->shutdown(); + + LOG_DEBUG(log, "Shut down storages."); + + if (!servers_to_start_before_tables.empty()) + { + LOG_DEBUG(log, "Waiting for current connections to servers for tables to finish."); + size_t current_connections = 0; + { + std::lock_guard lock(servers_lock); + for (auto & server : servers_to_start_before_tables) + { + server.stop(); + current_connections += server.currentConnections(); + } + } + + if (current_connections) + LOG_INFO(log, "Closed all listening sockets. Waiting for {} outstanding connections.", current_connections); + else + LOG_INFO(log, "Closed all listening sockets."); + + if (current_connections > 0) + current_connections = waitServersToFinish(servers_to_start_before_tables, servers_lock, server_settings.shutdown_wait_unfinished); + + if (current_connections) + LOG_INFO(log, "Closed connections to servers for tables. But {} remain. Probably some tables of other users cannot finish their connections after context shutdown.", current_connections); + else + LOG_INFO(log, "Closed connections to servers for tables."); + } + + global_context->shutdownKeeperDispatcher(); + + /// Wait server pool to avoid use-after-free of destroyed context in the handlers + server_pool.joinAll(); + + /** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available. + * At this moment, no one could own shared part of Context. + */ + global_context.reset(); + shared_context.reset(); + LOG_DEBUG(log, "Destroyed global context."); + }); + #if USE_AZURE_BLOB_STORAGE /// It makes sense to deinitialize libxml after joining of all threads @@ -755,10 +838,6 @@ try } } - Poco::ThreadPool server_pool(3, server_settings.max_connections); - std::mutex servers_lock; - std::vector servers; - std::vector servers_to_start_before_tables; /// This object will periodically calculate some metrics. 
ServerAsynchronousMetrics async_metrics( global_context, @@ -1281,6 +1360,9 @@ try global_context->setMaxTableSizeToDrop(server_settings_.max_table_size_to_drop); global_context->setMaxPartitionSizeToDrop(server_settings_.max_partition_size_to_drop); + global_context->setMaxTableNumToWarn(server_settings_.max_table_num_to_warn); + global_context->setMaxDatabaseNumToWarn(server_settings_.max_database_num_to_warn); + global_context->setMaxPartNumToWarn(server_settings_.max_part_num_to_warn); ConcurrencyControl::SlotCount concurrent_threads_soft_limit = ConcurrencyControl::Unlimited; if (server_settings_.concurrent_threads_soft_limit_num > 0 && server_settings_.concurrent_threads_soft_limit_num < concurrent_threads_soft_limit) @@ -1383,8 +1465,6 @@ try global_context->reloadAuxiliaryZooKeepersConfigIfChanged(config); - global_context->reloadQueryMaskingRulesIfChanged(config); - std::lock_guard lock(servers_lock); updateServers(*config, server_pool, async_metrics, servers, servers_to_start_before_tables); } @@ -1405,6 +1485,8 @@ try #endif NamedCollectionUtils::reloadFromConfig(*config); + FileCacheFactory::instance().updateSettingsFromConfig(*config); + ProfileEvents::increment(ProfileEvents::MainConfigLoads); /// Must be the last. @@ -1488,6 +1570,34 @@ try throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support."); #endif }); + + /// HTTP control endpoints + port_name = "keeper_server.http_control.port"; + createServer(config(), listen_host, port_name, listen_try, /* start_server: */ false, + servers_to_start_before_tables, + [&](UInt16 port) -> ProtocolServerAdapter + { + auto http_context = httpContext(); + Poco::Timespan keep_alive_timeout(config().getUInt("keep_alive_timeout", 10), 0); + Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams; + http_params->setTimeout(http_context->getReceiveTimeout()); + http_params->setKeepAliveTimeout(keep_alive_timeout); + + Poco::Net::ServerSocket socket; + auto address = socketBindListen(config(), socket, listen_host, port); + socket.setReceiveTimeout(http_context->getReceiveTimeout()); + socket.setSendTimeout(http_context->getSendTimeout()); + return ProtocolServerAdapter( + listen_host, + port_name, + "HTTP Control: http://" + address.toString(), + std::make_unique( + std::move(http_context), + createKeeperHTTPControlMainHandlerFactory( + config_getter(), + global_context->getKeeperDispatcher(), + "KeeperHTTPControlHandler-factory"), server_pool, socket, http_params)); + }); } #else throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "ClickHouse server built without NuRaft library. Cannot use internal coordination."); @@ -1598,60 +1708,6 @@ try /// try set up encryption. There are some errors in config, error will be printed and server wouldn't start. CompressionCodecEncrypted::Configuration::instance().load(config(), "encryption_codecs"); - SCOPE_EXIT({ - async_metrics.stop(); - - /** Ask to cancel background jobs all table engines, - * and also query_log. - * It is important to do early, not in destructor of Context, because - * table engines could use Context on destroy. 
- */ - LOG_INFO(log, "Shutting down storages."); - - global_context->shutdown(); - - LOG_DEBUG(log, "Shut down storages."); - - if (!servers_to_start_before_tables.empty()) - { - LOG_DEBUG(log, "Waiting for current connections to servers for tables to finish."); - size_t current_connections = 0; - { - std::lock_guard lock(servers_lock); - for (auto & server : servers_to_start_before_tables) - { - server.stop(); - current_connections += server.currentConnections(); - } - } - - if (current_connections) - LOG_INFO(log, "Closed all listening sockets. Waiting for {} outstanding connections.", current_connections); - else - LOG_INFO(log, "Closed all listening sockets."); - - if (current_connections > 0) - current_connections = waitServersToFinish(servers_to_start_before_tables, servers_lock, server_settings.shutdown_wait_unfinished); - - if (current_connections) - LOG_INFO(log, "Closed connections to servers for tables. But {} remain. Probably some tables of other users cannot finish their connections after context shutdown.", current_connections); - else - LOG_INFO(log, "Closed connections to servers for tables."); - - global_context->shutdownKeeperDispatcher(); - } - - /// Wait server pool to avoid use-after-free of destroyed context in the handlers - server_pool.joinAll(); - - /** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available. - * At this moment, no one could own shared part of Context. - */ - global_context.reset(); - shared_context.reset(); - LOG_DEBUG(log, "Destroyed global context."); - }); - /// DNSCacheUpdater uses BackgroundSchedulePool which lives in shared context /// and thus this object must be created after the SCOPE_EXIT object where shared /// context is destroyed. @@ -1714,7 +1770,7 @@ try /// After loading validate that default database exists database_catalog.assertDatabaseExists(default_database); /// Load user-defined SQL functions. - global_context->getUserDefinedSQLObjectsLoader().loadObjects(); + global_context->getUserDefinedSQLObjectsStorage().loadObjects(); } catch (...) 
{ @@ -2003,7 +2059,7 @@ std::unique_ptr Server::buildProtocolStackFromConfig( auto create_factory = [&](const std::string & type, const std::string & conf_name) -> TCPServerConnectionFactory::Ptr { if (type == "tcp") - return TCPServerConnectionFactory::Ptr(new TCPHandlerFactory(*this, false, false)); + return TCPServerConnectionFactory::Ptr(new TCPHandlerFactory(*this, false, false, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes)); if (type == "tls") #if USE_SSL @@ -2015,20 +2071,20 @@ std::unique_ptr Server::buildProtocolStackFromConfig( if (type == "proxy1") return TCPServerConnectionFactory::Ptr(new ProxyV1HandlerFactory(*this, conf_name)); if (type == "mysql") - return TCPServerConnectionFactory::Ptr(new MySQLHandlerFactory(*this)); + return TCPServerConnectionFactory::Ptr(new MySQLHandlerFactory(*this, ProfileEvents::InterfaceMySQLReceiveBytes, ProfileEvents::InterfaceMySQLSendBytes)); if (type == "postgres") - return TCPServerConnectionFactory::Ptr(new PostgreSQLHandlerFactory(*this)); + return TCPServerConnectionFactory::Ptr(new PostgreSQLHandlerFactory(*this, ProfileEvents::InterfacePostgreSQLReceiveBytes, ProfileEvents::InterfacePostgreSQLSendBytes)); if (type == "http") return TCPServerConnectionFactory::Ptr( - new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory")) + new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"), ProfileEvents::InterfaceHTTPReceiveBytes, ProfileEvents::InterfaceHTTPSendBytes) ); if (type == "prometheus") return TCPServerConnectionFactory::Ptr( - new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory")) + new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), ProfileEvents::InterfacePrometheusReceiveBytes, ProfileEvents::InterfacePrometheusSendBytes) ); if (type == "interserver") return TCPServerConnectionFactory::Ptr( - new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory")) + new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"), ProfileEvents::InterfaceInterserverReceiveBytes, ProfileEvents::InterfaceInterserverSendBytes) ); throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol configuration error, unknown protocol name '{}'", type); @@ -2086,10 +2142,9 @@ void Server::createServers( { const Settings & settings = global_context->getSettingsRef(); - Poco::Timespan keep_alive_timeout(config.getUInt("keep_alive_timeout", 10), 0); Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams; http_params->setTimeout(settings.http_receive_timeout); - http_params->setKeepAliveTimeout(keep_alive_timeout); + http_params->setKeepAliveTimeout(global_context->getServerSettings().keep_alive_timeout); Poco::Util::AbstractConfiguration::Keys protocols; config.keys("protocols", protocols); @@ -2162,7 +2217,7 @@ void Server::createServers( port_name, "http://" + address.toString(), std::make_unique( - httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"), server_pool, socket, http_params)); + httpContext(), createHandlerFactory(*this, config, async_metrics, 
"HTTPHandler-factory"), server_pool, socket, http_params, ProfileEvents::InterfaceHTTPReceiveBytes, ProfileEvents::InterfaceHTTPSendBytes)); }); } @@ -2182,7 +2237,7 @@ void Server::createServers( port_name, "https://" + address.toString(), std::make_unique( - httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params)); + httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params, ProfileEvents::InterfaceHTTPReceiveBytes, ProfileEvents::InterfaceHTTPSendBytes)); #else UNUSED(port); throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "HTTPS protocol is disabled because Poco library was built without NetSSL support."); @@ -2205,7 +2260,7 @@ void Server::createServers( port_name, "native protocol (tcp): " + address.toString(), std::make_unique( - new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ false), + new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ false, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes), server_pool, socket, new Poco::Net::TCPServerParams)); @@ -2227,7 +2282,7 @@ void Server::createServers( port_name, "native protocol (tcp) with PROXY: " + address.toString(), std::make_unique( - new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ true), + new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ true, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes), server_pool, socket, new Poco::Net::TCPServerParams)); @@ -2250,7 +2305,7 @@ void Server::createServers( port_name, "secure native protocol (tcp_secure): " + address.toString(), std::make_unique( - new TCPHandlerFactory(*this, /* secure */ true, /* proxy protocol */ false), + new TCPHandlerFactory(*this, /* secure */ true, /* proxy protocol */ false, ProfileEvents::InterfaceNativeReceiveBytes, ProfileEvents::InterfaceNativeSendBytes), server_pool, socket, new Poco::Net::TCPServerParams)); @@ -2274,7 +2329,7 @@ void Server::createServers( listen_host, port_name, "MySQL compatibility protocol: " + address.toString(), - std::make_unique(new MySQLHandlerFactory(*this), server_pool, socket, new Poco::Net::TCPServerParams)); + std::make_unique(new MySQLHandlerFactory(*this, ProfileEvents::InterfaceMySQLReceiveBytes, ProfileEvents::InterfaceMySQLSendBytes), server_pool, socket, new Poco::Net::TCPServerParams)); }); } @@ -2291,7 +2346,7 @@ void Server::createServers( listen_host, port_name, "PostgreSQL compatibility protocol: " + address.toString(), - std::make_unique(new PostgreSQLHandlerFactory(*this), server_pool, socket, new Poco::Net::TCPServerParams)); + std::make_unique(new PostgreSQLHandlerFactory(*this, ProfileEvents::InterfacePostgreSQLReceiveBytes, ProfileEvents::InterfacePostgreSQLSendBytes), server_pool, socket, new Poco::Net::TCPServerParams)); }); } @@ -2325,7 +2380,7 @@ void Server::createServers( port_name, "Prometheus: http://" + address.toString(), std::make_unique( - httpContext(), createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params)); + httpContext(), createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params, ProfileEvents::InterfacePrometheusReceiveBytes, ProfileEvents::InterfacePrometheusSendBytes)); }); } } @@ -2343,10 +2398,9 @@ void Server::createInterserverServers( { const Settings & settings = 
global_context->getSettingsRef(); - Poco::Timespan keep_alive_timeout(config.getUInt("keep_alive_timeout", 10), 0); Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams; http_params->setTimeout(settings.http_receive_timeout); - http_params->setKeepAliveTimeout(keep_alive_timeout); + http_params->setKeepAliveTimeout(global_context->getServerSettings().keep_alive_timeout); /// Now iterate over interserver_listen_hosts for (const auto & interserver_listen_host : interserver_listen_hosts) @@ -2372,7 +2426,9 @@ void Server::createInterserverServers( createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"), server_pool, socket, - http_params)); + http_params, + ProfileEvents::InterfaceInterserverReceiveBytes, + ProfileEvents::InterfaceInterserverSendBytes)); }); } @@ -2395,7 +2451,9 @@ void Server::createInterserverServers( createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPSHandler-factory"), server_pool, socket, - http_params)); + http_params, + ProfileEvents::InterfaceInterserverReceiveBytes, + ProfileEvents::InterfaceInterserverSendBytes)); #else UNUSED(port); throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support."); diff --git a/programs/server/binary.html b/programs/server/binary.html new file mode 100644 index 00000000000..988dd33a72a --- /dev/null +++ b/programs/server/binary.html @@ -0,0 +1,267 @@ + + + + + + ClickHouse Binary Viewer + + + + + +
+
+ + + + + diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html index c62e0c98184..04fdfb2d3ca 100644 --- a/programs/server/dashboard.html +++ b/programs/server/dashboard.html @@ -455,6 +455,7 @@
+
@@ -720,7 +721,7 @@ function insertChart(i) { query_editor_confirm.addEventListener('click', editConfirm); /// Ctrl+Enter (or Cmd+Enter on Mac) will also confirm editing. - query_editor.addEventListener('keydown', e => { + query_editor.addEventListener('keydown', event => { if ((event.metaKey || event.ctrlKey) && (event.keyCode == 13 || event.keyCode == 10)) { editConfirm(); } @@ -895,7 +896,7 @@ document.getElementById('add').addEventListener('click', e => { }); document.getElementById('reload').addEventListener('click', e => { - reloadAll(false); + reloadAll(queries.length == 0); }); document.getElementById('search').addEventListener('click', e => { @@ -964,12 +965,10 @@ document.getElementById('mass-editor-textarea').addEventListener('input', e => { function legendAsTooltipPlugin({ className, style = { background: "var(--legend-background)" } } = {}) { let legendEl; - let showTop = false; - const showLimit = 5; + let multiline; function init(u, opts) { legendEl = u.root.querySelector(".u-legend"); - legendEl.classList.remove("u-inline"); className && legendEl.classList.add(className); @@ -985,18 +984,19 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend- ...style }); + const nodes = legendEl.querySelectorAll("th"); + for (let i = 0; i < nodes.length; i++) + nodes[i]._order = i; + if (opts.series.length == 2) { - const nodes = legendEl.querySelectorAll("th"); + multiline = false; for (let i = 0; i < nodes.length; i++) nodes[i].style.display = "none"; } else { + multiline = true; legendEl.querySelector("th").remove(); legendEl.querySelector("td").setAttribute('colspan', '2'); legendEl.querySelector("td").style.textAlign = 'center'; - } - - if (opts.series.length - 1 > showLimit) { - showTop = true; let footer = legendEl.insertRow().insertCell(); footer.setAttribute('colspan', '2'); footer.style.textAlign = 'center'; @@ -1023,18 +1023,20 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend- left -= legendEl.clientWidth / 2; top -= legendEl.clientHeight / 2; legendEl.style.transform = "translate(" + left + "px, " + top + "px)"; - if (showTop) { + + if (multiline) { let nodes = nodeListToArray(legendEl.querySelectorAll("tr")); let header = nodes.shift(); let footer = nodes.pop(); - nodes.forEach(function (node) { node._sort_key = +node.querySelector("td").textContent; }); - nodes.sort((a, b) => +b._sort_key - +a._sort_key); + let showLimit = Math.floor(u.height / 30); + nodes.forEach(function (node) { node._sort_key = nodes.length > showLimit ? +node.querySelector("td").textContent.replace(/,/g,'') : node._order; }); + nodes.sort((a, b) => b._sort_key - a._sort_key); nodes.forEach(function (node) { node.parentNode.appendChild(node); }); for (let i = 0; i < nodes.length; i++) { nodes[i].style.display = i < showLimit ? null : "none"; - delete nodes[i]._sort_key; } footer.parentNode.appendChild(footer); + footer.style.display = nodes.length > showLimit ? 
null : "none"; } } @@ -1291,6 +1293,7 @@ async function drawAll() { document.getElementById('add').style.display = 'inline-block'; document.getElementById('edit').style.display = 'inline-block'; document.getElementById('search-span').style.display = ''; + hideError(); } else { const charts = document.getElementById('charts') @@ -1317,9 +1320,11 @@ function disableButtons() { reloadButton.classList.add('disabled'); const runButton = document.getElementById('run'); - runButton.value = 'Reloading…'; - runButton.disabled = true; - runButton.classList.add('disabled'); + if (runButton) { + runButton.value = 'Reloading…'; + runButton.disabled = true; + runButton.classList.add('disabled'); + } const searchButton = document.getElementById('search'); searchButton.value = '…'; @@ -1334,9 +1339,11 @@ function enableButtons() { reloadButton.classList.remove('disabled'); const runButton = document.getElementById('run'); - runButton.value = 'Ok'; - runButton.disabled = false; - runButton.classList.remove('disabled'); + if (runButton) { + runButton.value = 'Ok'; + runButton.disabled = false; + runButton.classList.remove('disabled'); + } const searchButton = document.getElementById('search'); searchButton.value = '🔎'; @@ -1359,14 +1366,17 @@ async function reloadAll(do_search) { } await drawAll(); } catch (e) { - showError(e.toString()); + showError(e.message); } enableButtons(); } document.getElementById('params').onsubmit = function(event) { - let do_search = document.activeElement === document.getElementById('search-query'); - reloadAll(do_search); + if (document.activeElement === document.getElementById('search-query')) { + reloadAll(true); + } else { + reloadAll(queries.length == 0); + } event.preventDefault(); } @@ -1405,13 +1415,15 @@ function refreshCustomized(value) { document.getElementById('search-span').style.opacity = customized ? 
0.5 : 1.0; } -function regenerate() { +function updateFromState() { document.getElementById('url').value = host; document.getElementById('user').value = user; document.getElementById('password').value = password; document.getElementById('search-query').value = search_query; refreshCustomized(); +} +function regenerate() { findParamsInQueries(); buildParams(); @@ -1430,7 +1442,7 @@ function regenerate() { window.onpopstate = function(event) { if (!event.state) { return; } ({host, user, queries, params, search_query, customized} = event.state); - + updateFromState(); regenerate(); drawAll(); }; @@ -1447,6 +1459,7 @@ if (window.location.hash) { async function start() { try { + updateFromState(); if (queries.length == 0) { await searchQueries(); } else { @@ -1460,7 +1473,7 @@ async function start() { drawAll(); } } catch (e) { - showError(e.toString()); + showError(e.message); } } diff --git a/programs/su/su.cpp b/programs/su/su.cpp index cebd05b3eb1..a8f61fb32b6 100644 --- a/programs/su/su.cpp +++ b/programs/su/su.cpp @@ -56,7 +56,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid) group * result{}; if (0 != getgrnam_r(arg_gid.data(), &entry, buf.get(), buf_size, &result)) - throwFromErrno(fmt::format("Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid), ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getgrnam_r' to obtain gid from group name ({})", arg_gid); if (!result) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group {} is not found in the system", arg_gid); @@ -68,7 +68,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group has id 0, but dropping privileges to gid 0 does not make sense"); if (0 != setgid(gid)) - throwFromErrno(fmt::format("Cannot do 'setgid' to user ({})", arg_gid), ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'setgid' to user ({})", arg_gid); } if (!arg_uid.empty()) @@ -81,7 +81,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid) passwd * result{}; if (0 != getpwnam_r(arg_uid.data(), &entry, buf.get(), buf_size, &result)) - throwFromErrno(fmt::format("Cannot do 'getpwnam_r' to obtain uid from user name ({})", arg_uid), ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'getpwnam_r' to obtain uid from user name ({})", arg_uid); if (!result) throw Exception(ErrorCodes::BAD_ARGUMENTS, "User {} is not found in the system", arg_uid); @@ -93,7 +93,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid) throw Exception(ErrorCodes::BAD_ARGUMENTS, "User has id 0, but dropping privileges to uid 0 does not make sense"); if (0 != setuid(uid)) - throwFromErrno(fmt::format("Cannot do 'setuid' to user ({})", arg_uid), ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot do 'setuid' to user ({})", arg_uid); } } @@ -136,7 +136,7 @@ try execvp(new_argv.front(), new_argv.data()); - throwFromErrno("Cannot execvp", ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot execvp"); } catch (...) 
{ diff --git a/rust/BLAKE3/CMakeLists.txt b/rust/BLAKE3/CMakeLists.txt deleted file mode 100644 index ceb0a647b66..00000000000 --- a/rust/BLAKE3/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -clickhouse_import_crate(MANIFEST_PATH Cargo.toml) -target_include_directories(_ch_rust_blake3 INTERFACE include) -add_library(ch_rust::blake3 ALIAS _ch_rust_blake3) diff --git a/rust/BLAKE3/Cargo.toml b/rust/BLAKE3/Cargo.toml deleted file mode 100644 index ed414fa54c1..00000000000 --- a/rust/BLAKE3/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "_ch_rust_blake3" -version = "0.1.0" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -blake3 = "1.2.0" -libc = "0.2.132" - -[lib] -crate-type = ["staticlib"] - -[profile.release] -debug = true - -[profile.release-thinlto] -inherits = "release" -# BLAKE3 module requires "full" LTO (not "thin") to get additional 10% performance benefit -lto = true diff --git a/rust/BLAKE3/include/blake3.h b/rust/BLAKE3/include/blake3.h deleted file mode 100644 index 5dc7d5bd902..00000000000 --- a/rust/BLAKE3/include/blake3.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef BLAKE3_H -#define BLAKE3_H - -#include - - -extern "C" { - -char *blake3_apply_shim(const char *begin, uint32_t _size, uint8_t *out_char_data); - -void blake3_free_char_pointer(char *ptr_to_free); - -} // extern "C" - -#endif /* BLAKE3_H */ diff --git a/rust/BLAKE3/src/lib.rs b/rust/BLAKE3/src/lib.rs deleted file mode 100644 index 011145d2f71..00000000000 --- a/rust/BLAKE3/src/lib.rs +++ /dev/null @@ -1,30 +0,0 @@ -extern crate blake3; -extern crate libc; - -use std::ffi::{CStr, CString}; -use std::os::raw::c_char; - -#[no_mangle] -pub unsafe extern "C" fn blake3_apply_shim( - begin: *const c_char, - _size: u32, - out_char_data: *mut u8, -) -> *mut c_char { - if begin.is_null() { - let err_str = CString::new("input was a null pointer").unwrap(); - return err_str.into_raw(); - } - let mut hasher = blake3::Hasher::new(); - let input_bytes = CStr::from_ptr(begin); - let input_res = input_bytes.to_bytes(); - hasher.update(input_res); - let mut reader = hasher.finalize_xof(); - reader.fill(std::slice::from_raw_parts_mut(out_char_data, blake3::OUT_LEN)); - std::ptr::null_mut() -} - -// Freeing memory according to docs: https://doc.rust-lang.org/std/ffi/struct.CString.html#method.into_raw -#[no_mangle] -pub unsafe extern "C" fn blake3_free_char_pointer(ptr_to_free: *mut c_char) { - std::mem::drop(CString::from_raw(ptr_to_free)); -} diff --git a/rust/CMakeLists.txt b/rust/CMakeLists.txt index 6aa25e95679..66694ee16f8 100644 --- a/rust/CMakeLists.txt +++ b/rust/CMakeLists.txt @@ -14,6 +14,10 @@ macro(configure_rustc) set(RUST_CFLAGS "${RUST_CFLAGS} --sysroot ${CMAKE_SYSROOT}") endif() + if (USE_MUSL) + set(RUST_CXXFLAGS "${RUST_CXXFLAGS} -D_LIBCPP_HAS_MUSL_LIBC=1") + endif () + if(CCACHE_EXECUTABLE MATCHES "/sccache$") message(STATUS "Using RUSTC_WRAPPER: ${CCACHE_EXECUTABLE}") set(RUSTCWRAPPER "rustc-wrapper = \"${CCACHE_EXECUTABLE}\"") @@ -95,6 +99,5 @@ function(add_rust_subdirectory src) VERBATIM) endfunction() -add_rust_subdirectory (BLAKE3) add_rust_subdirectory (skim) add_rust_subdirectory (prql) diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 04569cd3b3a..86bbec5579f 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -2,14 +2,6 @@ # It is not intended for manual editing. 
version = 3 -[[package]] -name = "_ch_rust_blake3" -version = "0.1.0" -dependencies = [ - "blake3", - "libc", -] - [[package]] name = "_ch_rust_prql" version = "0.1.0" @@ -30,9 +22,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] @@ -45,24 +37,31 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.7.6" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" dependencies = [ - "getrandom", + "cfg-if", "once_cell", "version_check", + "zerocopy", ] [[package]] name = "aho-corasick" -version = "1.0.2" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + [[package]] name = "android-tzdata" version = "0.1.1" @@ -95,43 +94,43 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" +checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" [[package]] name = "anstyle-parse" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" dependencies = [ - "windows-sys", + "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" +checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c" dependencies = [ "anstyle", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "anyhow" -version = "1.0.72" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" dependencies = [ "backtrace", ] @@ -146,12 +145,6 @@ dependencies = [ "yansi", ] -[[package]] -name = "arrayref" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" - [[package]] name = 
"arrayvec" version = "0.7.4" @@ -166,9 +159,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", @@ -193,44 +186,24 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" - -[[package]] -name = "blake3" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "199c42ab6972d92c9f8995f086273d25c42fc0f7b2a1fcefba465c1352d25ba5" -dependencies = [ - "arrayref", - "arrayvec", - "cc", - "cfg-if", - "constant_time_eq", - "digest", -] - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "bumpalo" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "cc" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] [[package]] name = "cfg-if" @@ -240,24 +213,23 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.26" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", - "time 0.1.45", "wasm-bindgen", - "winapi", + "windows-targets 0.48.5", ] [[package]] name = "chumsky" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23170228b96236b5a7299057ac284a321457700bc8c41a4476052f0f4ba5349d" +checksum = "8eebd66744a15ded14960ab4ccdbfb51ad3b81f51f3f04a80adac98c985396c9" dependencies = [ "hashbrown", "stacker", @@ -279,17 +251,11 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" -[[package]] -name = "constant_time_eq" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" - [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = 
"06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "crossbeam" @@ -307,9 +273,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "14c3242926edf34aec4ac3a77108ad4854bffaa2e4ddc1824124ce59231302d5" dependencies = [ "cfg-if", "crossbeam-utils", @@ -317,9 +283,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "fca89a0e215bab21874660c67903c5f143333cab1da83d041c7ded6053774751" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -328,22 +294,21 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "2d2fe95351b870527a5d09bf563ed3c97c0cffb87cf1c78a591bf48bb218d9aa" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", "memoffset 0.9.0", - "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +checksum = "b9bcf5bdbfdd6030fb4a1c497b5d5fc5921aa2f60d359a17e249c0e6df3de153" dependencies = [ "cfg-if", "crossbeam-utils", @@ -351,28 +316,18 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f" dependencies = [ "cfg-if", ] -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "typenum", -] - [[package]] name = "csv" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" dependencies = [ "csv-core", "itoa", @@ -382,18 +337,18 @@ dependencies = [ [[package]] name = "csv-core" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" dependencies = [ "memchr", ] [[package]] name = "cxx" -version = "1.0.102" +version = "1.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f68e12e817cb19eaab81aaec582b4052d07debd3c3c6b083b9d361db47c7dc9d" +checksum = "e9fc0c733f71e58dedf4f034cd2a266f80b94cc9ed512729e1798651b68c2cba" dependencies = [ "cc", "cxxbridge-flags", @@ -403,9 +358,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.102" +version = "1.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e789217e4ab7cf8cc9ce82253180a9fe331f35f5d339f0ccfe0270b39433f397" +checksum = 
"51bc81d2664db24cf1d35405f66e18a85cffd4d49ab930c71a5c6342a410f38c" dependencies = [ "cc", "codespan-reporting", @@ -413,24 +368,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.27", + "syn 2.0.41", ] [[package]] name = "cxxbridge-flags" -version = "1.0.102" +version = "1.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78a19f4c80fd9ab6c882286fa865e92e07688f4387370a209508014ead8751d0" +checksum = "8511afbe34ea242697784da5cb2c5d4a0afb224ca8b136bdf93bfe180cbe5884" [[package]] name = "cxxbridge-macro" -version = "1.0.102" +version = "1.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fcfa71f66c8563c4fa9dd2bb68368d50267856f831ac5d85367e0805f9606c" +checksum = "5c6888cd161769d65134846d4d4981d5a6654307cc46ec83fb917e530aea5f84" dependencies = [ "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.41", ] [[package]] @@ -478,6 +433,15 @@ dependencies = [ "once_cell", ] +[[package]] +name = "deranged" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc" +dependencies = [ + "powerfmt", +] + [[package]] name = "derive_builder" version = "0.11.2" @@ -509,17 +473,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer", - "crypto-common", - "subtle", -] - [[package]] name = "dirs-next" version = "2.0.0" @@ -556,28 +509,17 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.41", ] [[package]] name = "errno" -version = "0.3.2" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ - "errno-dragonfly", - "libc", - "windows-sys", -] - -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", "libc", + "windows-sys 0.52.0", ] [[package]] @@ -595,40 +537,31 @@ dependencies = [ "thread_local", ] -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" dependencies = [ "cfg-if", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", ] [[package]] name = "gimli" -version = "0.27.3" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "hashbrown" -version = "0.12.3" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash", + "allocator-api2", ] [[package]] @@ -639,22 +572,22 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "iana-time-zone" -version = "0.1.57" +version = "0.1.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows", + "windows-core", ] [[package]] @@ -680,16 +613,7 @@ checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", "rustix", - "windows-sys", -] - -[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", + "windows-sys 0.48.0", ] [[package]] @@ -702,16 +626,25 @@ dependencies = [ ] [[package]] -name = "itoa" -version = "1.0.9" +name = "itertools" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" dependencies = [ "wasm-bindgen", ] @@ -724,9 +657,20 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.151" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" + +[[package]] +name = "libredox" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +dependencies = [ + "bitflags 2.4.1", + "libc", + "redox_syscall", +] [[package]] name = "link-cplusplus" @@ -739,21 +683,21 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.5" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" [[package]] name = "log" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memoffset" @@ -825,37 +769,27 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", ] -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi", - "libc", -] - [[package]] name = "object" -version = "0.31.1" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "pin-utils" @@ -864,19 +798,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] -name = "proc-macro2" -version = "1.0.66" +name = "powerfmt" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "proc-macro2" +version = "1.0.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" dependencies = [ "unicode-ident", ] [[package]] name = "prql-ast" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71194e75f14dbe7debdf2b5eca0812c978021a1bd23d6fe1da98b58e407e035a" +checksum = "d9d91522f9f16d055409b9ffec55693a96e3424fe5d8e7c8331adcf6d7ee363a" dependencies = [ "enum-as-inner", "semver", @@ -886,9 +826,9 @@ dependencies = [ [[package]] name = "prql-compiler" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ff28e838b1be4227cc567a75c11caa3be25c5015f0e5fd21279c06e944ba44f" +checksum = "f4d56865532fcf1abaa31fbb6da6fd9e90edc441c5c78bfe2870ee75187c7a3c" dependencies = [ "anstream", "anyhow", @@ -912,9 +852,9 @@ dependencies = [ [[package]] name = "prql-parser" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3182e2ef0465a960eb02519b18768e39123d3c3a0037a2d2934055a3ef901870" +checksum = "9360352e413390cfd26345f49279622b87581a3b748340d3f42d4d616c2a1ec1" dependencies = [ "chumsky", "itertools 0.11.0", @@ -933,18 
+873,18 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.31" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fe8a65d69dd0808184ebb5f836ab526bb259db23c657efa38711b1072ee47f0" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] [[package]] name = "rayon" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" dependencies = [ "either", "rayon-core", @@ -952,41 +892,39 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" dependencies = [ - "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "num_cpus", ] [[package]] name = "redox_syscall" -version = "0.2.16" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_users" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ "getrandom", - "redox_syscall", + "libredox", "thiserror", ] [[package]] name = "regex" -version = "1.9.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick", "memchr", @@ -996,9 +934,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.3" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39354c10dd07468c2e73926b23bb9c2caca74c5501e38a35da70406f1d923310" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", @@ -1007,9 +945,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "rustc-demangle" @@ -1019,15 +957,15 @@ checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustix" -version = "0.38.6" +version = "0.38.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ee020b1716f0a80e2ace9b03441a749e402e86712f15f16fe8a8f75afac732f" +checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -1038,15 +976,9 @@ checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = 
"ryu" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" - -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" [[package]] name = "scratch" @@ -1056,38 +988,38 @@ checksum = "a3cf7c11c38cb994f3d40e8a8cde3bbd1f72a435e4c49e85d6553d8312306152" [[package]] name = "semver" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" +checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" dependencies = [ "serde", ] [[package]] name = "serde" -version = "1.0.174" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b88756493a5bd5e5395d53baa70b194b05764ab85b59e43e4b8f4e1192fa9b1" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.174" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e5c3a298c7f978e53536f95a63bdc4c4a64550582f31a0359a9afda6aede62e" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.41", ] [[package]] name = "serde_json" -version = "1.0.103" +version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d03b412469450d4404fe8499a268edd7f8b79fecb074b0d812ad64ca21f4031b" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ "itoa", "ryu", @@ -1112,7 +1044,7 @@ dependencies = [ "nix 0.25.1", "rayon", "regex", - "time 0.3.23", + "time", "timer", "tuikit", "unicode-width", @@ -1121,20 +1053,20 @@ dependencies = [ [[package]] name = "sqlformat" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c12bc9199d1db8234678b7051747c07f517cdcf019262d1847b94ec8b1aee3e" +checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c" dependencies = [ - "itertools 0.10.5", + "itertools 0.12.0", "nom", "unicode_categories", ] [[package]] name = "sqlparser" -version = "0.36.1" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eaa1e88e78d2c2460d78b7dc3f0c08dbb606ab4222f9aff36f420d36e307d87" +checksum = "37ae05a8250b968a3f7db93155a84d68b2e6cea1583949af5ca5b5170c76c075" dependencies = [ "log", "serde", @@ -1170,23 +1102,17 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.25.1" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6069ca09d878a33f883cc06aaa9718ede171841d3832450354410b718b097232" +checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck", "proc-macro2", "quote", "rustversion", - "syn 2.0.27", + "syn 2.0.41", ] -[[package]] -name = "subtle" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" - [[package]] name = "syn" version = "1.0.109" @@ -1200,9 +1126,9 @@ dependencies = [ 
[[package]] name = "syn" -version = "2.0.27" +version = "2.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b60f673f44a8255b9c8c657daf66a596d435f2da81a555b06dc644d080ba45e0" +checksum = "44c8b28c477cc3bf0e7966561e3460130e1255f7a1cf71931075f1c5e7a7e269" dependencies = [ "proc-macro2", "quote", @@ -1222,31 +1148,31 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.2.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449" dependencies = [ "winapi-util", ] [[package]] name = "thiserror" -version = "1.0.44" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" +checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.44" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" +checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.41", ] [[package]] @@ -1261,30 +1187,21 @@ dependencies = [ [[package]] name = "time" -version = "0.1.45" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "time" -version = "0.3.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59e399c068f43a5d116fedaf73b203fa4f9c519f17e2b34f63221d3792f81446" +checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" dependencies = [ + "deranged", + "powerfmt", "serde", "time-core", ] [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "timer" @@ -1309,23 +1226,17 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "typenum" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" - [[package]] name = "unicode-ident" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-width" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] name = "unicode_categories" @@ -1366,12 +1277,6 @@ dependencies = [ "quote", ] -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -1380,9 +1285,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -1390,24 +1295,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.41", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1415,22 +1320,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 2.0.27", + "syn 2.0.41", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" [[package]] name = "winapi" @@ -1450,9 +1355,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ "winapi", ] @@ -1464,12 +1369,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "windows" -version = "0.48.0" +name = "windows-core" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -1478,68 +1383,154 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", ] [[package]] name = "windows-targets" -version = "0.48.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.48.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "yansi" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + +[[package]] +name = "zerocopy" +version = "0.7.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.41", +] diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 2a2b582cea8..ac8b31a7290 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -1,7 +1,6 @@ # workspace is required to vendor crates for all packages. 
[workspace] members = [ - "BLAKE3", "skim", "prql", ] diff --git a/src/Access/Common/AccessType.h b/src/Access/Common/AccessType.h index f99ae2a8aea..463be6a3aea 100644 --- a/src/Access/Common/AccessType.h +++ b/src/Access/Common/AccessType.h @@ -82,7 +82,8 @@ enum class AccessType \ M(ALTER_VIEW_REFRESH, "ALTER LIVE VIEW REFRESH, REFRESH VIEW", VIEW, ALTER_VIEW) \ M(ALTER_VIEW_MODIFY_QUERY, "ALTER TABLE MODIFY QUERY", VIEW, ALTER_VIEW) \ - M(ALTER_VIEW, "", GROUP, ALTER) /* allows to execute ALTER VIEW REFRESH, ALTER VIEW MODIFY QUERY; + M(ALTER_VIEW_MODIFY_REFRESH, "ALTER TABLE MODIFY QUERY", VIEW, ALTER_VIEW) \ + M(ALTER_VIEW, "", GROUP, ALTER) /* allows to execute ALTER VIEW REFRESH, ALTER VIEW MODIFY QUERY, ALTER VIEW MODIFY REFRESH; implicitly enabled by the grant ALTER_TABLE */\ \ M(ALTER, "", GROUP, ALL) /* allows to execute ALTER {TABLE|LIVE VIEW} */\ @@ -177,12 +178,14 @@ enum class AccessType M(SYSTEM_MOVES, "SYSTEM STOP MOVES, SYSTEM START MOVES, STOP MOVES, START MOVES", TABLE, SYSTEM) \ M(SYSTEM_PULLING_REPLICATION_LOG, "SYSTEM STOP PULLING REPLICATION LOG, SYSTEM START PULLING REPLICATION LOG", TABLE, SYSTEM) \ M(SYSTEM_CLEANUP, "SYSTEM STOP CLEANUP, SYSTEM START CLEANUP", TABLE, SYSTEM) \ + M(SYSTEM_VIEWS, "SYSTEM REFRESH VIEW, SYSTEM START VIEWS, SYSTEM STOP VIEWS, SYSTEM START VIEW, SYSTEM STOP VIEW, SYSTEM CANCEL VIEW, REFRESH VIEW, START VIEWS, STOP VIEWS, START VIEW, STOP VIEW, CANCEL VIEW", VIEW, SYSTEM) \ M(SYSTEM_DISTRIBUTED_SENDS, "SYSTEM STOP DISTRIBUTED SENDS, SYSTEM START DISTRIBUTED SENDS, STOP DISTRIBUTED SENDS, START DISTRIBUTED SENDS", TABLE, SYSTEM_SENDS) \ M(SYSTEM_REPLICATED_SENDS, "SYSTEM STOP REPLICATED SENDS, SYSTEM START REPLICATED SENDS, STOP REPLICATED SENDS, START REPLICATED SENDS", TABLE, SYSTEM_SENDS) \ M(SYSTEM_SENDS, "SYSTEM STOP SENDS, SYSTEM START SENDS, STOP SENDS, START SENDS", GROUP, SYSTEM) \ M(SYSTEM_REPLICATION_QUEUES, "SYSTEM STOP REPLICATION QUEUES, SYSTEM START REPLICATION QUEUES, STOP REPLICATION QUEUES, START REPLICATION QUEUES", TABLE, SYSTEM) \ M(SYSTEM_DROP_REPLICA, "DROP REPLICA", TABLE, SYSTEM) \ M(SYSTEM_SYNC_REPLICA, "SYNC REPLICA", TABLE, SYSTEM) \ + M(SYSTEM_REPLICA_READINESS, "SYSTEM REPLICA READY, SYSTEM REPLICA UNREADY", GLOBAL, SYSTEM) \ M(SYSTEM_RESTART_REPLICA, "RESTART REPLICA", TABLE, SYSTEM) \ M(SYSTEM_RESTORE_REPLICA, "RESTORE REPLICA", TABLE, SYSTEM) \ M(SYSTEM_WAIT_LOADING_PARTS, "WAIT LOADING PARTS", TABLE, SYSTEM) \ diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 90fddd0085d..567b131c00e 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -155,6 +155,7 @@ namespace "formats", "privileges", "data_type_families", + "database_engines", "table_engines", "table_functions", "aggregate_function_combinators", diff --git a/src/Access/tests/gtest_access_rights_ops.cpp b/src/Access/tests/gtest_access_rights_ops.cpp index b5a15513a89..a7594503992 100644 --- a/src/Access/tests/gtest_access_rights_ops.cpp +++ b/src/Access/tests/gtest_access_rights_ops.cpp @@ -51,7 +51,7 @@ TEST(AccessRights, Union) "CREATE DICTIONARY, DROP DATABASE, DROP TABLE, DROP VIEW, DROP DICTIONARY, UNDROP TABLE, " "TRUNCATE, OPTIMIZE, BACKUP, CREATE ROW POLICY, ALTER ROW POLICY, DROP ROW POLICY, " "SHOW ROW POLICIES, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, " - "SYSTEM MOVES, SYSTEM PULLING REPLICATION LOG, SYSTEM CLEANUP, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, " + "SYSTEM MOVES, SYSTEM PULLING REPLICATION LOG, SYSTEM CLEANUP, SYSTEM VIEWS, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, 
" "SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, " "SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM SYNC DATABASE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*, GRANT NAMED COLLECTION ADMIN ON db1"); } diff --git a/src/AggregateFunctions/AggregateFunctionFactory.cpp b/src/AggregateFunctions/AggregateFunctionFactory.cpp index 5c101888140..b6ba562045d 100644 --- a/src/AggregateFunctions/AggregateFunctionFactory.cpp +++ b/src/AggregateFunctions/AggregateFunctionFactory.cpp @@ -51,10 +51,10 @@ void AggregateFunctionFactory::registerFunction(const String & name, Value creat void AggregateFunctionFactory::registerNullsActionTransformation(const String & source_ignores_nulls, const String & target_respect_nulls) { if (!aggregate_functions.contains(source_ignores_nulls)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "registerNullsActionTransformation: Source aggregation '{}' not found"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "registerNullsActionTransformation: Source aggregation '{}' not found", source_ignores_nulls); if (!aggregate_functions.contains(target_respect_nulls)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "registerNullsActionTransformation: Target aggregation '{}' not found"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "registerNullsActionTransformation: Target aggregation '{}' not found", target_respect_nulls); if (!respect_nulls.emplace(source_ignores_nulls, target_respect_nulls).second) throw Exception( diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp index b95471df90a..6c6397e35d5 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp @@ -20,6 +20,7 @@ #include #include +#include #include diff --git a/src/AggregateFunctions/AggregateFunctionMax.cpp b/src/AggregateFunctions/AggregateFunctionMax.cpp index 813129e42ec..e74224a24c3 100644 --- a/src/AggregateFunctions/AggregateFunctionMax.cpp +++ b/src/AggregateFunctions/AggregateFunctionMax.cpp @@ -1,7 +1,7 @@ #include -#include #include - +#include +#include namespace DB { @@ -10,10 +10,122 @@ struct Settings; namespace { +template +class AggregateFunctionsSingleValueMax final : public AggregateFunctionsSingleValue +{ + using Parent = AggregateFunctionsSingleValue; + +public: + explicit AggregateFunctionsSingleValueMax(const DataTypePtr & type) : Parent(type) { } + + /// Specializations for native numeric types + ALWAYS_INLINE inline void addBatchSinglePlace( + size_t row_begin, + size_t row_end, + AggregateDataPtr __restrict place, + const IColumn ** __restrict columns, + Arena * arena, + ssize_t if_argument_pos) const override; + + ALWAYS_INLINE inline void addBatchSinglePlaceNotNull( + size_t row_begin, + size_t row_end, + AggregateDataPtr __restrict place, + const IColumn ** __restrict columns, + const UInt8 * __restrict null_map, + Arena * arena, + ssize_t if_argument_pos) const override; +}; + +// NOLINTBEGIN(bugprone-macro-parentheses) +#define SPECIALIZE(TYPE) \ +template <> \ +void AggregateFunctionsSingleValueMax>>::addBatchSinglePlace( \ + size_t row_begin, \ + size_t row_end, \ + AggregateDataPtr __restrict place, \ + const IColumn ** __restrict columns, \ + Arena *, \ + ssize_t if_argument_pos) const \ +{ \ + const auto & column = assert_cast>::ColVecType &>(*columns[0]); \ + std::optional opt; \ + if (if_argument_pos >= 0) \ + { \ + const auto & flags = assert_cast(*columns[if_argument_pos]).getData(); \ + opt = 
findNumericMaxIf(column.getData().data(), flags.data(), row_begin, row_end); \ + } \ + else \ + opt = findNumericMax(column.getData().data(), row_begin, row_end); \ + if (opt.has_value()) \ + this->data(place).changeIfGreater(opt.value()); \ +} +// NOLINTEND(bugprone-macro-parentheses) + +FOR_BASIC_NUMERIC_TYPES(SPECIALIZE) +#undef SPECIALIZE + +template +void AggregateFunctionsSingleValueMax::addBatchSinglePlace( + size_t row_begin, + size_t row_end, + AggregateDataPtr __restrict place, + const IColumn ** __restrict columns, + Arena * arena, + ssize_t if_argument_pos) const +{ + return Parent::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos); +} + +// NOLINTBEGIN(bugprone-macro-parentheses) +#define SPECIALIZE(TYPE) \ +template <> \ +void AggregateFunctionsSingleValueMax>>::addBatchSinglePlaceNotNull( \ + size_t row_begin, \ + size_t row_end, \ + AggregateDataPtr __restrict place, \ + const IColumn ** __restrict columns, \ + const UInt8 * __restrict null_map, \ + Arena *, \ + ssize_t if_argument_pos) const \ +{ \ + const auto & column = assert_cast>::ColVecType &>(*columns[0]); \ + std::optional opt; \ + if (if_argument_pos >= 0) \ + { \ + const auto * if_flags = assert_cast(*columns[if_argument_pos]).getData().data(); \ + auto final_flags = std::make_unique(row_end); \ + for (size_t i = row_begin; i < row_end; ++i) \ + final_flags[i] = (!null_map[i]) & !!if_flags[i]; \ + opt = findNumericMaxIf(column.getData().data(), final_flags.get(), row_begin, row_end); \ + } \ + else \ + opt = findNumericMaxNotNull(column.getData().data(), null_map, row_begin, row_end); \ + if (opt.has_value()) \ + this->data(place).changeIfGreater(opt.value()); \ +} +// NOLINTEND(bugprone-macro-parentheses) + +FOR_BASIC_NUMERIC_TYPES(SPECIALIZE) +#undef SPECIALIZE + +template +void AggregateFunctionsSingleValueMax::addBatchSinglePlaceNotNull( + size_t row_begin, + size_t row_end, + AggregateDataPtr __restrict place, + const IColumn ** __restrict columns, + const UInt8 * __restrict null_map, + Arena * arena, + ssize_t if_argument_pos) const +{ + return Parent::addBatchSinglePlaceNotNull(row_begin, row_end, place, columns, null_map, arena, if_argument_pos); +} + AggregateFunctionPtr createAggregateFunctionMax( const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings * settings) { - return AggregateFunctionPtr(createAggregateFunctionSingleValue(name, argument_types, parameters, settings)); + return AggregateFunctionPtr(createAggregateFunctionSingleValue(name, argument_types, parameters, settings)); } AggregateFunctionPtr createAggregateFunctionArgMax( diff --git a/src/AggregateFunctions/AggregateFunctionMin.cpp b/src/AggregateFunctions/AggregateFunctionMin.cpp index ac3e05121f7..48758aa74b0 100644 --- a/src/AggregateFunctions/AggregateFunctionMin.cpp +++ b/src/AggregateFunctions/AggregateFunctionMin.cpp @@ -1,6 +1,7 @@ #include -#include #include +#include +#include namespace DB @@ -10,10 +11,123 @@ struct Settings; namespace { +template +class AggregateFunctionsSingleValueMin final : public AggregateFunctionsSingleValue +{ + using Parent = AggregateFunctionsSingleValue; + +public: + explicit AggregateFunctionsSingleValueMin(const DataTypePtr & type) : Parent(type) { } + + /// Specializations for native numeric types + ALWAYS_INLINE inline void addBatchSinglePlace( + size_t row_begin, + size_t row_end, + AggregateDataPtr __restrict place, + const IColumn ** __restrict columns, + Arena * arena, + ssize_t if_argument_pos) const override; + + 
ALWAYS_INLINE inline void addBatchSinglePlaceNotNull( + size_t row_begin, + size_t row_end, + AggregateDataPtr __restrict place, + const IColumn ** __restrict columns, + const UInt8 * __restrict null_map, + Arena * arena, + ssize_t if_argument_pos) const override; +}; + +// NOLINTBEGIN(bugprone-macro-parentheses) +#define SPECIALIZE(TYPE) \ + template <> \ + void AggregateFunctionsSingleValueMin>>::addBatchSinglePlace( \ + size_t row_begin, \ + size_t row_end, \ + AggregateDataPtr __restrict place, \ + const IColumn ** __restrict columns, \ + Arena *, \ + ssize_t if_argument_pos) const \ + { \ + const auto & column = assert_cast>::ColVecType &>(*columns[0]); \ + std::optional opt; \ + if (if_argument_pos >= 0) \ + { \ + const auto & flags = assert_cast(*columns[if_argument_pos]).getData(); \ + opt = findNumericMinIf(column.getData().data(), flags.data(), row_begin, row_end); \ + } \ + else \ + opt = findNumericMin(column.getData().data(), row_begin, row_end); \ + if (opt.has_value()) \ + this->data(place).changeIfLess(opt.value()); \ + } +// NOLINTEND(bugprone-macro-parentheses) + +FOR_BASIC_NUMERIC_TYPES(SPECIALIZE) +#undef SPECIALIZE + +template +void AggregateFunctionsSingleValueMin::addBatchSinglePlace( + size_t row_begin, + size_t row_end, + AggregateDataPtr __restrict place, + const IColumn ** __restrict columns, + Arena * arena, + ssize_t if_argument_pos) const +{ + return Parent::addBatchSinglePlace(row_begin, row_end, place, columns, arena, if_argument_pos); +} + +// NOLINTBEGIN(bugprone-macro-parentheses) +#define SPECIALIZE(TYPE) \ + template <> \ + void AggregateFunctionsSingleValueMin>>::addBatchSinglePlaceNotNull( \ + size_t row_begin, \ + size_t row_end, \ + AggregateDataPtr __restrict place, \ + const IColumn ** __restrict columns, \ + const UInt8 * __restrict null_map, \ + Arena *, \ + ssize_t if_argument_pos) const \ + { \ + const auto & column = assert_cast>::ColVecType &>(*columns[0]); \ + std::optional opt; \ + if (if_argument_pos >= 0) \ + { \ + const auto * if_flags = assert_cast(*columns[if_argument_pos]).getData().data(); \ + auto final_flags = std::make_unique(row_end); \ + for (size_t i = row_begin; i < row_end; ++i) \ + final_flags[i] = (!null_map[i]) & !!if_flags[i]; \ + opt = findNumericMinIf(column.getData().data(), final_flags.get(), row_begin, row_end); \ + } \ + else \ + opt = findNumericMinNotNull(column.getData().data(), null_map, row_begin, row_end); \ + if (opt.has_value()) \ + this->data(place).changeIfLess(opt.value()); \ + } +// NOLINTEND(bugprone-macro-parentheses) + +FOR_BASIC_NUMERIC_TYPES(SPECIALIZE) +#undef SPECIALIZE + +template +void AggregateFunctionsSingleValueMin::addBatchSinglePlaceNotNull( + size_t row_begin, + size_t row_end, + AggregateDataPtr __restrict place, + const IColumn ** __restrict columns, + const UInt8 * __restrict null_map, + Arena * arena, + ssize_t if_argument_pos) const +{ + return Parent::addBatchSinglePlaceNotNull(row_begin, row_end, place, columns, null_map, arena, if_argument_pos); +} + AggregateFunctionPtr createAggregateFunctionMin( const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings * settings) { - return AggregateFunctionPtr(createAggregateFunctionSingleValue(name, argument_types, parameters, settings)); + return AggregateFunctionPtr(createAggregateFunctionSingleValue( + name, argument_types, parameters, settings)); } AggregateFunctionPtr createAggregateFunctionArgMin( diff --git a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h 
b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index ef1de76df79..b69a0b100a3 100644 --- a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -43,14 +43,12 @@ namespace ErrorCodes template struct SingleValueDataFixed { -private: using Self = SingleValueDataFixed; using ColVecType = ColumnVectorOrDecimal; bool has_value = false; /// We need to remember if at least one value has been passed. This is necessary for AggregateFunctionIf. T value = T{}; -public: static constexpr bool result_is_nullable = false; static constexpr bool should_skip_null_arguments = true; static constexpr bool is_any = false; @@ -157,6 +155,15 @@ public: return false; } + void changeIfLess(T from) + { + if (!has() || from < value) + { + has_value = true; + value = from; + } + } + bool changeIfGreater(const IColumn & column, size_t row_num, Arena * arena) { if (!has() || assert_cast(column).getData()[row_num] > value) @@ -179,6 +186,15 @@ public: return false; } + void changeIfGreater(T & from) + { + if (!has() || from > value) + { + has_value = true; + value = from; + } + } + bool isEqualTo(const Self & to) const { return has() && to.value == value; @@ -448,7 +464,6 @@ public: } #endif - }; struct Compatibility @@ -1214,7 +1229,7 @@ struct AggregateFunctionAnyHeavyData : Data template -class AggregateFunctionsSingleValue final : public IAggregateFunctionDataHelper> +class AggregateFunctionsSingleValue : public IAggregateFunctionDataHelper> { static constexpr bool is_any = Data::is_any; @@ -1230,8 +1245,11 @@ public: || StringRef(Data::name()) == StringRef("max")) { if (!type->isComparable()) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of aggregate function {} " - "because the values of that data type are not comparable", type->getName(), getName()); + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of argument of aggregate function {} because the values of that data type are not comparable", + type->getName(), + Data::name()); } } diff --git a/src/AggregateFunctions/AggregateFunctionSparkbar.cpp b/src/AggregateFunctions/AggregateFunctionSparkbar.cpp index 7ff9df03824..b6e538520a8 100644 --- a/src/AggregateFunctions/AggregateFunctionSparkbar.cpp +++ b/src/AggregateFunctions/AggregateFunctionSparkbar.cpp @@ -378,6 +378,7 @@ AggregateFunctionPtr createAggregateFunctionSparkbar(const std::string & name, c void registerAggregateFunctionSparkbar(AggregateFunctionFactory & factory) { factory.registerFunction("sparkbar", createAggregateFunctionSparkbar); + factory.registerAlias("sparkBar", "sparkbar"); } } diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index b3006f2ce82..5781ab69c6b 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -504,7 +504,7 @@ public: const auto * if_flags = assert_cast(*columns[if_argument_pos]).getData().data(); auto final_flags = std::make_unique(row_end); for (size_t i = row_begin; i < row_end; ++i) - final_flags[i] = (!null_map[i]) & if_flags[i]; + final_flags[i] = (!null_map[i]) & !!if_flags[i]; this->data(place).addManyConditional(column.getData().data(), final_flags.get(), row_begin, row_end); } diff --git a/src/AggregateFunctions/AggregateFunctionSumMap.cpp b/src/AggregateFunctions/AggregateFunctionSumMap.cpp index 04bc908396a..9f0873a6c9c 100644 --- a/src/AggregateFunctions/AggregateFunctionSumMap.cpp +++ 
b/src/AggregateFunctions/AggregateFunctionSumMap.cpp @@ -254,11 +254,20 @@ public: if (it != merged_maps.end()) { for (size_t col = 0; col < values_types.size(); ++col) + { if (!elem.second[col].isNull()) - applyVisitor(Visitor(elem.second[col]), it->second[col]); + { + if (it->second[col].isNull()) + it->second[col] = elem.second[col]; + else + applyVisitor(Visitor(elem.second[col]), it->second[col]); + } + } } else + { merged_maps[elem.first] = elem.second; + } } } diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index a8254baac3a..94bb121893d 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -197,7 +197,7 @@ public: virtual void insertMergeResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const { if (isState()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} is marked as State but method insertMergeResultInto is not implemented"); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} is marked as State but method insertMergeResultInto is not implemented", getName()); insertResultInto(place, to, arena); } diff --git a/src/AggregateFunctions/examples/quantile-t-digest.cpp b/src/AggregateFunctions/examples/quantile-t-digest.cpp index b4e58e6203c..5360304b311 100644 --- a/src/AggregateFunctions/examples/quantile-t-digest.cpp +++ b/src/AggregateFunctions/examples/quantile-t-digest.cpp @@ -1,6 +1,7 @@ #include #include #include +#include int main(int, char **) { diff --git a/src/AggregateFunctions/findNumeric.cpp b/src/AggregateFunctions/findNumeric.cpp new file mode 100644 index 00000000000..bbad8c1fe3d --- /dev/null +++ b/src/AggregateFunctions/findNumeric.cpp @@ -0,0 +1,15 @@ +#include + +namespace DB +{ +#define INSTANTIATION(T) \ + template std::optional findNumericMin(const T * __restrict ptr, size_t start, size_t end); \ + template std::optional findNumericMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \ + template std::optional findNumericMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \ + template std::optional findNumericMax(const T * __restrict ptr, size_t start, size_t end); \ + template std::optional findNumericMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \ + template std::optional findNumericMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); + +FOR_BASIC_NUMERIC_TYPES(INSTANTIATION) +#undef INSTANTIATION +} diff --git a/src/AggregateFunctions/findNumeric.h b/src/AggregateFunctions/findNumeric.h new file mode 100644 index 00000000000..df7c325569a --- /dev/null +++ b/src/AggregateFunctions/findNumeric.h @@ -0,0 +1,154 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +namespace DB +{ +template +concept is_any_native_number = (is_any_of); + +template +struct MinComparator +{ + static ALWAYS_INLINE inline const T & cmp(const T & a, const T & b) { return std::min(a, b); } +}; + +template +struct MaxComparator +{ + static ALWAYS_INLINE inline const T & cmp(const T & a, const T & b) { return std::max(a, b); } +}; + +MULTITARGET_FUNCTION_AVX2_SSE42( + MULTITARGET_FUNCTION_HEADER(template static std::optional NO_INLINE), + findNumericExtremeImpl, + MULTITARGET_FUNCTION_BODY((const T * __restrict ptr, const UInt8 * __restrict condition_map [[maybe_unused]], size_t row_begin, size_t 
row_end) + { + size_t count = row_end - row_begin; + ptr += row_begin; + if constexpr (!add_all_elements) + condition_map += row_begin; + + T ret{}; + size_t i = 0; + for (; i < count; i++) + { + if (add_all_elements || !condition_map[i] == add_if_cond_zero) + { + ret = ptr[i]; + break; + } + } + if (i >= count) + return std::nullopt; + + /// Unroll the loop manually for floating point, since the compiler doesn't do it without fastmath + /// as it might change the return value + if constexpr (std::is_floating_point_v) + { + constexpr size_t unroll_block = 512 / sizeof(T); /// Chosen via benchmarks with AVX2 so YMMV + size_t unrolled_end = i + (((count - i) / unroll_block) * unroll_block); + + if (i < unrolled_end) + { + T partial_min[unroll_block]; + for (size_t unroll_it = 0; unroll_it < unroll_block; unroll_it++) + partial_min[unroll_it] = ret; + + while (i < unrolled_end) + { + for (size_t unroll_it = 0; unroll_it < unroll_block; unroll_it++) + { + if (add_all_elements || !condition_map[i + unroll_it] == add_if_cond_zero) + partial_min[unroll_it] = ComparatorClass::cmp(partial_min[unroll_it], ptr[i + unroll_it]); + } + i += unroll_block; + } + for (size_t unroll_it = 0; unroll_it < unroll_block; unroll_it++) + ret = ComparatorClass::cmp(ret, partial_min[unroll_it]); + } + } + + for (; i < count; i++) + { + if (add_all_elements || !condition_map[i] == add_if_cond_zero) + ret = ComparatorClass::cmp(ret, ptr[i]); + } + + return ret; + } +)) + + +/// Given a vector of T finds the extreme (MIN or MAX) value +template +static std::optional +findNumericExtreme(const T * __restrict ptr, const UInt8 * __restrict condition_map [[maybe_unused]], size_t start, size_t end) +{ +#if USE_MULTITARGET_CODE + /// We see no benefit from using AVX512BW or AVX512F (over AVX2), so we only declare SSE and AVX2 + if (isArchSupported(TargetArch::AVX2)) + return findNumericExtremeImplAVX2(ptr, condition_map, start, end); + + if (isArchSupported(TargetArch::SSE42)) + return findNumericExtremeImplSSE42(ptr, condition_map, start, end); +#endif + return findNumericExtremeImpl(ptr, condition_map, start, end); +} + +template +std::optional findNumericMin(const T * __restrict ptr, size_t start, size_t end) +{ + return findNumericExtreme, true, false>(ptr, nullptr, start, end); +} + +template +std::optional findNumericMinNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end) +{ + return findNumericExtreme, false, true>(ptr, condition_map, start, end); +} + +template +std::optional findNumericMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end) +{ + return findNumericExtreme, false, false>(ptr, condition_map, start, end); +} + +template +std::optional findNumericMax(const T * __restrict ptr, size_t start, size_t end) +{ + return findNumericExtreme, true, false>(ptr, nullptr, start, end); +} + +template +std::optional findNumericMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end) +{ + return findNumericExtreme, false, true>(ptr, condition_map, start, end); +} + +template +std::optional findNumericMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end) +{ + return findNumericExtreme, false, false>(ptr, condition_map, start, end); +} + + +#define EXTERN_INSTANTIATION(T) \ + extern template std::optional findNumericMin(const T * __restrict ptr, size_t start, size_t end); \ + extern template std::optional findNumericMinNotNull(const T * __restrict 
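The manual unrolling above keeps several independent partial minimums so the floating point reduction has no single loop-carried dependency, something the compiler will not do by itself without fast-math because reordering can change the result. The sketch below reproduces only that partial-accumulator shape in isolation; the block size of 8 is an arbitrary choice here, whereas the patch derives it as 512 / sizeof(T) from AVX2 benchmarks, and the real code also threads the condition map through the loop.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Minimal illustration of the partial-accumulator trick in findNumericExtremeImpl.
// Assumes count > 0.
float unrolledMin(const float * ptr, size_t count)
{
    constexpr size_t unroll_block = 8;
    float ret = ptr[0];

    float partial_min[unroll_block];
    for (float & p : partial_min)
        p = ret;

    size_t i = 0;
    size_t unrolled_end = (count / unroll_block) * unroll_block;
    for (; i < unrolled_end; i += unroll_block)
        for (size_t lane = 0; lane < unroll_block; ++lane)
            partial_min[lane] = std::min(partial_min[lane], ptr[i + lane]);

    // Reduce the independent lanes into one value.
    for (float p : partial_min)
        ret = std::min(ret, p);

    // Scalar tail for the remaining elements.
    for (; i < count; ++i)
        ret = std::min(ret, ptr[i]);

    return ret;
}

int main()
{
    std::vector<float> data{3.5f, -1.25f, 7.0f, 0.5f, -2.75f, 4.0f, 9.5f, -0.5f, 2.0f, 1.0f};
    std::cout << unrolledMin(data.data(), data.size()) << '\n'; // -2.75
}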
ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \ + extern template std::optional findNumericMinIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \ + extern template std::optional findNumericMax(const T * __restrict ptr, size_t start, size_t end); \ + extern template std::optional findNumericMaxNotNull(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); \ + extern template std::optional findNumericMaxIf(const T * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end); + + FOR_BASIC_NUMERIC_TYPES(EXTERN_INSTANTIATION) +#undef EXTERN_INSTANTIATION + +} diff --git a/src/Analyzer/Passes/CNF.cpp b/src/Analyzer/Passes/CNF.cpp index 91e973c7573..aa6ee539934 100644 --- a/src/Analyzer/Passes/CNF.cpp +++ b/src/Analyzer/Passes/CNF.cpp @@ -536,7 +536,8 @@ CNF CNF::toCNF(const QueryTreeNodePtr & node, ContextPtr context, size_t max_gro if (!cnf) throw Exception(ErrorCodes::TOO_MANY_TEMPORARY_COLUMNS, "Cannot convert expression '{}' to CNF, because it produces to many clauses." - "Size of boolean formula in CNF can be exponential of size of source formula."); + "Size of boolean formula in CNF can be exponential of size of source formula.", + node->formatConvertedASTForErrorMessage()); return *cnf; } diff --git a/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp b/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp index 4e0562a2fe8..117e649ac88 100644 --- a/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp +++ b/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp @@ -1,6 +1,8 @@ #include #include +#include +#include #include @@ -52,6 +54,13 @@ public: if (!isTuple(rhs_argument_result_type)) return; + if (function_node->getResultType()->equals(DataTypeNullable(std::make_shared()))) + /** The function `equals` can return Nullable(Nothing), e.g., in the case of (a, b) == (NULL, 1). + * On the other hand, `AND` returns Nullable(UInt8), so we would need to convert types. + * It's better to just skip this trivial case. 
+ */ + return; + auto lhs_argument_node_type = lhs_argument->getNodeType(); auto rhs_argument_node_type = rhs_argument->getNodeType(); diff --git a/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp b/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp index 901867b8889..76b14c1a867 100644 --- a/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp +++ b/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -41,22 +42,6 @@ DataTypePtr getEnumType(const std::set & string_values) return getDataEnumType(string_values); } -QueryTreeNodePtr createCastFunction(QueryTreeNodePtr from, DataTypePtr result_type, ContextPtr context) -{ - auto enum_literal = std::make_shared(result_type->getName(), std::make_shared()); - auto enum_literal_node = std::make_shared(std::move(enum_literal)); - - auto cast_function = FunctionFactory::instance().get("_CAST", std::move(context)); - QueryTreeNodes arguments{ std::move(from), std::move(enum_literal_node) }; - - auto function_node = std::make_shared("_CAST"); - function_node->getArguments().getNodes() = std::move(arguments); - - function_node->resolveAsFunction(cast_function->build(function_node->getArgumentColumns())); - - return function_node; -} - /// if(arg1, arg2, arg3) will be transformed to if(arg1, _CAST(arg2, Enum...), _CAST(arg3, Enum...)) /// where Enum is generated based on the possible values stored in string_values void changeIfArguments( diff --git a/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp b/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp index 6fa6c8b0e78..59b3b036698 100644 --- a/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp +++ b/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp @@ -9,6 +9,8 @@ #include #include +#include + namespace DB { @@ -248,13 +250,13 @@ public: if (function_node->getFunctionName() == "and") { - tryReplaceAndEqualsChainsWithConstant(node); + tryOptimizeAndEqualsNotEqualsChain(node); return; } } private: - void tryReplaceAndEqualsChainsWithConstant(QueryTreeNodePtr & node) + void tryOptimizeAndEqualsNotEqualsChain(QueryTreeNodePtr & node) { auto & function_node = node->as(); assert(function_node.getFunctionName() == "and"); @@ -264,53 +266,132 @@ private: QueryTreeNodes and_operands; - QueryTreeNodePtrWithHashMap node_to_constants; + QueryTreeNodePtrWithHashMap equals_node_to_constants; + QueryTreeNodePtrWithHashMap not_equals_node_to_constants; + QueryTreeNodePtrWithHashMap node_to_not_equals_functions; for (const auto & argument : function_node.getArguments()) { auto * argument_function = argument->as(); - if (!argument_function || argument_function->getFunctionName() != "equals") + const auto valid_functions = std::unordered_set{"equals", "notEquals"}; + if (!argument_function || !valid_functions.contains(argument_function->getFunctionName())) { and_operands.push_back(argument); continue; } - const auto & equals_arguments = argument_function->getArguments().getNodes(); - const auto & lhs = equals_arguments[0]; - const auto & rhs = equals_arguments[1]; + const auto function_name = argument_function->getFunctionName(); + const auto & function_arguments = argument_function->getArguments().getNodes(); + const auto & lhs = function_arguments[0]; + const auto & rhs = function_arguments[1]; - const auto has_and_with_different_constant = [&](const QueryTreeNodePtr & expression, const ConstantNode * constant) + if (function_name == "equals") { - if (auto it = node_to_constants.find(expression); it != 
node_to_constants.end()) + const auto has_and_with_different_constant = [&](const QueryTreeNodePtr & expression, const ConstantNode * constant) { - if (!it->second->isEqual(*constant)) - return true; + if (auto it = equals_node_to_constants.find(expression); it != equals_node_to_constants.end()) + { + if (!it->second->isEqual(*constant)) + return true; + } + else + { + equals_node_to_constants.emplace(expression, constant); + and_operands.push_back(argument); + } + + return false; + }; + + bool collapse_to_false = false; + + if (const auto * lhs_literal = lhs->as()) + collapse_to_false = has_and_with_different_constant(rhs, lhs_literal); + else if (const auto * rhs_literal = rhs->as()) + collapse_to_false = has_and_with_different_constant(lhs, rhs_literal); + else + and_operands.push_back(argument); + + if (collapse_to_false) + { + auto false_value = std::make_shared(0u, function_node.getResultType()); + auto false_node = std::make_shared(std::move(false_value)); + node = std::move(false_node); + return; + } + } + else if (function_name == "notEquals") + { + /// collect all inequality checks (x <> value) + + const auto add_not_equals_function_if_not_present = [&](const auto & expression_node, const ConstantNode * constant) + { + auto & constant_set = not_equals_node_to_constants[expression_node]; + if (!constant_set.contains(constant)) + { + constant_set.insert(constant); + node_to_not_equals_functions[expression_node].push_back(argument); + } + }; + + if (const auto * lhs_literal = lhs->as(); + lhs_literal && !lhs_literal->getValue().isNull()) + add_not_equals_function_if_not_present(rhs, lhs_literal); + else if (const auto * rhs_literal = rhs->as(); + rhs_literal && !rhs_literal->getValue().isNull()) + add_not_equals_function_if_not_present(lhs, rhs_literal); + else + and_operands.push_back(argument); + } + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected function name: '{}'", function_name); + } + + auto not_in_function_resolver = FunctionFactory::instance().get("notIn", getContext()); + + for (auto & [expression, not_equals_functions] : node_to_not_equals_functions) + { + const auto & settings = getSettings(); + if (not_equals_functions.size() < settings.optimize_min_inequality_conjunction_chain_length && !expression.node->getResultType()->lowCardinality()) + { + std::move(not_equals_functions.begin(), not_equals_functions.end(), std::back_inserter(and_operands)); + continue; + } + + Tuple args; + args.reserve(not_equals_functions.size()); + /// first we create tuple from RHS of notEquals functions + for (const auto & not_equals : not_equals_functions) + { + const auto * not_equals_function = not_equals->as(); + assert(not_equals_function && not_equals_function->getFunctionName() == "notEquals"); + + const auto & not_equals_arguments = not_equals_function->getArguments().getNodes(); + if (const auto * rhs_literal = not_equals_arguments[1]->as()) + { + args.push_back(rhs_literal->getValue()); } else { - node_to_constants.emplace(expression, constant); - and_operands.push_back(argument); + const auto * lhs_literal = not_equals_arguments[0]->as(); + assert(lhs_literal); + args.push_back(lhs_literal->getValue()); } - - return false; - }; - - bool collapse_to_false = false; - - if (const auto * lhs_literal = lhs->as()) - collapse_to_false = has_and_with_different_constant(rhs, lhs_literal); - else if (const auto * rhs_literal = rhs->as()) - collapse_to_false = has_and_with_different_constant(lhs, rhs_literal); - else - and_operands.push_back(argument); - - if 
(collapse_to_false) - { - auto false_value = std::make_shared(0u, function_node.getResultType()); - auto false_node = std::make_shared(std::move(false_value)); - node = std::move(false_node); - return; } + + auto rhs_node = std::make_shared(std::move(args)); + + auto not_in_function = std::make_shared("notIn"); + + QueryTreeNodes not_in_arguments; + not_in_arguments.reserve(2); + not_in_arguments.push_back(expression.node); + not_in_arguments.push_back(std::move(rhs_node)); + + not_in_function->getArguments().getNodes() = std::move(not_in_arguments); + not_in_function->resolveAsFunction(not_in_function_resolver); + + and_operands.push_back(std::move(not_in_function)); } if (and_operands.size() == function_node.getArguments().getNodes().size()) @@ -320,11 +401,21 @@ private: { /// AND operator can have UInt8 or bool as its type. /// bool is used if a bool constant is at least one operand. - /// Because we reduce the number of operands here by eliminating the same equality checks, - /// the only situation we can end up here is we had AND check where all the equality checks are the same so we know the type is UInt8. - /// Otherwise, we will have > 1 operands and we don't have to do anything. - assert(!function_node.getResultType()->isNullable() && and_operands[0]->getResultType()->equals(*function_node.getResultType())); - node = std::move(and_operands[0]); + + auto operand_type = and_operands[0]->getResultType(); + auto function_type = function_node.getResultType(); + assert(!function_type->isNullable()); + if (!function_type->equals(*operand_type)) + { + /// Result of equality operator can be low cardinality, while AND always returns UInt8. + /// In that case we replace `(lc = 1) AND (lc = 1)` with `(lc = 1) AS UInt8` + assert(function_type->equals(*removeLowCardinality(operand_type))); + node = createCastFunction(std::move(and_operands[0]), function_type, getContext()); + } + else + { + node = std::move(and_operands[0]); + } return; } @@ -389,11 +480,14 @@ private: continue; } + bool is_any_nullable = false; Tuple args; args.reserve(equals_functions.size()); /// first we create tuple from RHS of equals functions for (const auto & equals : equals_functions) { + is_any_nullable |= equals->getResultType()->isNullable(); + const auto * equals_function = equals->as(); assert(equals_function && equals_function->getFunctionName() == "equals"); @@ -421,8 +515,20 @@ private: in_function->getArguments().getNodes() = std::move(in_arguments); in_function->resolveAsFunction(in_function_resolver); - - or_operands.push_back(std::move(in_function)); + /** For `k :: UInt8`, expression `k = 1 OR k = NULL` with result type Nullable(UInt8) + * is replaced with `k IN (1, NULL)` with result type UInt8. + * Convert it back to Nullable(UInt8). 
+ */ + if (is_any_nullable && !in_function->getResultType()->isNullable()) + { + auto nullable_result_type = std::make_shared(in_function->getResultType()); + auto in_function_nullable = createCastFunction(std::move(in_function), std::move(nullable_result_type), getContext()); + or_operands.push_back(std::move(in_function_nullable)); + } + else + { + or_operands.push_back(std::move(in_function)); + } } if (or_operands.size() == function_node.getArguments().getNodes().size()) diff --git a/src/Analyzer/Passes/LogicalExpressionOptimizerPass.h b/src/Analyzer/Passes/LogicalExpressionOptimizerPass.h index 80062f38eac..658f6d767c4 100644 --- a/src/Analyzer/Passes/LogicalExpressionOptimizerPass.h +++ b/src/Analyzer/Passes/LogicalExpressionOptimizerPass.h @@ -68,7 +68,25 @@ namespace DB * WHERE a = 1 AND b = 'test'; * ------------------------------- * - * 5. Remove unnecessary IS NULL checks in JOIN ON clause + * 5. Replaces chains of inequality functions inside an AND with a single NOT IN operator. + * The replacement is done if: + * - one of the operands of the inequality function is a constant + * - length of chain is at least 'optimize_min_inequality_conjunction_chain_length' long OR the expression has type of LowCardinality + * + * E.g. (optimize_min_inequality_conjunction_chain_length = 2) + * ------------------------------- + * SELECT * + * FROM table + * WHERE a <> 1 AND a <> 2; + * + * will be transformed into + * + * SELECT * + * FROM TABLE + * WHERE a NOT IN (1, 2); + * ------------------------------- + * + * 6. Remove unnecessary IS NULL checks in JOIN ON clause * - equality check with explicit IS NULL check replaced with <=> operator * ------------------------------- * SELECT * FROM t1 JOIN t2 ON a = b OR (a IS NULL AND b IS NULL) @@ -85,7 +103,11 @@ class LogicalExpressionOptimizerPass final : public IQueryTreePass public: String getName() override { return "LogicalExpressionOptimizer"; } - String getDescription() override { return "Transform equality chain to a single IN function or a constant if possible"; } + String getDescription() override + { + return "Transforms chains of logical expressions if possible, i.e. " + "replace chains of equality functions inside an OR with a single IN operator"; + } void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override; }; diff --git a/src/Analyzer/Passes/QueryAnalysisPass.cpp b/src/Analyzer/Passes/QueryAnalysisPass.cpp index 1e63d5ca8e4..3290d918a8b 100644 --- a/src/Analyzer/Passes/QueryAnalysisPass.cpp +++ b/src/Analyzer/Passes/QueryAnalysisPass.cpp @@ -119,6 +119,7 @@ namespace ErrorCodes extern const int NUMBER_OF_COLUMNS_DOESNT_MATCH; extern const int FUNCTION_CANNOT_HAVE_PARAMETERS; extern const int SYNTAX_ERROR; + extern const int UNEXPECTED_EXPRESSION; } /** Query analyzer implementation overview. Please check documentation in QueryAnalysisPass.h first. 
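The new notEquals branch above collects the constants of every `expr <> const` conjunct per expression and, when a chain is long enough (or the expression is LowCardinality), folds it into a single NOT IN. The toy model below mirrors only the length-threshold bookkeeping, using strings for expressions and ints for constants instead of query-tree nodes; it is not the pass itself.

#include <cstddef>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

int main()
{
    // Same name as the setting checked by the pass; the value 3 is arbitrary here.
    const size_t optimize_min_inequality_conjunction_chain_length = 3;

    // Conjuncts of the form "expr <> constant" as they appear in an AND chain.
    std::vector<std::pair<std::string, int>> not_equals_conjuncts =
        {{"a", 1}, {"a", 2}, {"a", 1}, {"a", 3}, {"b", 5}};

    // Group constants per expression; duplicates like "a <> 1" collapse in the set.
    std::map<std::string, std::set<int>> per_expression;
    for (const auto & [expr, constant] : not_equals_conjuncts)
        per_expression[expr].insert(constant);

    for (const auto & [expr, constants] : per_expression)
    {
        if (constants.size() < optimize_min_inequality_conjunction_chain_length)
        {
            // Too short: keep the original conjuncts.
            for (int c : constants)
                std::cout << expr << " <> " << c << " AND ";
            continue;
        }
        // Long enough: emit a single NOT IN over the collected constants.
        std::cout << expr << " NOT IN (";
        for (auto it = constants.begin(); it != constants.end(); ++it)
            std::cout << (it == constants.begin() ? "" : ", ") << *it;
        std::cout << ") AND ";
    }
    std::cout << "...\n"; // prints: a NOT IN (1, 2, 3) AND b <> 5 AND ...
}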
@@ -1209,6 +1210,8 @@ private: static void expandGroupByAll(QueryNode & query_tree_node_typed); + static void expandOrderByAll(QueryNode & query_tree_node_typed); + static std::string rewriteAggregateFunctionNameIfNeeded(const std::string & aggregate_function_name, NullsAction action, const ContextPtr & context); @@ -2312,6 +2315,35 @@ void QueryAnalyzer::expandGroupByAll(QueryNode & query_tree_node_typed) recursivelyCollectMaxOrdinaryExpressions(node, group_by_nodes); } +void QueryAnalyzer::expandOrderByAll(QueryNode & query_tree_node_typed) +{ + auto * all_node = query_tree_node_typed.getOrderBy().getNodes()[0]->as(); + if (!all_node) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Select analyze for not sort node."); + + auto & projection_nodes = query_tree_node_typed.getProjection().getNodes(); + auto list_node = std::make_shared(); + list_node->getNodes().reserve(projection_nodes.size()); + + for (auto & node : projection_nodes) + { + if (auto * identifier_node = node->as(); identifier_node != nullptr) + if (Poco::toUpper(identifier_node->getIdentifier().getFullName()) == "ALL" || Poco::toUpper(identifier_node->getAlias()) == "ALL") + throw Exception(ErrorCodes::UNEXPECTED_EXPRESSION, + "Cannot use ORDER BY ALL to sort a column with name 'all', please disable setting `enable_order_by_all` and try again"); + + if (auto * function_node = node->as(); function_node != nullptr) + if (Poco::toUpper(function_node->getAlias()) == "ALL") + throw Exception(ErrorCodes::UNEXPECTED_EXPRESSION, + "Cannot use ORDER BY ALL to sort a column with name 'all', please disable setting `enable_order_by_all` and try again"); + + auto sort_node = std::make_shared(node, all_node->getSortDirection(), all_node->getNullsSortDirection()); + list_node->getNodes().push_back(sort_node); + } + + query_tree_node_typed.getOrderByNode() = list_node; +} + std::string QueryAnalyzer::rewriteAggregateFunctionNameIfNeeded( const std::string & aggregate_function_name, NullsAction action, const ContextPtr & context) { @@ -6975,6 +7007,9 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier if (query_node_typed.hasHaving() && query_node_typed.isGroupByWithTotals() && is_rollup_or_cube) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "WITH TOTALS and WITH ROLLUP or CUBE are not supported together in presence of HAVING"); + if (settings.enable_order_by_all && query_node_typed.isOrderByAll()) + expandOrderByAll(query_node_typed); + /// Initialize aliases in query node scope QueryExpressionsAliasVisitor visitor(scope); diff --git a/src/Analyzer/QueryNode.h b/src/Analyzer/QueryNode.h index 82bc72b7411..d8b8741afb2 100644 --- a/src/Analyzer/QueryNode.h +++ b/src/Analyzer/QueryNode.h @@ -219,6 +219,18 @@ public: is_group_by_all = is_group_by_all_value; } + /// Returns true, if query node has ORDER BY ALL modifier, false otherwise + bool isOrderByAll() const + { + return is_order_by_all; + } + + /// Set query node ORDER BY ALL modifier value + void setIsOrderByAll(bool is_order_by_all_value) + { + is_order_by_all = is_order_by_all_value; + } + /// Returns true if query node WITH section is not empty, false otherwise bool hasWith() const { @@ -590,6 +602,7 @@ private: bool is_group_by_with_cube = false; bool is_group_by_with_grouping_sets = false; bool is_group_by_all = false; + bool is_order_by_all = false; std::string cte_name; NamesAndTypes projection_columns; diff --git a/src/Analyzer/QueryTreeBuilder.cpp b/src/Analyzer/QueryTreeBuilder.cpp index c541888e5b9..4e2d0ad10a8 100644 --- 
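expandOrderByAll above rewrites an ORDER BY ALL node into one sort node per projection column and refuses to do so when a projection column is itself named or aliased "all", since that case is ambiguous. The sketch below reproduces just that expansion over plain strings rather than query-tree nodes, and only checks the column name, not aliases.

#include <algorithm>
#include <cctype>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

struct SortEntry
{
    std::string column;
    bool ascending;
};

static std::string toUpper(std::string s)
{
    std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) { return static_cast<char>(std::toupper(c)); });
    return s;
}

// ORDER BY ALL becomes one sort entry per projection column, all with the
// direction taken from the original ALL node.
std::vector<SortEntry> expandOrderByAll(const std::vector<std::string> & projection, bool ascending)
{
    std::vector<SortEntry> result;
    result.reserve(projection.size());
    for (const auto & column : projection)
    {
        if (toUpper(column) == "ALL")
            throw std::runtime_error("Cannot use ORDER BY ALL to sort a column with name 'all'");
        result.push_back({column, ascending});
    }
    return result;
}

int main()
{
    for (const auto & entry : expandOrderByAll({"id", "event_time", "value"}, /*ascending=*/true))
        std::cout << entry.column << (entry.ascending ? " ASC" : " DESC") << '\n';
}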
a/src/Analyzer/QueryTreeBuilder.cpp +++ b/src/Analyzer/QueryTreeBuilder.cpp @@ -284,6 +284,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q current_query_tree->setIsGroupByWithRollup(select_query_typed.group_by_with_rollup); current_query_tree->setIsGroupByWithGroupingSets(select_query_typed.group_by_with_grouping_sets); current_query_tree->setIsGroupByAll(select_query_typed.group_by_all); + current_query_tree->setIsOrderByAll(select_query_typed.order_by_all); current_query_tree->setOriginalAST(select_query); auto current_context = current_query_tree->getContext(); diff --git a/src/Analyzer/Utils.cpp b/src/Analyzer/Utils.cpp index 918126e0ccc..f75022220e7 100644 --- a/src/Analyzer/Utils.cpp +++ b/src/Analyzer/Utils.cpp @@ -667,4 +667,20 @@ NameSet collectIdentifiersFullNames(const QueryTreeNodePtr & node) return out; } +QueryTreeNodePtr createCastFunction(QueryTreeNodePtr node, DataTypePtr result_type, ContextPtr context) +{ + auto enum_literal = std::make_shared(result_type->getName(), std::make_shared()); + auto enum_literal_node = std::make_shared(std::move(enum_literal)); + + auto cast_function = FunctionFactory::instance().get("_CAST", std::move(context)); + QueryTreeNodes arguments{ std::move(node), std::move(enum_literal_node) }; + + auto function_node = std::make_shared("_CAST"); + function_node->getArguments().getNodes() = std::move(arguments); + + function_node->resolveAsFunction(cast_function->build(function_node->getArgumentColumns())); + + return function_node; +} + } diff --git a/src/Analyzer/Utils.h b/src/Analyzer/Utils.h index 060dc7d8bc0..e3316f5ad6b 100644 --- a/src/Analyzer/Utils.h +++ b/src/Analyzer/Utils.h @@ -99,4 +99,7 @@ void rerunFunctionResolve(FunctionNode * function_node, ContextPtr context); /// Just collect all identifiers from query tree NameSet collectIdentifiersFullNames(const QueryTreeNodePtr & node); +/// Wrap node into `_CAST` function +QueryTreeNodePtr createCastFunction(QueryTreeNodePtr node, DataTypePtr result_type, ContextPtr context); + } diff --git a/src/Backups/BackupCoordinationRemote.cpp b/src/Backups/BackupCoordinationRemote.cpp index 2633e1bedd2..b659887e0da 100644 --- a/src/Backups/BackupCoordinationRemote.cpp +++ b/src/Backups/BackupCoordinationRemote.cpp @@ -184,12 +184,12 @@ BackupCoordinationRemote::BackupCoordinationRemote( if (my_is_internal) { String alive_node_path = my_zookeeper_path + "/stage/alive|" + my_current_host; - auto code = zk->tryCreate(alive_node_path, "", zkutil::CreateMode::Ephemeral); - if (code == Coordination::Error::ZNODEEXISTS) - zk->handleEphemeralNodeExistenceNoFailureInjection(alive_node_path, ""); - else if (code != Coordination::Error::ZOK) - throw zkutil::KeeperException::fromPath(code, alive_node_path); + /// Delete the ephemeral node from the previous connection so we don't have to wait for keeper to do it automatically. + zk->tryRemove(alive_node_path); + + zk->createAncestors(alive_node_path); + zk->create(alive_node_path, "", zkutil::CreateMode::Ephemeral); } }) { diff --git a/src/Backups/BackupCoordinationStageSync.cpp b/src/Backups/BackupCoordinationStageSync.cpp index 9b9ddc8515c..2eba3440be9 100644 --- a/src/Backups/BackupCoordinationStageSync.cpp +++ b/src/Backups/BackupCoordinationStageSync.cpp @@ -60,12 +60,6 @@ void BackupCoordinationStageSync::set(const String & current_host, const String } else { - /// Make an ephemeral node so the initiator can track if the current host is still working. 
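The coordination constructors now drop a possibly stale "alive" ephemeral node left by a previous session and re-create it, instead of tolerating ZNODEEXISTS. The toy in-memory store below only illustrates why the delete-then-create order avoids the old failure mode; it is not the zkutil client, and the path is a made-up example.

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

// Toy stand-in for the Keeper calls used in the patch (tryRemove / createAncestors / create).
struct ToyKeeper
{
    std::map<std::string, std::string> nodes;

    bool tryRemove(const std::string & path) { return nodes.erase(path) > 0; }

    void createAncestors(const std::string & path)
    {
        for (size_t pos = path.find('/', 1); pos != std::string::npos; pos = path.find('/', pos + 1))
            nodes.emplace(path.substr(0, pos), "");
    }

    void create(const std::string & path, const std::string & data)
    {
        if (!nodes.emplace(path, data).second)
            throw std::runtime_error("ZNODEEXISTS: " + path);
    }
};

int main()
{
    ToyKeeper keeper;
    const std::string alive_node_path = "/backup/stage/alive|host1";

    keeper.createAncestors(alive_node_path);
    keeper.create(alive_node_path, ""); // first connection registers itself

    // Reconnection with the same path: drop the stale node first, then re-create it,
    // so the call succeeds instead of failing with ZNODEEXISTS until the old session expires.
    keeper.tryRemove(alive_node_path);
    keeper.createAncestors(alive_node_path);
    keeper.create(alive_node_path, "");

    std::cout << "alive node re-created, total nodes: " << keeper.nodes.size() << '\n';
}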
- String alive_node_path = zookeeper_path + "/alive|" + current_host; - auto code = zookeeper->tryCreate(alive_node_path, "", zkutil::CreateMode::Ephemeral); - if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNODEEXISTS) - throw zkutil::KeeperException::fromPath(code, alive_node_path); - zookeeper->createIfNotExists(zookeeper_path + "/started|" + current_host, ""); zookeeper->createIfNotExists(zookeeper_path + "/current|" + current_host + "|" + new_stage, message); } @@ -106,39 +100,36 @@ Strings BackupCoordinationStageSync::waitFor(const Strings & all_hosts, const St namespace { - struct UnreadyHostState + struct UnreadyHost { + String host; bool started = false; - bool alive = false; }; } struct BackupCoordinationStageSync::State { - Strings results; - std::map unready_hosts; + std::optional results; std::optional> error; - std::optional host_terminated; + std::optional disconnected_host; + std::optional unready_host; }; BackupCoordinationStageSync::State BackupCoordinationStageSync::readCurrentState( - const Strings & zk_nodes, const Strings & all_hosts, const String & stage_to_wait) const + WithRetries::RetriesControlHolder & retries_control_holder, + const Strings & zk_nodes, + const Strings & all_hosts, + const String & stage_to_wait) const { + auto zookeeper = retries_control_holder.faulty_zookeeper; + auto & retries_ctl = retries_control_holder.retries_ctl; + std::unordered_set zk_nodes_set{zk_nodes.begin(), zk_nodes.end()}; State state; if (zk_nodes_set.contains("error")) { - String errors; - { - auto holder = with_retries.createRetriesControlHolder("readCurrentState"); - holder.retries_ctl.retryLoop( - [&, &zookeeper = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zookeeper); - errors = zookeeper->get(zookeeper_path + "/error"); - }); - } + String errors = zookeeper->get(zookeeper_path + "/error"); ReadBufferFromOwnString buf{errors}; String host; readStringBinary(host, buf); @@ -146,64 +137,50 @@ BackupCoordinationStageSync::State BackupCoordinationStageSync::readCurrentState return state; } + std::optional unready_host; + for (const auto & host : all_hosts) { if (!zk_nodes_set.contains("current|" + host + "|" + stage_to_wait)) { - UnreadyHostState unready_host_state; const String started_node_name = "started|" + host; const String alive_node_name = "alive|" + host; - const String alive_node_path = zookeeper_path + "/" + alive_node_name; - unready_host_state.started = zk_nodes_set.contains(started_node_name); - /// Because we do retries everywhere we can't fully rely on ephemeral nodes anymore. - /// Though we recreate "alive" node when reconnecting it might be not enough and race condition is possible. - /// And everything we can do here - just retry. - /// In worst case when we won't manage to see the alive node for a long time we will just abort the backup. - unready_host_state.alive = zk_nodes_set.contains(alive_node_name); - if (!unready_host_state.alive) + bool started = zk_nodes_set.contains(started_node_name); + bool alive = zk_nodes_set.contains(alive_node_name); + + if (!alive) { - LOG_TRACE(log, "Seems like host ({}) is dead. Will retry the check to confirm", host); - auto holder = with_retries.createRetriesControlHolder("readCurrentState::checkAliveNode"); - holder.retries_ctl.retryLoop( - [&, &zookeeper = holder.faulty_zookeeper]() - { - with_retries.renewZooKeeper(zookeeper); + /// If the "alive" node doesn't exist then we don't have connection to the corresponding host. 
+ /// This node is ephemeral so probably it will be recreated soon. We use zookeeper retries to wait. + /// In worst case when we won't manage to see the alive node for a long time we will just abort the backup. + const auto * const suffix = retries_ctl.isLastRetry() ? "" : ", will retry"; + if (started) + retries_ctl.setUserError(Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, + "Lost connection to host {}{}", host, suffix)); + else + retries_ctl.setUserError(Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, + "No connection to host {} yet{}", host, suffix)); - if (zookeeper->existsNoFailureInjection(alive_node_path)) - { - unready_host_state.alive = true; - return; - } - - // Retry with backoff. We also check whether it is last retry or no, because we won't to rethrow an exception. - if (!holder.retries_ctl.isLastRetry()) - holder.retries_ctl.setKeeperError(Coordination::Error::ZNONODE, "There is no alive node for host {}. Will retry", host); - }); + state.disconnected_host = host; + return state; } - LOG_TRACE(log, "Host ({}) appeared to be {}", host, unready_host_state.alive ? "alive" : "dead"); - state.unready_hosts.emplace(host, unready_host_state); - if (!unready_host_state.alive && unready_host_state.started && !state.host_terminated) - state.host_terminated = host; + if (!unready_host) + unready_host.emplace(UnreadyHost{.host = host, .started = started}); } } - if (state.host_terminated || !state.unready_hosts.empty()) - return state; - - auto holder = with_retries.createRetriesControlHolder("waitImpl::collectStagesToWait"); - holder.retries_ctl.retryLoop( - [&, &zookeeper = holder.faulty_zookeeper]() + if (unready_host) { - with_retries.renewZooKeeper(zookeeper); - Strings results; + state.unready_host = std::move(unready_host); + return state; + } - for (const auto & host : all_hosts) - results.emplace_back(zookeeper->get(zookeeper_path + "/current|" + host + "|" + stage_to_wait)); - - state.results = std::move(results); - }); + Strings results; + for (const auto & host : all_hosts) + results.emplace_back(zookeeper->get(zookeeper_path + "/current|" + host + "|" + stage_to_wait)); + state.results = std::move(results); return state; } @@ -229,7 +206,7 @@ Strings BackupCoordinationStageSync::waitImpl( auto watch = std::make_shared(); Strings zk_nodes; { - auto holder = with_retries.createRetriesControlHolder("waitImpl::getChildren"); + auto holder = with_retries.createRetriesControlHolder("waitImpl"); holder.retries_ctl.retryLoop( [&, &zookeeper = holder.faulty_zookeeper]() { @@ -237,17 +214,23 @@ Strings BackupCoordinationStageSync::waitImpl( watch->reset(); /// Get zk nodes and subscribe on their changes. zk_nodes = zookeeper->getChildren(zookeeper_path, nullptr, watch); + + /// Read the current state of zk nodes. + state = readCurrentState(holder, zk_nodes, all_hosts, stage_to_wait); }); } - /// Read and analyze the current state of zk nodes. - state = readCurrentState(zk_nodes, all_hosts, stage_to_wait); - if (state.error || state.host_terminated || state.unready_hosts.empty()) - break; /// Error happened or everything is ready. + /// Analyze the current state of zk nodes. 
+ chassert(state.results || state.error || state.disconnected_host || state.unready_host); - /// Log that we will wait - const auto & unready_host = state.unready_hosts.begin()->first; - LOG_INFO(log, "Waiting on ZooKeeper watch for any node to be changed (currently waiting for host {})", unready_host); + if (state.results || state.error || state.disconnected_host) + break; /// Everything is ready or error happened. + + /// Log what we will wait. + const auto & unready_host = *state.unready_host; + LOG_INFO(log, "Waiting on ZooKeeper watch for any node to be changed (currently waiting for host {}{})", + unready_host.host, + (!unready_host.started ? " which didn't start the operation yet" : "")); /// Wait until `watch_callback` is called by ZooKeeper meaning that zk nodes have changed. { @@ -270,23 +253,23 @@ Strings BackupCoordinationStageSync::waitImpl( state.error->second.rethrow(); /// Another host terminated without errors. - if (state.host_terminated) - throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, "Host {} suddenly stopped working", *state.host_terminated); + if (state.disconnected_host) + throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, "No connection to host {}", *state.disconnected_host); /// Something's unready, timeout is probably not enough. - if (!state.unready_hosts.empty()) + if (state.unready_host) { - const auto & [unready_host, unready_host_state] = *state.unready_hosts.begin(); + const auto & unready_host = *state.unready_host; throw Exception( ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, "Waited for host {} too long (> {}){}", - unready_host, + unready_host.host, to_string(*timeout), - unready_host_state.started ? "" : ": Operation didn't start"); + unready_host.started ? "" : ": Operation didn't start"); } LOG_TRACE(log, "Everything is Ok. All hosts achieved stage {}", stage_to_wait); - return state.results; + return std::move(*state.results); } } diff --git a/src/Backups/BackupCoordinationStageSync.h b/src/Backups/BackupCoordinationStageSync.h index 2efaec46b3a..e34fbcc099b 100644 --- a/src/Backups/BackupCoordinationStageSync.h +++ b/src/Backups/BackupCoordinationStageSync.h @@ -29,7 +29,7 @@ private: void createRootNodes(); struct State; - State readCurrentState(const Strings & zk_nodes, const Strings & all_hosts, const String & stage_to_wait) const; + State readCurrentState(WithRetries::RetriesControlHolder & retries_control_holder, const Strings & zk_nodes, const Strings & all_hosts, const String & stage_to_wait) const; Strings waitImpl(const Strings & all_hosts, const String & stage_to_wait, std::optional timeout) const; diff --git a/src/Backups/BackupEntriesCollector.cpp b/src/Backups/BackupEntriesCollector.cpp index a335b92fe3e..564a518689a 100644 --- a/src/Backups/BackupEntriesCollector.cpp +++ b/src/Backups/BackupEntriesCollector.cpp @@ -43,14 +43,6 @@ namespace Stage = BackupCoordinationStage; namespace { - /// Uppercases the first character of a passed string. 
- String toUpperFirst(const String & str) - { - String res = str; - res[0] = std::toupper(res[0]); - return res; - } - /// Outputs "table " or "temporary table " String tableNameWithTypeToString(const String & database_name, const String & table_name, bool first_upper) { @@ -96,18 +88,19 @@ BackupEntriesCollector::BackupEntriesCollector( , read_settings(read_settings_) , context(context_) , on_cluster_first_sync_timeout(context->getConfigRef().getUInt64("backups.on_cluster_first_sync_timeout", 180000)) - , collect_metadata_timeout(context->getConfigRef().getUInt64("backups.collect_metadata_timeout", context->getConfigRef().getUInt64("backups.consistent_metadata_snapshot_timeout", 600000))) + , collect_metadata_timeout(context->getConfigRef().getUInt64( + "backups.collect_metadata_timeout", context->getConfigRef().getUInt64("backups.consistent_metadata_snapshot_timeout", 600000))) , attempts_to_collect_metadata_before_sleep(context->getConfigRef().getUInt("backups.attempts_to_collect_metadata_before_sleep", 2)) - , min_sleep_before_next_attempt_to_collect_metadata(context->getConfigRef().getUInt64("backups.min_sleep_before_next_attempt_to_collect_metadata", 100)) - , max_sleep_before_next_attempt_to_collect_metadata(context->getConfigRef().getUInt64("backups.max_sleep_before_next_attempt_to_collect_metadata", 5000)) + , min_sleep_before_next_attempt_to_collect_metadata( + context->getConfigRef().getUInt64("backups.min_sleep_before_next_attempt_to_collect_metadata", 100)) + , max_sleep_before_next_attempt_to_collect_metadata( + context->getConfigRef().getUInt64("backups.max_sleep_before_next_attempt_to_collect_metadata", 5000)) , compare_collected_metadata(context->getConfigRef().getBool("backups.compare_collected_metadata", true)) , log(&Poco::Logger::get("BackupEntriesCollector")) , global_zookeeper_retries_info( - "BackupEntriesCollector", - log, - context->getSettingsRef().backup_restore_keeper_max_retries, - context->getSettingsRef().backup_restore_keeper_retry_initial_backoff_ms, - context->getSettingsRef().backup_restore_keeper_retry_max_backoff_ms) + context->getSettingsRef().backup_restore_keeper_max_retries, + context->getSettingsRef().backup_restore_keeper_retry_initial_backoff_ms, + context->getSettingsRef().backup_restore_keeper_retry_max_backoff_ms) , threadpool(threadpool_) { } @@ -164,7 +157,7 @@ BackupEntries BackupEntriesCollector::run() Strings BackupEntriesCollector::setStage(const String & new_stage, const String & message) { - LOG_TRACE(log, fmt::runtime(toUpperFirst(new_stage))); + LOG_TRACE(log, "Setting stage: {}", new_stage); current_stage = new_stage; backup_coordination->setStage(new_stage, message); @@ -580,7 +573,7 @@ std::vector> BackupEntriesCollector::findTablesInD { /// Database or table could be replicated - so may use ZooKeeper. We need to retry. 
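The three settings wired into global_zookeeper_retries_info above (backup_restore_keeper_max_retries, the initial backoff and the maximum backoff) drive a retry loop with capped exponential backoff. The generic sketch below shows what those knobs mean for such a loop; it is a simplified model, not the ZooKeeperRetriesControl implementation.

#include <algorithm>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <stdexcept>
#include <thread>

// Run the body once plus up to max_retries retries, sleeping between attempts with an
// exponentially growing backoff that is capped at max_backoff_ms.
void retryLoop(const std::function<void()> & body, size_t max_retries, uint64_t initial_backoff_ms, uint64_t max_backoff_ms)
{
    uint64_t backoff_ms = initial_backoff_ms;
    for (size_t attempt = 0; attempt <= max_retries; ++attempt)
    {
        try
        {
            body();
            return;
        }
        catch (const std::exception & e)
        {
            if (attempt == max_retries)
                throw; // out of retries: propagate the last error
            std::cerr << "attempt " << attempt << " failed (" << e.what() << "), retrying in " << backoff_ms << " ms\n";
            std::this_thread::sleep_for(std::chrono::milliseconds(backoff_ms));
            backoff_ms = std::min(backoff_ms * 2, max_backoff_ms);
        }
    }
}

int main()
{
    int calls = 0;
    retryLoop([&] { if (++calls < 3) throw std::runtime_error("transient Keeper error"); },
              /*max_retries=*/10, /*initial_backoff_ms=*/100, /*max_backoff_ms=*/5000);
    std::cout << "succeeded after " << calls << " calls\n";
}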
auto zookeeper_retries_info = global_zookeeper_retries_info; - ZooKeeperRetriesControl retries_ctl("getTablesForBackup", zookeeper_retries_info, nullptr); + ZooKeeperRetriesControl retries_ctl("getTablesForBackup", log, zookeeper_retries_info, nullptr); retries_ctl.retryLoop([&](){ db_tables = database->getTablesForBackup(filter_by_table_name, context); }); } catch (Exception & e) diff --git a/src/Backups/BackupIO_S3.cpp b/src/Backups/BackupIO_S3.cpp index 74195a93072..d143d813a2f 100644 --- a/src/Backups/BackupIO_S3.cpp +++ b/src/Backups/BackupIO_S3.cpp @@ -68,12 +68,16 @@ namespace client_configuration.connectTimeoutMs = 10 * 1000; /// Requests in backups can be extremely long, set to one hour client_configuration.requestTimeoutMs = 60 * 60 * 1000; - client_configuration.retryStrategy = std::make_shared(request_settings.retry_attempts); + + S3::ClientSettings client_settings{ + .use_virtual_addressing = s3_uri.is_virtual_hosted_style, + .disable_checksum = local_settings.s3_disable_checksum, + .gcs_issue_compose_request = context->getConfigRef().getBool("s3.gcs_issue_compose_request", false), + }; return S3::ClientFactory::instance().create( client_configuration, - s3_uri.is_virtual_hosted_style, - local_settings.s3_disable_checksum, + client_settings, credentials.GetAWSAccessKeyId(), credentials.GetAWSSecretKey(), settings.auth_settings.server_side_encryption_customer_key_base64, @@ -146,7 +150,7 @@ UInt64 BackupReaderS3::getFileSize(const String & file_name) { auto objects = listObjects(*client, s3_uri, file_name); if (objects.empty()) - throw Exception(ErrorCodes::S3_ERROR, "Object {} must exist"); + throw Exception(ErrorCodes::S3_ERROR, "Object {} must exist", file_name); return objects[0].GetSize(); } @@ -299,7 +303,7 @@ UInt64 BackupWriterS3::getFileSize(const String & file_name) { auto objects = listObjects(*client, s3_uri, file_name); if (objects.empty()) - throw Exception(ErrorCodes::S3_ERROR, "Object {} must exist"); + throw Exception(ErrorCodes::S3_ERROR, "Object {} must exist", file_name); return objects[0].GetSize(); } diff --git a/src/Backups/BackupImpl.cpp b/src/Backups/BackupImpl.cpp index 61984d58889..9ac68bc2437 100644 --- a/src/Backups/BackupImpl.cpp +++ b/src/Backups/BackupImpl.cpp @@ -157,11 +157,16 @@ BackupImpl::~BackupImpl() void BackupImpl::open() { std::lock_guard lock{mutex}; - LOG_INFO(log, "{} backup: {}", ((open_mode == OpenMode::WRITE) ? "Writing" : "Reading"), backup_name_for_logging); - ProfileEvents::increment((open_mode == OpenMode::WRITE) ? 
ProfileEvents::BackupsOpenedForWrite : ProfileEvents::BackupsOpenedForRead); - if (open_mode == OpenMode::WRITE) + if (open_mode == OpenMode::READ) { + ProfileEvents::increment(ProfileEvents::BackupsOpenedForRead); + LOG_INFO(log, "Reading backup: {}", backup_name_for_logging); + } + else + { + ProfileEvents::increment(ProfileEvents::BackupsOpenedForWrite); + LOG_INFO(log, "Writing backup: {}", backup_name_for_logging); timestamp = std::time(nullptr); if (!uuid) uuid = UUIDHelpers::generateV4(); @@ -189,7 +194,7 @@ void BackupImpl::open() void BackupImpl::close() { std::lock_guard lock{mutex}; - closeArchive(); + closeArchive(/* finalize= */ false); if (!is_internal_backup && writer && !writing_finalized) removeAllFilesAfterFailure(); @@ -222,8 +227,11 @@ void BackupImpl::openArchive() } } -void BackupImpl::closeArchive() +void BackupImpl::closeArchive(bool finalize) { + if (finalize && archive_writer) + archive_writer->finalize(); + archive_reader.reset(); archive_writer.reset(); } @@ -978,7 +986,7 @@ void BackupImpl::finalizeWriting() { LOG_TRACE(log, "Finalizing backup {}", backup_name_for_logging); writeBackupMetadata(); - closeArchive(); + closeArchive(/* finalize= */ true); setCompressedSize(); removeLockFile(); LOG_TRACE(log, "Finalized backup {}", backup_name_for_logging); diff --git a/src/Backups/BackupImpl.h b/src/Backups/BackupImpl.h index 6070db79aa6..b369fe00171 100644 --- a/src/Backups/BackupImpl.h +++ b/src/Backups/BackupImpl.h @@ -89,7 +89,7 @@ private: void close(); void openArchive(); - void closeArchive(); + void closeArchive(bool finalize); /// Writes the file ".backup" containing backup's metadata. void writeBackupMetadata() TSA_REQUIRES(mutex); diff --git a/src/Backups/BackupInfo.cpp b/src/Backups/BackupInfo.cpp index f993d7ed984..2bff400d4fe 100644 --- a/src/Backups/BackupInfo.cpp +++ b/src/Backups/BackupInfo.cpp @@ -78,13 +78,16 @@ BackupInfo BackupInfo::fromAST(const IAST & ast) } } - res.args.reserve(list->children.size() - index); - for (; index < list->children.size(); ++index) + size_t args_size = list->children.size(); + res.args.reserve(args_size - index); + for (; index < args_size; ++index) { const auto & elem = list->children[index]; const auto * lit = elem->as(); if (!lit) + { throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected literal, got {}", serializeAST(*elem)); + } res.args.push_back(lit->value); } } diff --git a/src/Backups/BackupOperationInfo.h b/src/Backups/BackupOperationInfo.h index 54f5e5e9965..e57b57d75f1 100644 --- a/src/Backups/BackupOperationInfo.h +++ b/src/Backups/BackupOperationInfo.h @@ -17,6 +17,9 @@ struct BackupOperationInfo /// Operation name, a string like "Disk('backups', 'my_backup')" String name; + /// Base Backup Operation name, a string like "Disk('backups', 'my_base_backup')" + String base_backup_name; + /// This operation is internal and should not be shown in system.backups bool internal = false; diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index a1f619af0a4..8c4bb7e414c 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -394,9 +394,13 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Context auto backup_info = BackupInfo::fromAST(*backup_query->backup_name); String backup_name_for_logging = backup_info.toStringForLogging(); + String base_backup_name; + if (backup_settings.base_backup_info) + base_backup_name = backup_settings.base_backup_info->toString(); + try { - addInfo(backup_id, backup_name_for_logging, backup_settings.internal, 
BackupStatus::CREATING_BACKUP); + addInfo(backup_id, backup_name_for_logging, base_backup_name, backup_settings.internal, BackupStatus::CREATING_BACKUP); /// Prepare context to use. ContextPtr context_in_use = context; @@ -606,7 +610,6 @@ void BackupsWorker::doBackup( void BackupsWorker::buildFileInfosForBackupEntries(const BackupPtr & backup, const BackupEntries & backup_entries, const ReadSettings & read_settings, std::shared_ptr backup_coordination) { - LOG_TRACE(log, "{}", Stage::BUILDING_FILE_INFOS); backup_coordination->setStage(Stage::BUILDING_FILE_INFOS, ""); backup_coordination->waitForStage(Stage::BUILDING_FILE_INFOS); backup_coordination->addFileInfos(::DB::buildFileInfosForBackupEntries(backup_entries, backup->getBaseBackup(), read_settings, getThreadPool(ThreadPoolId::BACKUP_MAKE_FILES_LIST))); @@ -745,8 +748,11 @@ OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePt { auto backup_info = BackupInfo::fromAST(*restore_query->backup_name); String backup_name_for_logging = backup_info.toStringForLogging(); + String base_backup_name; + if (restore_settings.base_backup_info) + base_backup_name = restore_settings.base_backup_info->toString(); - addInfo(restore_id, backup_name_for_logging, restore_settings.internal, BackupStatus::RESTORING); + addInfo(restore_id, backup_name_for_logging, base_backup_name, restore_settings.internal, BackupStatus::RESTORING); /// Prepare context to use. ContextMutablePtr context_in_use = context; @@ -1005,11 +1011,12 @@ void BackupsWorker::restoreTablesData(const OperationID & restore_id, BackupPtr } -void BackupsWorker::addInfo(const OperationID & id, const String & name, bool internal, BackupStatus status) +void BackupsWorker::addInfo(const OperationID & id, const String & name, const String & base_backup_name, bool internal, BackupStatus status) { BackupOperationInfo info; info.id = id; info.name = name; + info.base_backup_name = base_backup_name; info.internal = internal; info.status = status; info.start_time = std::chrono::system_clock::now(); diff --git a/src/Backups/BackupsWorker.h b/src/Backups/BackupsWorker.h index b0a76eb0fa8..e2bd076314f 100644 --- a/src/Backups/BackupsWorker.h +++ b/src/Backups/BackupsWorker.h @@ -83,7 +83,7 @@ private: /// Run data restoring tasks which insert data to tables. 
void restoreTablesData(const BackupOperationID & restore_id, BackupPtr backup, DataRestoreTasks && tasks, ThreadPool & thread_pool); - void addInfo(const BackupOperationID & id, const String & name, bool internal, BackupStatus status); + void addInfo(const BackupOperationID & id, const String & name, const String & base_backup_name, bool internal, BackupStatus status); void setStatus(const BackupOperationID & id, BackupStatus status, bool throw_if_error = true); void setStatusSafe(const String & id, BackupStatus status) { setStatus(id, status, false); } void setNumFilesAndSize(const BackupOperationID & id, size_t num_files, UInt64 total_size, size_t num_entries, diff --git a/src/Backups/RestoreCoordinationRemote.cpp b/src/Backups/RestoreCoordinationRemote.cpp index 60a83c580f0..190634de4a9 100644 --- a/src/Backups/RestoreCoordinationRemote.cpp +++ b/src/Backups/RestoreCoordinationRemote.cpp @@ -43,12 +43,12 @@ RestoreCoordinationRemote::RestoreCoordinationRemote( if (my_is_internal) { String alive_node_path = my_zookeeper_path + "/stage/alive|" + my_current_host; - auto code = zk->tryCreate(alive_node_path, "", zkutil::CreateMode::Ephemeral); - if (code == Coordination::Error::ZNODEEXISTS) - zk->handleEphemeralNodeExistenceNoFailureInjection(alive_node_path, ""); - else if (code != Coordination::Error::ZOK) - throw zkutil::KeeperException::fromPath(code, alive_node_path); + /// Delete the ephemeral node from the previous connection so we don't have to wait for keeper to do it automatically. + zk->tryRemove(alive_node_path); + + zk->createAncestors(alive_node_path); + zk->create(alive_node_path, "", zkutil::CreateMode::Ephemeral); } }) { diff --git a/src/Backups/RestorerFromBackup.cpp b/src/Backups/RestorerFromBackup.cpp index 026671edd6a..4e580e493a7 100644 --- a/src/Backups/RestorerFromBackup.cpp +++ b/src/Backups/RestorerFromBackup.cpp @@ -43,14 +43,6 @@ namespace Stage = BackupCoordinationStage; namespace { - /// Uppercases the first character of a passed string. 
- String toUpperFirst(const String & str) - { - String res = str; - res[0] = std::toupper(res[0]); - return res; - } - /// Outputs "table " or "temporary table " String tableNameWithTypeToString(const String & database_name, const String & table_name, bool first_upper) { @@ -145,7 +137,7 @@ RestorerFromBackup::DataRestoreTasks RestorerFromBackup::run(Mode mode) void RestorerFromBackup::setStage(const String & new_stage, const String & message) { - LOG_TRACE(log, fmt::runtime(toUpperFirst(new_stage))); + LOG_TRACE(log, "Setting stage: {}", new_stage); current_stage = new_stage; if (restore_coordination) diff --git a/src/Backups/WithRetries.cpp b/src/Backups/WithRetries.cpp index 40ae8d06462..55809dc6958 100644 --- a/src/Backups/WithRetries.cpp +++ b/src/Backups/WithRetries.cpp @@ -20,22 +20,19 @@ WithRetries::KeeperSettings WithRetries::KeeperSettings::fromContext(ContextPtr }; } -WithRetries::WithRetries(Poco::Logger * log_, zkutil::GetZooKeeper get_zookeeper_, const KeeperSettings & settings_, RenewerCallback callback_) +WithRetries::WithRetries( + Poco::Logger * log_, zkutil::GetZooKeeper get_zookeeper_, const KeeperSettings & settings_, RenewerCallback callback_) : log(log_) , get_zookeeper(get_zookeeper_) , settings(settings_) , callback(callback_) , global_zookeeper_retries_info( - log->name(), - log, - settings.keeper_max_retries, - settings.keeper_retry_initial_backoff_ms, - settings.keeper_retry_max_backoff_ms) + settings.keeper_max_retries, settings.keeper_retry_initial_backoff_ms, settings.keeper_retry_max_backoff_ms) {} WithRetries::RetriesControlHolder::RetriesControlHolder(const WithRetries * parent, const String & name) : info(parent->global_zookeeper_retries_info) - , retries_ctl(name, info, nullptr) + , retries_ctl(name, parent->log, info, nullptr) , faulty_zookeeper(parent->getFaultyZooKeeper()) {} diff --git a/src/BridgeHelper/LibraryBridgeHelper.cpp b/src/BridgeHelper/LibraryBridgeHelper.cpp index 60588951c32..e83707595b9 100644 --- a/src/BridgeHelper/LibraryBridgeHelper.cpp +++ b/src/BridgeHelper/LibraryBridgeHelper.cpp @@ -12,7 +12,7 @@ LibraryBridgeHelper::LibraryBridgeHelper(ContextPtr context_) , http_timeout(context_->getGlobalContext()->getSettingsRef().http_receive_timeout.value) , bridge_host(config.getString("library_bridge.host", DEFAULT_HOST)) , bridge_port(config.getUInt("library_bridge.port", DEFAULT_PORT)) - , http_timeouts(ConnectionTimeouts::getHTTPTimeouts(context_->getSettingsRef(), {context_->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0})) + , http_timeouts(ConnectionTimeouts::getHTTPTimeouts(context_->getSettingsRef(), context_->getServerSettings().keep_alive_timeout)) { } diff --git a/src/BridgeHelper/XDBCBridgeHelper.h b/src/BridgeHelper/XDBCBridgeHelper.h index 44104f26f63..060de74b5b1 100644 --- a/src/BridgeHelper/XDBCBridgeHelper.h +++ b/src/BridgeHelper/XDBCBridgeHelper.h @@ -162,7 +162,7 @@ private: ConnectionTimeouts getHTTPTimeouts() { - return ConnectionTimeouts::getHTTPTimeouts(getContext()->getSettingsRef(), {getContext()->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0}); + return ConnectionTimeouts::getHTTPTimeouts(getContext()->getSettingsRef(), getContext()->getServerSettings().keep_alive_timeout); } protected: diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 017b8b283d1..86cb9acd056 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -226,6 +226,7 @@ add_object_library(clickhouse_storages_statistics Storages/Statistics) 
add_object_library(clickhouse_storages_liveview Storages/LiveView) add_object_library(clickhouse_storages_windowview Storages/WindowView) add_object_library(clickhouse_storages_s3queue Storages/S3Queue) +add_object_library(clickhouse_storages_materializedview Storages/MaterializedView) add_object_library(clickhouse_client Client) add_object_library(clickhouse_bridge BridgeHelper) add_object_library(clickhouse_server Server) @@ -436,6 +437,10 @@ dbms_target_link_libraries(PRIVATE ch_contrib::zstd) target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::zstd) target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::xz) +if (TARGET ch_contrib::pocketfft) + target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::pocketfft) +endif () + if (TARGET ch_contrib::icu) dbms_target_link_libraries (PRIVATE ch_contrib::icu) endif () diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index a20522993ea..e099aac0de9 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -318,14 +318,14 @@ void ClientBase::setupSignalHandler() sigemptyset(&new_act.sa_mask); #else if (sigemptyset(&new_act.sa_mask)) - throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler"); #endif if (sigaction(SIGINT, &new_act, nullptr)) - throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler"); if (sigaction(SIGQUIT, &new_act, nullptr)) - throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler"); } @@ -543,16 +543,16 @@ try if (!pager.empty()) { if (SIG_ERR == signal(SIGPIPE, SIG_IGN)) - throwFromErrno("Cannot set signal handler for SIGPIPE.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGPIPE"); /// We need to reset signals that had been installed in the /// setupSignalHandler() since terminal will send signals to both /// processes and so signals will be delivered to the /// clickhouse-client/local as well, which will be terminated when /// signal will be delivered second time. 
if (SIG_ERR == signal(SIGINT, SIG_IGN)) - throwFromErrno("Cannot set signal handler for SIGINT.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGINT"); if (SIG_ERR == signal(SIGQUIT, SIG_IGN)) - throwFromErrno("Cannot set signal handler for SIGQUIT.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGQUIT"); ShellCommand::Config config(pager); config.pipe_stdin_only = true; @@ -1306,11 +1306,11 @@ void ClientBase::resetOutput() pager_cmd->wait(); if (SIG_ERR == signal(SIGPIPE, SIG_DFL)) - throwFromErrno("Cannot set signal handler for SIIGPIEP.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGPIPE"); if (SIG_ERR == signal(SIGINT, SIG_DFL)) - throwFromErrno("Cannot set signal handler for SIGINT.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGINT"); if (SIG_ERR == signal(SIGQUIT, SIG_DFL)) - throwFromErrno("Cannot set signal handler for SIGQUIT.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler for SIGQUIT"); setupSignalHandler(); } diff --git a/src/Client/LocalConnection.cpp b/src/Client/LocalConnection.cpp index 849308155b0..dbb115f44ef 100644 --- a/src/Client/LocalConnection.cpp +++ b/src/Client/LocalConnection.cpp @@ -201,7 +201,7 @@ void LocalConnection::sendQuery( catch (...) { state->io.onException(); - state->exception = std::make_unique(ErrorCodes::UNKNOWN_EXCEPTION, "Unknown exception"); + state->exception = std::make_unique(Exception(ErrorCodes::UNKNOWN_EXCEPTION, "Unknown exception")); } } @@ -311,7 +311,7 @@ bool LocalConnection::poll(size_t) catch (...) 
{ state->io.onException(); - state->exception = std::make_unique(ErrorCodes::UNKNOWN_EXCEPTION, "Unknown exception"); + state->exception = std::make_unique(Exception(ErrorCodes::UNKNOWN_EXCEPTION, "Unknown exception")); } } diff --git a/src/Client/Suggest.cpp b/src/Client/Suggest.cpp index c7ebcac1264..836c03d81ff 100644 --- a/src/Client/Suggest.cpp +++ b/src/Client/Suggest.cpp @@ -48,7 +48,7 @@ Suggest::Suggest() "GRANT", "REVOKE", "OPTION", "ADMIN", "EXCEPT", "REPLACE", "IDENTIFIED", "HOST", "NAME", "READONLY", "WRITABLE", "PERMISSIVE", "FOR", "RESTRICTIVE", "RANDOMIZED", "INTERVAL", "LIMITS", "ONLY", "TRACKING", "IP", "REGEXP", "ILIKE", "CLEANUP", "APPEND", - "IGNORE NULLS", "RESPECT NULLS", "OVER"}); + "IGNORE NULLS", "RESPECT NULLS", "OVER", "PASTE"}); } static String getLoadSuggestionQuery(Int32 suggestion_limit, bool basic_suggestion) @@ -77,6 +77,7 @@ static String getLoadSuggestionQuery(Int32 suggestion_limit, bool basic_suggesti }; add_column("name", "functions", false, {}); + add_column("name", "database_engines", false, {}); add_column("name", "table_engines", false, {}); add_column("name", "formats", false, {}); add_column("name", "table_functions", false, {}); diff --git a/src/Columns/ColumnCompressed.cpp b/src/Columns/ColumnCompressed.cpp index 9fb7b108501..3bdc514d6d8 100644 --- a/src/Columns/ColumnCompressed.cpp +++ b/src/Columns/ColumnCompressed.cpp @@ -1,4 +1,5 @@ #include +#include #pragma clang diagnostic ignored "-Wold-style-cast" diff --git a/src/Columns/ColumnFunction.cpp b/src/Columns/ColumnFunction.cpp index 7c7b87bf4a6..d8eea26b7d5 100644 --- a/src/Columns/ColumnFunction.cpp +++ b/src/Columns/ColumnFunction.cpp @@ -248,7 +248,7 @@ void ColumnFunction::appendArguments(const ColumnsWithTypeAndName & columns) auto wanna_capture = columns.size(); if (were_captured + wanna_capture > args) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot capture {} columns because function {} has {} arguments{}.", + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot capture {} column(s) because function {} has {} arguments{}.", wanna_capture, function->getName(), args, (were_captured ? " and " + toString(were_captured) + " columns have already been captured" : "")); diff --git a/src/Columns/ColumnFunction.h b/src/Columns/ColumnFunction.h index c21e88744e0..efcdc4e4419 100644 --- a/src/Columns/ColumnFunction.h +++ b/src/Columns/ColumnFunction.h @@ -5,6 +5,7 @@ #include #include + namespace DB { namespace ErrorCodes @@ -16,7 +17,7 @@ class IFunctionBase; using FunctionBasePtr = std::shared_ptr; /** A column containing a lambda expression. - * Behaves like a constant-column. Contains an expression, but not input or output data. + * Contains an expression and captured columns, but not input arguments. 
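+  * Argument columns are captured incrementally via appendArguments(); trying to capture more
+  * columns than the function's arity is reported as a LOGICAL_ERROR (see ColumnFunction.cpp).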
*/ class ColumnFunction final : public COWHelper { @@ -207,8 +208,6 @@ private: bool is_function_compiled; void appendArgument(const ColumnWithTypeAndName & column); - - void addOffsetsForReplication(const IColumn::Offsets & offsets); }; const ColumnFunction * checkAndGetShortCircuitArgument(const ColumnPtr & column); diff --git a/src/Columns/IColumnDummy.cpp b/src/Columns/IColumnDummy.cpp index 42b66e1156c..01091a87049 100644 --- a/src/Columns/IColumnDummy.cpp +++ b/src/Columns/IColumnDummy.cpp @@ -1,4 +1,5 @@ #include +#include #include #include diff --git a/src/Columns/tests/gtest_column_vector.cpp b/src/Columns/tests/gtest_column_vector.cpp index 14bf36434b6..b71d4a095ab 100644 --- a/src/Columns/tests/gtest_column_vector.cpp +++ b/src/Columns/tests/gtest_column_vector.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include using namespace DB; diff --git a/src/Common/AlignedBuffer.cpp b/src/Common/AlignedBuffer.cpp index f1d3f98ff3a..5e11f16f106 100644 --- a/src/Common/AlignedBuffer.cpp +++ b/src/Common/AlignedBuffer.cpp @@ -18,9 +18,11 @@ void AlignedBuffer::alloc(size_t size, size_t alignment) void * new_buf; int res = ::posix_memalign(&new_buf, std::max(alignment, sizeof(void*)), size); if (0 != res) - throwFromErrno(fmt::format("Cannot allocate memory (posix_memalign), size: {}, alignment: {}.", - ReadableSize(size), ReadableSize(alignment)), - ErrorCodes::CANNOT_ALLOCATE_MEMORY, res); + throw ErrnoException( + ErrorCodes::CANNOT_ALLOCATE_MEMORY, + "Cannot allocate memory (posix_memalign), size: {}, alignment: {}.", + ReadableSize(size), + ReadableSize(alignment)); buf = new_buf; } diff --git a/src/Common/Allocator.cpp b/src/Common/Allocator.cpp index 2e00b157621..c4137920395 100644 --- a/src/Common/Allocator.cpp +++ b/src/Common/Allocator.cpp @@ -1,9 +1,190 @@ -#include "Allocator.h" +#include +#include +#include +#include +#include + +#include +#include + +#include +#include /// MADV_POPULATE_WRITE + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int CANNOT_ALLOCATE_MEMORY; + extern const int LOGICAL_ERROR; +} + +} + +namespace +{ + +using namespace DB; + +#if defined(MADV_POPULATE_WRITE) +/// Address passed to madvise is required to be aligned to the page boundary. +auto adjustToPageSize(void * buf, size_t len, size_t page_size) +{ + const uintptr_t address_numeric = reinterpret_cast(buf); + const size_t next_page_start = ((address_numeric + page_size - 1) / page_size) * page_size; + return std::make_pair(reinterpret_cast(next_page_start), len - (next_page_start - address_numeric)); +} +#endif + +void prefaultPages([[maybe_unused]] void * buf_, [[maybe_unused]] size_t len_) +{ +#if defined(MADV_POPULATE_WRITE) + if (len_ < POPULATE_THRESHOLD) + return; + + static const size_t page_size = ::getPageSize(); + if (len_ < page_size) /// Rounded address should be still within [buf, buf + len). 
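+        /// (Rounding the start address up to the next page boundary could otherwise consume the
+        /// whole buffer, leaving nothing valid to pass to madvise.)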
+ return; + + auto [buf, len] = adjustToPageSize(buf_, len_, page_size); + if (auto res = ::madvise(buf, len, MADV_POPULATE_WRITE); res < 0) + LOG_TRACE( + LogFrequencyLimiter(&Poco::Logger::get("Allocator"), 1), + "Attempt to populate pages failed: {} (EINVAL is expected for kernels < 5.14)", + errnoToString(res)); +#endif +} + +template +void * allocNoTrack(size_t size, size_t alignment) +{ + void * buf; + if (alignment <= MALLOC_MIN_ALIGNMENT) + { + if constexpr (clear_memory) + buf = ::calloc(size, 1); + else + buf = ::malloc(size); + + if (nullptr == buf) + throw DB::ErrnoException(DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Allocator: Cannot malloc {}.", ReadableSize(size)); + } + else + { + buf = nullptr; + int res = posix_memalign(&buf, alignment, size); + + if (0 != res) + throw DB::ErrnoException( + DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Cannot allocate memory (posix_memalign) {}.", ReadableSize(size)); + + if constexpr (clear_memory) + memset(buf, 0, size); + } + + if constexpr (populate) + prefaultPages(buf, size); + + return buf; +} + +void freeNoTrack(void * buf) +{ + ::free(buf); +} + +void checkSize(size_t size) +{ + /// More obvious exception in case of possible overflow (instead of just "Cannot mmap"). + if (size >= 0x8000000000000000ULL) + throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Too large size ({}) passed to allocator. It indicates an error.", size); +} + +} /// Constant is chosen almost arbitrarily, what I observed is 128KB is too small, 1MB is almost indistinguishable from 64MB and 1GB is too large. extern const size_t POPULATE_THRESHOLD = 16 * 1024 * 1024; +template +void * Allocator::alloc(size_t size, size_t alignment) +{ + checkSize(size); + auto trace = CurrentMemoryTracker::alloc(size); + void * ptr = allocNoTrack(size, alignment); + trace.onAlloc(ptr, size); + return ptr; +} + + +template +void Allocator::free(void * buf, size_t size) +{ + try + { + checkSize(size); + freeNoTrack(buf); + auto trace = CurrentMemoryTracker::free(size); + trace.onFree(buf, size); + } + catch (...) + { + DB::tryLogCurrentException("Allocator::free"); + throw; + } +} + +template +void * Allocator::realloc(void * buf, size_t old_size, size_t new_size, size_t alignment) +{ + checkSize(new_size); + + if (old_size == new_size) + { + /// nothing to do. + /// BTW, it's not possible to change alignment while doing realloc. + } + else if (alignment <= MALLOC_MIN_ALIGNMENT) + { + /// Resize malloc'd memory region with no special alignment requirement. + auto trace_free = CurrentMemoryTracker::free(old_size); + auto trace_alloc = CurrentMemoryTracker::alloc(new_size); + trace_free.onFree(buf, old_size); + + void * new_buf = ::realloc(buf, new_size); + if (nullptr == new_buf) + { + throw DB::ErrnoException( + DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, + "Allocator: Cannot realloc from {} to {}", + ReadableSize(old_size), + ReadableSize(new_size)); + } + + buf = new_buf; + trace_alloc.onAlloc(buf, new_size); + + if constexpr (clear_memory) + if (new_size > old_size) + memset(reinterpret_cast(buf) + old_size, 0, new_size - old_size); + } + else + { + /// Big allocs that requires a copy. MemoryTracker is called inside 'alloc', 'free' methods. 
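+        /// A new block with the requested alignment is allocated, min(old_size, new_size) bytes
+        /// are copied over, and the old block is freed; memory tracking happens inside alloc()/free(),
+        /// so no explicit MemoryTracker calls are needed on this path.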
+ void * new_buf = alloc(new_size, alignment); + memcpy(new_buf, buf, std::min(old_size, new_size)); + free(buf, old_size); + buf = new_buf; + } + + if constexpr (populate) + prefaultPages(buf, new_size); + + return buf; +} + + template class Allocator; template class Allocator; template class Allocator; diff --git a/src/Common/Allocator.h b/src/Common/Allocator.h index a85274e1f69..b865dacc2e9 100644 --- a/src/Common/Allocator.h +++ b/src/Common/Allocator.h @@ -8,47 +8,19 @@ #define ALLOCATOR_ASLR 1 #endif -#include -#include - #if !defined(OS_DARWIN) && !defined(OS_FREEBSD) #include #endif -#include -#include -#include - #include -#include - -#include -#include -#include -#include - #include - -#include -#include -#include +#include extern const size_t POPULATE_THRESHOLD; static constexpr size_t MALLOC_MIN_ALIGNMENT = 8; -namespace DB -{ - -namespace ErrorCodes -{ - extern const int CANNOT_ALLOCATE_MEMORY; - extern const int LOGICAL_ERROR; -} - -} - /** Previously there was a code which tried to use manual mmap and mremap (clickhouse_mremap.h) for large allocations/reallocations (64MB+). * Most modern allocators (including jemalloc) don't use mremap, so the idea was to take advantage from mremap system call for large reallocs. * Actually jemalloc had support for mremap, but it was intentionally removed from codebase https://github.com/jemalloc/jemalloc/commit/e2deab7a751c8080c2b2cdcfd7b11887332be1bb. @@ -69,80 +41,16 @@ class Allocator { public: /// Allocate memory range. - void * alloc(size_t size, size_t alignment = 0) - { - checkSize(size); - auto trace = CurrentMemoryTracker::alloc(size); - void * ptr = allocNoTrack(size, alignment); - trace.onAlloc(ptr, size); - return ptr; - } + void * alloc(size_t size, size_t alignment = 0); /// Free memory range. - void free(void * buf, size_t size) - { - try - { - checkSize(size); - freeNoTrack(buf); - auto trace = CurrentMemoryTracker::free(size); - trace.onFree(buf, size); - } - catch (...) - { - DB::tryLogCurrentException("Allocator::free"); - throw; - } - } + void free(void * buf, size_t size); /** Enlarge memory range. * Data from old range is moved to the beginning of new range. * Address of memory range could change. */ - void * realloc(void * buf, size_t old_size, size_t new_size, size_t alignment = 0) - { - checkSize(new_size); - - if (old_size == new_size) - { - /// nothing to do. - /// BTW, it's not possible to change alignment while doing realloc. - } - else if (alignment <= MALLOC_MIN_ALIGNMENT) - { - /// Resize malloc'd memory region with no special alignment requirement. - auto trace_free = CurrentMemoryTracker::free(old_size); - auto trace_alloc = CurrentMemoryTracker::alloc(new_size); - trace_free.onFree(buf, old_size); - - void * new_buf = ::realloc(buf, new_size); - if (nullptr == new_buf) - { - DB::throwFromErrno( - fmt::format("Allocator: Cannot realloc from {} to {}.", ReadableSize(old_size), ReadableSize(new_size)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY); - } - - buf = new_buf; - trace_alloc.onAlloc(buf, new_size); - - if constexpr (clear_memory) - if (new_size > old_size) - memset(reinterpret_cast(buf) + old_size, 0, new_size - old_size); - } - else - { - /// Big allocs that requires a copy. MemoryTracker is called inside 'alloc', 'free' methods. 
- void * new_buf = alloc(new_size, alignment); - memcpy(new_buf, buf, std::min(old_size, new_size)); - free(buf, old_size); - buf = new_buf; - } - - if constexpr (populate) - prefaultPages(buf, new_size); - - return buf; - } + void * realloc(void * buf, size_t old_size, size_t new_size, size_t alignment = 0); protected: static constexpr size_t getStackThreshold() @@ -153,76 +61,6 @@ protected: static constexpr bool clear_memory = clear_memory_; private: - void * allocNoTrack(size_t size, size_t alignment) - { - void * buf; - if (alignment <= MALLOC_MIN_ALIGNMENT) - { - if constexpr (clear_memory) - buf = ::calloc(size, 1); - else - buf = ::malloc(size); - - if (nullptr == buf) - DB::throwFromErrno(fmt::format("Allocator: Cannot malloc {}.", ReadableSize(size)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY); - } - else - { - buf = nullptr; - int res = posix_memalign(&buf, alignment, size); - - if (0 != res) - DB::throwFromErrno(fmt::format("Cannot allocate memory (posix_memalign) {}.", ReadableSize(size)), - DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, res); - - if constexpr (clear_memory) - memset(buf, 0, size); - } - - if constexpr (populate) - prefaultPages(buf, size); - - return buf; - } - - void freeNoTrack(void * buf) - { - ::free(buf); - } - - void checkSize(size_t size) - { - /// More obvious exception in case of possible overflow (instead of just "Cannot mmap"). - if (size >= 0x8000000000000000ULL) - throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Too large size ({}) passed to allocator. It indicates an error.", size); - } - - /// Address passed to madvise is required to be aligned to the page boundary. - auto adjustToPageSize(void * buf, size_t len, size_t page_size) - { - const uintptr_t address_numeric = reinterpret_cast(buf); - const size_t next_page_start = ((address_numeric + page_size - 1) / page_size) * page_size; - return std::make_pair(reinterpret_cast(next_page_start), len - (next_page_start - address_numeric)); - } - - void prefaultPages([[maybe_unused]] void * buf_, [[maybe_unused]] size_t len_) - { -#if defined(MADV_POPULATE_WRITE) - if (len_ < POPULATE_THRESHOLD) - return; - - static const size_t page_size = ::getPageSize(); - if (len_ < page_size) /// Rounded address should be still within [buf, buf + len). 
- return; - - auto [buf, len] = adjustToPageSize(buf_, len_, page_size); - if (auto res = ::madvise(buf, len, MADV_POPULATE_WRITE); res < 0) - LOG_TRACE( - LogFrequencyLimiter(&Poco::Logger::get("Allocator"), 1), - "Attempt to populate pages failed: {} (EINVAL is expected for kernels < 5.14)", - errnoToString(res)); -#endif - } }; diff --git a/src/Common/Arena.h b/src/Common/Arena.h index 7604091442e..917bef0d6e8 100644 --- a/src/Common/Arena.h +++ b/src/Common/Arena.h @@ -8,6 +8,7 @@ #include #include #include +#include #if __has_include() && defined(ADDRESS_SANITIZER) # include diff --git a/src/Common/ArenaWithFreeLists.h b/src/Common/ArenaWithFreeLists.h index 76760a20320..80b4a00241d 100644 --- a/src/Common/ArenaWithFreeLists.h +++ b/src/Common/ArenaWithFreeLists.h @@ -1,5 +1,6 @@ #pragma once +#include #include #if __has_include() && defined(ADDRESS_SANITIZER) # include diff --git a/src/Common/ArrayCache.h b/src/Common/ArrayCache.h index 79aeddb09df..cb15759e1ba 100644 --- a/src/Common/ArrayCache.h +++ b/src/Common/ArrayCache.h @@ -19,11 +19,6 @@ #include #include -/// Required for older Darwin builds, that lack definition of MAP_ANONYMOUS -#ifndef MAP_ANONYMOUS -#define MAP_ANONYMOUS MAP_ANON -#endif - namespace DB { @@ -179,13 +174,22 @@ private: { ptr = mmap(address_hint, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (MAP_FAILED == ptr) - DB::throwFromErrno(fmt::format("Allocator: Cannot mmap {}.", ReadableSize(size)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY); + throw DB::ErrnoException(DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Allocator: Cannot mmap {}", ReadableSize(size)); } ~Chunk() { if (ptr && 0 != munmap(ptr, size)) - DB::throwFromErrno(fmt::format("Allocator: Cannot munmap {}.", ReadableSize(size)), DB::ErrorCodes::CANNOT_MUNMAP); + { + try + { + throw DB::ErrnoException(DB::ErrorCodes::CANNOT_MUNMAP, "Allocator: Cannot munmap {}", ReadableSize(size)); + } + catch (DB::ErrnoException &) + { + DB::tryLogCurrentException(__PRETTY_FUNCTION__); + } + } } Chunk(Chunk && other) noexcept : ptr(other.ptr), size(other.size) diff --git a/src/Common/AsyncLoader.h b/src/Common/AsyncLoader.h index 7b6e3ebfefe..95a2273a0f4 100644 --- a/src/Common/AsyncLoader.h +++ b/src/Common/AsyncLoader.h @@ -362,7 +362,7 @@ public: bool is_executing = false; }; - // For introspection and debug only, see `system.async_loader` table. + // For introspection and debug only, see `system.asynchronous_loader` table. std::vector getJobStates() const; // For deadlock resolution. Should not be used directly. diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp index 36c87010fa5..31cf1962251 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -8,6 +9,8 @@ #include #include #include +#include +#include #include #include "config.h" @@ -655,6 +658,19 @@ void AsynchronousMetrics::update(TimePoint update_time) total_memory_tracker.setRSS(rss, free_memory_in_allocator_arenas); } } + + { + struct rusage rusage{}; + if (!getrusage(RUSAGE_SELF, &rusage)) + { + new_values["MemoryResidentMax"] = { rusage.ru_maxrss * 1024 /* KiB -> bytes */, + "Maximum amount of physical memory used by the server process, in bytes." 
}; + } + else + { + LOG_ERROR(log, "Cannot obtain resource usage: {}", errnoToString(errno)); + } + } #endif #if defined(OS_LINUX) @@ -797,7 +813,7 @@ void AsynchronousMetrics::update(TimePoint update_time) int64_t hz = sysconf(_SC_CLK_TCK); if (-1 == hz) - throwFromErrno("Cannot call 'sysconf' to obtain system HZ", ErrorCodes::CANNOT_SYSCONF); + throw ErrnoException(ErrorCodes::CANNOT_SYSCONF, "Cannot call 'sysconf' to obtain system HZ"); double multiplier = 1.0 / hz / (std::chrono::duration_cast(time_after_previous_update).count() / 1e9); size_t num_cpus = 0; diff --git a/src/Common/BitHelpers.h b/src/Common/BitHelpers.h index 79c612d47e4..bb81d271140 100644 --- a/src/Common/BitHelpers.h +++ b/src/Common/BitHelpers.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include diff --git a/src/Common/CalendarTimeInterval.cpp b/src/Common/CalendarTimeInterval.cpp new file mode 100644 index 00000000000..b218e1d3c7c --- /dev/null +++ b/src/Common/CalendarTimeInterval.cpp @@ -0,0 +1,144 @@ +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + +CalendarTimeInterval::CalendarTimeInterval(const CalendarTimeInterval::Intervals & intervals) +{ + for (auto [kind, val] : intervals) + { + switch (kind.kind) + { + case IntervalKind::Nanosecond: + case IntervalKind::Microsecond: + case IntervalKind::Millisecond: + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Sub-second intervals are not supported here"); + + case IntervalKind::Second: + case IntervalKind::Minute: + case IntervalKind::Hour: + case IntervalKind::Day: + case IntervalKind::Week: + seconds += val * kind.toAvgSeconds(); + break; + + case IntervalKind::Month: + months += val; + break; + case IntervalKind::Quarter: + months += val * 3; + break; + case IntervalKind::Year: + months += val * 12; + break; + } + } +} + +CalendarTimeInterval::Intervals CalendarTimeInterval::toIntervals() const +{ + Intervals res; + auto greedy = [&](UInt64 x, std::initializer_list> kinds) + { + for (auto [kind, count] : kinds) + { + UInt64 k = x / count; + if (k == 0) + continue; + x -= k * count; + res.emplace_back(kind, k); + } + chassert(x == 0); + }; + greedy(months, {{IntervalKind::Year, 12}, {IntervalKind::Month, 1}}); + greedy(seconds, {{IntervalKind::Week, 3600*24*7}, {IntervalKind::Day, 3600*24}, {IntervalKind::Hour, 3600}, {IntervalKind::Minute, 60}, {IntervalKind::Second, 1}}); + return res; +} + +UInt64 CalendarTimeInterval::minSeconds() const +{ + return 3600*24 * (months/12 * 365 + months%12 * 28) + seconds; +} + +UInt64 CalendarTimeInterval::maxSeconds() const +{ + return 3600*24 * (months/12 * 366 + months%12 * 31) + seconds; +} + +void CalendarTimeInterval::assertSingleUnit() const +{ + if (seconds && months) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Interval shouldn't contain both calendar units and clock units (e.g. months and days)"); +} + +void CalendarTimeInterval::assertPositive() const +{ + if (!seconds && !months) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Interval must be positive"); +} + +/// Number of whole months between 1970-01-01 and `t`. +static Int64 toAbsoluteMonth(std::chrono::system_clock::time_point t) +{ + std::chrono::year_month_day ymd(std::chrono::floor(t)); + return (Int64(int(ymd.year())) - 1970) * 12 + Int64(unsigned(ymd.month()) - 1); +} + +static std::chrono::sys_seconds startOfAbsoluteMonth(Int64 absolute_month) +{ + Int64 year = absolute_month >= 0 ? 
absolute_month/12 : -((-absolute_month+11)/12); + Int64 month = absolute_month - year*12; + chassert(month >= 0 && month < 12); + std::chrono::year_month_day ymd( + std::chrono::year(int(year + 1970)), + std::chrono::month(unsigned(month + 1)), + std::chrono::day(1)); + return std::chrono::sys_days(ymd); +} + +std::chrono::sys_seconds CalendarTimeInterval::advance(std::chrono::system_clock::time_point tp) const +{ + auto t = std::chrono::sys_seconds(std::chrono::floor(tp)); + if (months) + { + auto m = toAbsoluteMonth(t); + auto s = t - startOfAbsoluteMonth(m); + t = startOfAbsoluteMonth(m + Int64(months)) + s; + } + return t + std::chrono::seconds(Int64(seconds)); +} + +std::chrono::sys_seconds CalendarTimeInterval::floor(std::chrono::system_clock::time_point tp) const +{ + assertSingleUnit(); + assertPositive(); + + if (months) + return startOfAbsoluteMonth(toAbsoluteMonth(tp) / months * months); + else + { + constexpr std::chrono::seconds epoch(-3600*24*3); + auto t = std::chrono::sys_seconds(std::chrono::floor(tp)); + /// We want to align with weeks, but 1970-01-01 is a Thursday, so align with 1969-12-29 instead. + return std::chrono::sys_seconds((t.time_since_epoch() - epoch) / seconds * seconds + epoch); + } +} + +bool CalendarTimeInterval::operator==(const CalendarTimeInterval & rhs) const +{ + return std::tie(months, seconds) == std::tie(rhs.months, rhs.seconds); +} + +bool CalendarTimeInterval::operator!=(const CalendarTimeInterval & rhs) const +{ + return !(*this == rhs); +} + +} diff --git a/src/Common/CalendarTimeInterval.h b/src/Common/CalendarTimeInterval.h new file mode 100644 index 00000000000..d5acc6ee2f2 --- /dev/null +++ b/src/Common/CalendarTimeInterval.h @@ -0,0 +1,63 @@ +#pragma once + +#include +#include + +namespace DB +{ + +/// Represents a duration of calendar time, e.g.: +/// * 2 weeks + 5 minutes + and 21 seconds (aka 605121 seconds), +/// * 1 (calendar) month - not equivalent to any number of seconds! +/// * 3 years + 2 weeks (aka 36 months + 604800 seconds). +/// +/// Be careful with calendar arithmetic: it's missing many familiar properties of numbers. +/// E.g. x + y - y is not always equal to x (October 31 + 1 month - 1 month = November 1). +struct CalendarTimeInterval +{ + UInt64 seconds = 0; + UInt64 months = 0; + + using Intervals = std::vector>; + + CalendarTimeInterval() = default; + + /// Year, Quarter, Month are converted to months. + /// Week, Day, Hour, Minute, Second are converted to seconds. + /// Millisecond, Microsecond, Nanosecond throw exception. + explicit CalendarTimeInterval(const Intervals & intervals); + + /// E.g. for {36 months, 604801 seconds} returns {3 years, 2 weeks, 1 second}. + Intervals toIntervals() const; + + /// Approximate shortest and longest duration in seconds. E.g. a month is [28, 31] days. + UInt64 minSeconds() const; + UInt64 maxSeconds() const; + + /// Checks that the interval has only months or only seconds, throws otherwise. + void assertSingleUnit() const; + void assertPositive() const; + + /// Add this interval to the timestamp. First months, then seconds. + /// Gets weird near month boundaries: October 31 + 1 month = December 1. + std::chrono::sys_seconds advance(std::chrono::system_clock::time_point t) const; + + /// Rounds the timestamp down to the nearest timestamp "aligned" with this interval. + /// The interval must satisfy assertSingleUnit() and assertPositive(). + /// * For months, rounds to the start of a month whose abosolute index is divisible by `months`. 
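+    ///   Illustrative example: with a 2-month interval, March 2024 has absolute month index
+    ///   (2024 - 1970) * 12 + 2 = 650, which is divisible by 2, so any timestamp in March or
+    ///   April 2024 is floored to 2024-03-01 00:00:00.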
+ /// The month index is 0-based starting from January 1970. + /// E.g. if the interval is 1 month, rounds down to the start of the month. + /// * For seconds, rounds to a timestamp x such that (x - December 29 1969 (Monday)) is divisible + /// by this interval. + /// E.g. if the interval is 1 week, rounds down to the start of the week (Monday). + /// + /// Guarantees: + /// * advance(floor(x)) > x + /// * floor(advance(floor(x))) = advance(floor(x)) + std::chrono::sys_seconds floor(std::chrono::system_clock::time_point t) const; + + bool operator==(const CalendarTimeInterval & rhs) const; + bool operator!=(const CalendarTimeInterval & rhs) const; +}; + +} diff --git a/src/Common/ColumnsHashingImpl.h b/src/Common/ColumnsHashingImpl.h index 3240510ea9b..7116160e94c 100644 --- a/src/Common/ColumnsHashingImpl.h +++ b/src/Common/ColumnsHashingImpl.h @@ -31,6 +31,17 @@ public: using HashMethodContextPtr = std::shared_ptr; +struct LastElementCacheStats +{ + UInt64 hits = 0; + UInt64 misses = 0; + + void update(size_t num_tries, size_t num_misses) + { + hits += num_tries - num_misses; + misses += num_misses; + } +}; namespace columns_hashing_impl { @@ -39,14 +50,19 @@ template struct LastElementCache { static constexpr bool consecutive_keys_optimization = consecutive_keys_optimization_; + Value value; bool empty = true; bool found = false; + UInt64 misses = 0; - bool check(const Value & value_) { return !empty && value == value_; } + bool check(const Value & value_) const { return value == value_; } template - bool check(const Key & key) { return !empty && value.first == key; } + bool check(const Key & key) const { return value.first == key; } + + bool hasOnlyOneValue() const { return found && misses == 1; } + UInt64 getMisses() const { return misses; } }; template @@ -166,6 +182,7 @@ public: return EmplaceResult(!has_null_key); } } + auto key_holder = static_cast(*this).getKeyHolder(row, pool); return emplaceImpl(key_holder, data); } @@ -183,6 +200,7 @@ public: return FindResult(data.hasNullKeyData(), 0); } } + auto key_holder = static_cast(*this).getKeyHolder(row, pool); return findKeyImpl(keyHolderGetKey(key_holder), data); } @@ -194,6 +212,30 @@ public: return data.hash(keyHolderGetKey(key_holder)); } + ALWAYS_INLINE void resetCache() + { + if constexpr (consecutive_keys_optimization) + { + cache.empty = true; + cache.found = false; + cache.misses = 0; + } + } + + ALWAYS_INLINE bool hasOnlyOneValueSinceLastReset() const + { + if constexpr (consecutive_keys_optimization) + return cache.hasOnlyOneValue(); + return false; + } + + ALWAYS_INLINE UInt64 getCacheMissesSinceLastReset() const + { + if constexpr (consecutive_keys_optimization) + return cache.getMisses(); + return 0; + } + ALWAYS_INLINE bool isNullAt(size_t row) const { if constexpr (nullable) @@ -225,17 +267,15 @@ protected: else cache.value = Value(); } - if constexpr (nullable) - { + if constexpr (nullable) null_map = &checkAndGetColumn(column)->getNullMapColumn(); - } } template ALWAYS_INLINE EmplaceResult emplaceImpl(KeyHolder & key_holder, Data & data) { - if constexpr (Cache::consecutive_keys_optimization) + if constexpr (consecutive_keys_optimization) { if (cache.found && cache.check(keyHolderGetKey(key_holder))) { @@ -266,6 +306,7 @@ protected: { cache.found = true; cache.empty = false; + ++cache.misses; if constexpr (has_mapped) { @@ -288,12 +329,12 @@ protected: template ALWAYS_INLINE FindResult findKeyImpl(Key key, Data & data) { - if constexpr (Cache::consecutive_keys_optimization) + if constexpr 
(consecutive_keys_optimization) { /// It's possible to support such combination, but code will became more complex. /// Now there's not place where we need this options enabled together static_assert(!FindResult::has_offset, "`consecutive_keys_optimization` and `has_offset` are conflicting options"); - if (cache.check(key)) + if (likely(!cache.empty) && cache.check(key)) { if constexpr (has_mapped) return FindResult(&cache.value.second, cache.found, 0); @@ -308,6 +349,7 @@ protected: { cache.found = it != nullptr; cache.empty = false; + ++cache.misses; if constexpr (has_mapped) { @@ -325,9 +367,8 @@ protected: size_t offset = 0; if constexpr (FindResult::has_offset) - { offset = it ? data.offsetInternal(it) : 0; - } + if constexpr (has_mapped) return FindResult(it ? &it->getMapped() : nullptr, it != nullptr, offset); else diff --git a/src/Common/Config/ConfigProcessor.cpp b/src/Common/Config/ConfigProcessor.cpp index c213b7257d9..92e66fee489 100644 --- a/src/Common/Config/ConfigProcessor.cpp +++ b/src/Common/Config/ConfigProcessor.cpp @@ -519,8 +519,9 @@ void ConfigProcessor::doIncludesRecursive( if (attr_nodes["from_zk"]) /// we have zookeeper subst { - if (node->hasChildNodes()) /// only allow substitution for nodes with no value - throw Poco::Exception("Element <" + node->nodeName() + "> has value, can't process from_zk substitution"); + /// only allow substitution for nodes with no value and without "replace" + if (node->hasChildNodes() && !replace) + throw Poco::Exception("Element <" + node->nodeName() + "> has value and does not have 'replace' attribute, can't process from_zk substitution"); contributing_zk_paths.insert(attr_nodes["from_zk"]->getNodeValue()); @@ -544,8 +545,9 @@ void ConfigProcessor::doIncludesRecursive( if (attr_nodes["from_env"]) /// we have env subst { - if (node->hasChildNodes()) /// only allow substitution for nodes with no value - throw Poco::Exception("Element <" + node->nodeName() + "> has value, can't process from_env substitution"); + /// only allow substitution for nodes with no value and without "replace" + if (node->hasChildNodes() && !replace) + throw Poco::Exception("Element <" + node->nodeName() + "> has value and does not have 'replace' attribute, can't process from_env substitution"); XMLDocumentPtr env_document; auto get_env_node = [&](const std::string & name) -> const Node * diff --git a/src/Common/CounterInFile.h b/src/Common/CounterInFile.h index fe3b74173f6..993ed97966a 100644 --- a/src/Common/CounterInFile.h +++ b/src/Common/CounterInFile.h @@ -69,13 +69,13 @@ public: int fd = ::open(path.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0666); if (-1 == fd) - DB::throwFromErrnoWithPath("Cannot open file " + path, path, DB::ErrorCodes::CANNOT_OPEN_FILE); + DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_OPEN_FILE, path, "Cannot open file {}", path); try { int flock_ret = flock(fd, LOCK_EX); if (-1 == flock_ret) - DB::throwFromErrnoWithPath("Cannot lock file " + path, path, DB::ErrorCodes::CANNOT_OPEN_FILE); + DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_OPEN_FILE, path, "Cannot lock file {}", path); if (!file_doesnt_exists) { @@ -145,7 +145,7 @@ public: int fd = ::open(path.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0666); if (-1 == fd) - DB::throwFromErrnoWithPath("Cannot open file " + path, path, DB::ErrorCodes::CANNOT_OPEN_FILE); + DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_OPEN_FILE, path, "Cannot open file {}", path); try { diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index 
1a7232012da..2613e9ec116 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -212,6 +212,8 @@ M(PartsCommitted, "Deprecated. See PartsActive.") \ M(PartsPreActive, "The part is in data_parts, but not used for SELECTs.") \ M(PartsActive, "Active data part, used by current and upcoming SELECTs.") \ + M(AttachedDatabase, "Active database, used by current and upcoming SELECTs.") \ + M(AttachedTable, "Active table, used by current and upcoming SELECTs.") \ M(PartsOutdated, "Not active data part, but could be used by only current SELECTs, could be deleted after SELECTs finishes.") \ M(PartsDeleting, "Not active data part with identity refcounter, it is deleting right now by a cleaner.") \ M(PartsDeleteOnDestroy, "Part was moved to another disk and should be deleted in own destructor.") \ @@ -251,6 +253,8 @@ M(MergeTreeAllRangesAnnouncementsSent, "The current number of announcement being sent in flight from the remote server to the initiator server about the set of data parts (for MergeTree tables). Measured on the remote server side.") \ M(CreatedTimersInQueryProfiler, "Number of Created thread local timers in QueryProfiler") \ M(ActiveTimersInQueryProfiler, "Number of Active thread local timers in QueryProfiler") \ + M(RefreshableViews, "Number materialized views with periodic refreshing (REFRESH)") \ + M(RefreshingViews, "Number of materialized views currently executing a refresh") \ #ifdef APPLY_FOR_EXTERNAL_METRICS #define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) APPLY_FOR_EXTERNAL_METRICS(M) @@ -258,6 +262,7 @@ #define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) #endif + namespace CurrentMetrics { #define M(NAME, DOCUMENTATION) extern const Metric NAME = Metric(__COUNTER__); diff --git a/src/Common/Epoll.cpp b/src/Common/Epoll.cpp index ac06f044beb..49c86222cf0 100644 --- a/src/Common/Epoll.cpp +++ b/src/Common/Epoll.cpp @@ -19,7 +19,7 @@ Epoll::Epoll() : events_count(0) { epoll_fd = epoll_create1(0); if (epoll_fd == -1) - throwFromErrno("Cannot open epoll descriptor", DB::ErrorCodes::EPOLL_ERROR); + throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot open epoll descriptor"); } Epoll::Epoll(Epoll && other) noexcept : epoll_fd(other.epoll_fd), events_count(other.events_count.load()) @@ -47,7 +47,7 @@ void Epoll::add(int fd, void * ptr, uint32_t events) ++events_count; if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &event) == -1) - throwFromErrno("Cannot add new descriptor to epoll", DB::ErrorCodes::EPOLL_ERROR); + throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot add new descriptor to epoll"); } void Epoll::remove(int fd) @@ -55,7 +55,7 @@ void Epoll::remove(int fd) --events_count; if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, fd, nullptr) == -1) - throwFromErrno("Cannot remove descriptor from epoll", DB::ErrorCodes::EPOLL_ERROR); + throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot remove descriptor from epoll"); } size_t Epoll::getManyReady(int max_events, epoll_event * events_out, int timeout) const @@ -82,7 +82,7 @@ size_t Epoll::getManyReady(int max_events, epoll_event * events_out, int timeout continue; } else - throwFromErrno("Error in epoll_wait", DB::ErrorCodes::EPOLL_ERROR); + throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Error in epoll_wait"); } else break; diff --git a/src/Common/EventFD.cpp b/src/Common/EventFD.cpp index af50ca62271..9ec7f128420 100644 --- a/src/Common/EventFD.cpp +++ b/src/Common/EventFD.cpp @@ -21,7 +21,7 @@ EventFD::EventFD() { fd = eventfd(0 /* initval */, 0 /* flags */); if (fd == 
-1) - throwFromErrno("Cannot create eventfd", ErrorCodes::CANNOT_PIPE); + throw ErrnoException(ErrorCodes::CANNOT_PIPE, "Cannot create eventfd"); } uint64_t EventFD::read() const @@ -33,7 +33,7 @@ uint64_t EventFD::read() const break; if (errno != EINTR) - throwFromErrno("Cannot read from eventfd", ErrorCodes::CANNOT_READ_FROM_SOCKET); + throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot read from eventfd"); } return buf; @@ -47,7 +47,7 @@ bool EventFD::write(uint64_t increase) const return false; if (errno != EINTR) - throwFromErrno("Cannot write to eventfd", ErrorCodes::CANNOT_WRITE_TO_SOCKET); + throw ErrnoException(ErrorCodes::CANNOT_WRITE_TO_SOCKET, "Cannot write to eventfd"); } return true; diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp index ed9fb00241d..d5f1984a5ff 100644 --- a/src/Common/Exception.cpp +++ b/src/Common/Exception.cpp @@ -1,25 +1,24 @@ #include "Exception.h" #include -#include -#include #include -#include -#include -#include -#include +#include +#include +#include #include -#include #include +#include +#include +#include #include -#include -#include -#include +#include #include +#include #include #include -#include -#include +#include +#include +#include #include @@ -212,17 +211,6 @@ Exception::FramePointers Exception::getStackFramePointers() const thread_local bool Exception::enable_job_stack_trace = false; thread_local std::vector Exception::thread_frame_pointers = {}; - -void throwFromErrno(const std::string & s, int code, int the_errno) -{ - throw ErrnoException(s + ", " + errnoToString(the_errno), code, the_errno); -} - -void throwFromErrnoWithPath(const std::string & s, const std::string & path, int code, int the_errno) -{ - throw ErrnoException(s + ", " + errnoToString(the_errno), code, the_errno, path); -} - static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string & start_of_message) { try diff --git a/src/Common/Exception.h b/src/Common/Exception.h index ac116f5ceca..aabc848b230 100644 --- a/src/Common/Exception.h +++ b/src/Common/Exception.h @@ -7,9 +7,10 @@ #include #include +#include #include -#include #include +#include #include @@ -92,15 +93,6 @@ public: return Exception(msg, code, remote_); } - /// Message must be a compile-time constant - template - requires std::is_convertible_v - Exception(int code, T && message) : Exception(message, code) - { - capture_thread_frame_pointers = thread_frame_pointers; - message_format_string = tryGetStaticFormatString(message); - } - /// These creators are for messages that were received by network or generated by a third-party library in runtime. /// Please use a constructor for all other cases. static Exception createRuntime(int code, const String & message) { return Exception(message, code); } @@ -173,12 +165,61 @@ std::string getExceptionStackTraceString(const std::exception & e); std::string getExceptionStackTraceString(std::exception_ptr e); -/// Contains an additional member `saved_errno`. See the throwFromErrno function. 
+/// Contains an additional member `saved_errno` class ErrnoException : public Exception { public: - ErrnoException(const std::string & msg, int code, int saved_errno_, const std::optional & path_ = {}) - : Exception(msg, code), saved_errno(saved_errno_), path(path_) {} + ErrnoException(std::string && msg, int code, int with_errno) : Exception(msg, code), saved_errno(with_errno) + { + capture_thread_frame_pointers = thread_frame_pointers; + addMessage(", {}", errnoToString(saved_errno)); + } + + /// Message must be a compile-time constant + template + requires std::is_convertible_v + ErrnoException(int code, T && message) : Exception(message, code), saved_errno(errno) + { + capture_thread_frame_pointers = thread_frame_pointers; + addMessage(", {}", errnoToString(saved_errno)); + } + + // Format message with fmt::format, like the logging functions. + template + ErrnoException(int code, FormatStringHelper fmt, Args &&... args) + : Exception(fmt::format(fmt.fmt_str, std::forward(args)...), code), saved_errno(errno) + { + capture_thread_frame_pointers = thread_frame_pointers; + message_format_string = fmt.message_format_string; + addMessage(", {}", errnoToString(saved_errno)); + } + + template + [[noreturn]] static void throwWithErrno(int code, int with_errno, FormatStringHelper fmt, Args &&... args) + { + auto e = ErrnoException(fmt::format(fmt.fmt_str, std::forward(args)...), code, with_errno); + e.message_format_string = fmt.message_format_string; + throw e; + } + + template + [[noreturn]] static void throwFromPath(int code, const std::string & path, FormatStringHelper fmt, Args &&... args) + { + auto e = ErrnoException(fmt::format(fmt.fmt_str, std::forward(args)...), code, errno); + e.message_format_string = fmt.message_format_string; + e.path = path; + throw e; + } + + template + [[noreturn]] static void + throwFromPathWithErrno(int code, const std::string & path, int with_errno, FormatStringHelper fmt, Args &&... args) + { + auto e = ErrnoException(fmt::format(fmt.fmt_str, std::forward(args)...), code, with_errno); + e.message_format_string = fmt.message_format_string; + e.path = path; + throw e; + } ErrnoException * clone() const override { return new ErrnoException(*this); } void rethrow() const override { throw *this; } // NOLINT @@ -188,7 +229,7 @@ public: private: int saved_errno; - std::optional path; + std::optional path{}; const char * name() const noexcept override { return "DB::ErrnoException"; } const char * className() const noexcept override { return "DB::ErrnoException"; } @@ -233,13 +274,6 @@ private: using Exceptions = std::vector; - -[[noreturn]] void throwFromErrno(const std::string & s, int code, int the_errno = errno); -/// Useful to produce some extra information about available space and inodes on device -[[noreturn]] void throwFromErrnoWithPath(const std::string & s, const std::string & path, int code, - int the_errno = errno); - - /** Try to write an exception to the log (and forget about it). * Can be used in destructors in the catch-all block. */ diff --git a/src/Common/FailPoint.cpp b/src/Common/FailPoint.cpp index 75ea9893f66..9665788dac2 100644 --- a/src/Common/FailPoint.cpp +++ b/src/Common/FailPoint.cpp @@ -28,13 +28,14 @@ static struct InitFiu /// We should define different types of failpoints here. There are four types of them: /// - ONCE: the failpoint will only be triggered once. -/// - REGULAR: the failpoint will always be triggered util disableFailPoint is called. 
-/// - PAUSAEBLE_ONCE: the failpoint will be blocked one time when pauseFailPoint is called, util disableFailPoint is called. -/// - PAUSAEBLE: the failpoint will be blocked every time when pauseFailPoint is called, util disableFailPoint is called. +/// - REGULAR: the failpoint will always be triggered until disableFailPoint is called. +/// - PAUSEABLE_ONCE: the failpoint will be blocked one time when pauseFailPoint is called, util disableFailPoint is called. +/// - PAUSEABLE: the failpoint will be blocked every time when pauseFailPoint is called, util disableFailPoint is called. #define APPLY_FOR_FAILPOINTS(ONCE, REGULAR, PAUSEABLE_ONCE, PAUSEABLE) \ ONCE(replicated_merge_tree_commit_zk_fail_after_op) \ ONCE(replicated_merge_tree_insert_quorum_fail_0) \ + REGULAR(replicated_merge_tree_commit_zk_fail_when_recovering_from_hw_fault) \ REGULAR(use_delayed_remote_source) \ REGULAR(cluster_discovery_faults) \ REGULAR(check_table_query_delay_for_part) \ diff --git a/src/Common/FiberStack.h b/src/Common/FiberStack.h index 91bb632d807..9d135f27306 100644 --- a/src/Common/FiberStack.h +++ b/src/Common/FiberStack.h @@ -13,6 +13,11 @@ #include #endif +/// Required for older Darwin builds, that lack definition of MAP_ANONYMOUS +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + namespace DB::ErrorCodes { extern const int CANNOT_ALLOCATE_MEMORY; @@ -46,14 +51,14 @@ public: void * vp = ::mmap(nullptr, num_bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (MAP_FAILED == vp) - DB::throwFromErrno(fmt::format("FiberStack: Cannot mmap {}.", ReadableSize(num_bytes)), DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY); + throw DB::ErrnoException(DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "FiberStack: Cannot mmap {}.", ReadableSize(num_bytes)); /// TODO: make reports on illegal guard page access more clear. /// Currently we will see segfault and almost random stacktrace. if (-1 == ::mprotect(vp, page_size, PROT_NONE)) { ::munmap(vp, num_bytes); - DB::throwFromErrno("FiberStack: cannot protect guard page", DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY); + throw DB::ErrnoException(DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "FiberStack: cannot protect guard page"); } /// Do not count guard page in memory usage. diff --git a/src/Common/FieldVisitorToString.cpp b/src/Common/FieldVisitorToString.cpp index 60834afab35..c4cb4266418 100644 --- a/src/Common/FieldVisitorToString.cpp +++ b/src/Common/FieldVisitorToString.cpp @@ -18,16 +18,37 @@ template static inline String formatQuoted(T x) { WriteBufferFromOwnString wb; - writeQuoted(x, wb); - return wb.str(); -} -template -static inline void writeQuoted(const DecimalField & x, WriteBuffer & buf) -{ - writeChar('\'', buf); - writeText(x.getValue(), x.getScale(), buf, {}); - writeChar('\'', buf); + if constexpr (is_decimal_field) + { + writeChar('\'', wb); + writeText(x.getValue(), x.getScale(), wb, {}); + writeChar('\'', wb); + } + else if constexpr (is_big_int_v) + { + writeChar('\'', wb); + writeText(x, wb); + writeChar('\'', wb); + } + else + { + /// While `writeQuoted` sounds like it will always write the value in quotes, + /// in fact it means: write according to the rules of the quoted format, like VALUES, + /// where strings, dates, date-times, UUID are in quotes, and numbers are not. + + /// That's why we take extra care to put Decimal and big integers inside quotes + /// when formatting literals in SQL language, + /// because it is different from the quoted formats like VALUES. 
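+    /// For illustration: a DecimalField with value 123.45 is rendered here as '123.45' and a
+    /// UInt256 as a quoted numeric string, while a plain UInt64 such as 42 stays unquoted.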
+ + /// In fact, there are no Decimal and big integer literals in SQL, + /// but they can appear if we format the query from a modified AST. + + /// We can fix this idiosyncrasy later. + + writeQuoted(x, wb); + } + return wb.str(); } /** In contrast to writeFloatText (and writeQuoted), diff --git a/src/Common/InterruptListener.h b/src/Common/InterruptListener.h index b8b2ba6be7d..1f0f021fb03 100644 --- a/src/Common/InterruptListener.h +++ b/src/Common/InterruptListener.h @@ -58,9 +58,8 @@ private: public: InterruptListener() : active(false) { - if (sigemptyset(&sig_set) - || sigaddset(&sig_set, SIGINT)) - throwFromErrno("Cannot manipulate with signal set.", ErrorCodes::CANNOT_MANIPULATE_SIGSET); + if (sigemptyset(&sig_set) || sigaddset(&sig_set, SIGINT)) + throw ErrnoException(ErrorCodes::CANNOT_MANIPULATE_SIGSET, "Cannot manipulate with signal set"); block(); } @@ -82,7 +81,7 @@ public: if (errno == EAGAIN) return false; else - throwFromErrno("Cannot poll signal (sigtimedwait).", ErrorCodes::CANNOT_WAIT_FOR_SIGNAL); + throw ErrnoException(ErrorCodes::CANNOT_WAIT_FOR_SIGNAL, "Cannot poll signal (sigtimedwait)"); } return true; @@ -93,7 +92,7 @@ public: if (!active) { if (pthread_sigmask(SIG_BLOCK, &sig_set, nullptr)) - throwFromErrno("Cannot block signal.", ErrorCodes::CANNOT_BLOCK_SIGNAL); + throw ErrnoException(ErrorCodes::CANNOT_BLOCK_SIGNAL, "Cannot block signal"); active = true; } @@ -105,7 +104,7 @@ public: if (active) { if (pthread_sigmask(SIG_UNBLOCK, &sig_set, nullptr)) - throwFromErrno("Cannot unblock signal.", ErrorCodes::CANNOT_UNBLOCK_SIGNAL); + throw ErrnoException(ErrorCodes::CANNOT_UNBLOCK_SIGNAL, "Cannot unblock signal"); active = false; } diff --git a/src/Common/IntervalKind.h b/src/Common/IntervalKind.h index 6893286f196..0f45d0ac169 100644 --- a/src/Common/IntervalKind.h +++ b/src/Common/IntervalKind.h @@ -71,6 +71,8 @@ struct IntervalKind /// Returns false if the conversion did not succeed. /// For example, `IntervalKind::tryParseString('second', result)` returns `result` equals `IntervalKind::Kind::Second`. static bool tryParseString(const std::string & kind, IntervalKind::Kind & result); + + auto operator<=>(const IntervalKind & other) const { return kind <=> other.kind; } }; /// NOLINTNEXTLINE diff --git a/src/Common/Macros.cpp b/src/Common/Macros.cpp index 891aa53c061..0035e7abfe8 100644 --- a/src/Common/Macros.cpp +++ b/src/Common/Macros.cpp @@ -120,7 +120,7 @@ String Macros::expand(const String & s, auto uuid = ServerUUID::get(); if (UUIDHelpers::Nil == uuid) throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Macro {server_uuid} expanded to zero, which means the UUID is not initialized (most likely it's not a server application)"); + "Macro {{server_uuid}} expanded to zero, which means the UUID is not initialized (most likely it's not a server application)"); res += toString(uuid); info.expanded_other = true; } diff --git a/src/Common/MemoryStatisticsOS.cpp b/src/Common/MemoryStatisticsOS.cpp index f2d2ab5fea9..2092c679336 100644 --- a/src/Common/MemoryStatisticsOS.cpp +++ b/src/Common/MemoryStatisticsOS.cpp @@ -39,7 +39,8 @@ MemoryStatisticsOS::MemoryStatisticsOS() fd = ::open(filename, O_RDONLY | O_CLOEXEC); if (-1 == fd) - throwFromErrno("Cannot open file " + std::string(filename), errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); + ErrnoException::throwFromPath( + errno == ENOENT ? 
ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, filename, "Cannot open file {}", filename); } MemoryStatisticsOS::~MemoryStatisticsOS() @@ -48,9 +49,8 @@ MemoryStatisticsOS::~MemoryStatisticsOS() { try { - throwFromErrno( - "File descriptor for \"" + std::string(filename) + "\" could not be closed. " - "Something seems to have gone wrong. Inspect errno.", ErrorCodes::CANNOT_CLOSE_FILE); + ErrnoException::throwFromPath( + ErrorCodes::CANNOT_CLOSE_FILE, filename, "File descriptor for '{}' could not be closed", filename); } catch (const ErrnoException &) { @@ -77,7 +77,7 @@ MemoryStatisticsOS::Data MemoryStatisticsOS::get() const if (errno == EINTR) continue; - throwFromErrno("Cannot read from file " + std::string(filename), ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, filename, "Cannot read from file {}", filename); } assert(res >= 0); @@ -136,7 +136,7 @@ MemoryStatisticsOS::Data MemoryStatisticsOS::get() const size_t len = sizeof(struct kinfo_proc); if (-1 == ::sysctl(mib, 4, &kp, &len, nullptr, 0)) - throwFromErrno("Cannot sysctl(kern.proc.pid." + std::to_string(self) + ")", ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot sysctl(kern.proc.pid.{})", std::to_string(self)); if (sizeof(struct kinfo_proc) != len) throw DB::Exception(DB::ErrorCodes::SYSTEM_ERROR, "Kernel returns structure of {} bytes instead of expected {}", diff --git a/src/Common/NamePrompter.h b/src/Common/NamePrompter.h index 97c345414bb..cc72554657f 100644 --- a/src/Common/NamePrompter.h +++ b/src/Common/NamePrompter.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include diff --git a/src/Common/NetlinkMetricsProvider.cpp b/src/Common/NetlinkMetricsProvider.cpp index 4c228bcc6fc..23173f31689 100644 --- a/src/Common/NetlinkMetricsProvider.cpp +++ b/src/Common/NetlinkMetricsProvider.cpp @@ -117,7 +117,7 @@ struct NetlinkMessage if (errno == EAGAIN) continue; else - throwFromErrno("Can't send a Netlink command", ErrorCodes::NETLINK_ERROR); + throw ErrnoException(ErrorCodes::NETLINK_ERROR, "Can't send a Netlink command"); } if (bytes_sent > request_size) @@ -255,7 +255,7 @@ NetlinkMetricsProvider::NetlinkMetricsProvider() { netlink_socket_fd = ::socket(PF_NETLINK, SOCK_RAW, NETLINK_GENERIC); if (netlink_socket_fd < 0) - throwFromErrno("Can't create PF_NETLINK socket", ErrorCodes::NETLINK_ERROR); + throw ErrnoException(ErrorCodes::NETLINK_ERROR, "Can't create PF_NETLINK socket"); try { @@ -267,7 +267,7 @@ NetlinkMetricsProvider::NetlinkMetricsProvider() tv.tv_usec = 50000; if (0 != ::setsockopt(netlink_socket_fd, SOL_SOCKET, SO_RCVTIMEO, reinterpret_cast(&tv), sizeof(tv))) - throwFromErrno("Can't set timeout on PF_NETLINK socket", ErrorCodes::NETLINK_ERROR); + throw ErrnoException(ErrorCodes::NETLINK_ERROR, "Can't set timeout on PF_NETLINK socket"); union { @@ -277,7 +277,7 @@ NetlinkMetricsProvider::NetlinkMetricsProvider() addr.nl_family = AF_NETLINK; if (::bind(netlink_socket_fd, &sockaddr, sizeof(addr)) < 0) - throwFromErrno("Can't bind PF_NETLINK socket", ErrorCodes::NETLINK_ERROR); + throw ErrnoException(ErrorCodes::NETLINK_ERROR, "Can't bind PF_NETLINK socket"); taskstats_family_id = getFamilyId(netlink_socket_fd); } diff --git a/src/Common/OpenTelemetryTraceContext.cpp b/src/Common/OpenTelemetryTraceContext.cpp index ab1a430cebb..92803af93a9 100644 --- a/src/Common/OpenTelemetryTraceContext.cpp +++ b/src/Common/OpenTelemetryTraceContext.cpp @@ -2,6 +2,7 @@ #include #include 
+#include #include #include #include diff --git a/src/Common/PODArray.cpp b/src/Common/PODArray.cpp index d21dc40867d..dd1fed08cb5 100644 --- a/src/Common/PODArray.cpp +++ b/src/Common/PODArray.cpp @@ -1,8 +1,46 @@ +#include #include + namespace DB { +namespace ErrorCodes +{ + extern const int CANNOT_MPROTECT; + extern const int CANNOT_ALLOCATE_MEMORY; +} + +namespace PODArrayDetails +{ + +#ifndef NDEBUG +void protectMemoryRegion(void * addr, size_t len, int prot) +{ + if (0 != mprotect(addr, len, prot)) + throw ErrnoException(ErrorCodes::CANNOT_MPROTECT, "Cannot mprotect memory region"); +} +#endif + +size_t byte_size(size_t num_elements, size_t element_size) +{ + size_t amount; + if (__builtin_mul_overflow(num_elements, element_size, &amount)) + throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Amount of memory requested to allocate is more than allowed"); + return amount; +} + +size_t minimum_memory_for_elements(size_t num_elements, size_t element_size, size_t pad_left, size_t pad_right) +{ + size_t amount; + if (__builtin_add_overflow(byte_size(num_elements, element_size), pad_left + pad_right, &amount)) + throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Amount of memory requested to allocate is more than allowed"); + return amount; +} + +} + + /// Used for left padding of PODArray when empty const char empty_pod_array[empty_pod_array_size]{}; @@ -25,4 +63,5 @@ template class PODArray, 0, 0>; template class PODArray, 0, 0>; template class PODArray, 0, 0>; template class PODArray, 0, 0>; + } diff --git a/src/Common/PODArray.h b/src/Common/PODArray.h index 68c1e325f0c..6a048d1c6c0 100644 --- a/src/Common/PODArray.h +++ b/src/Common/PODArray.h @@ -1,27 +1,21 @@ #pragma once +#include +#include +#include +#include +#include +#include #include #include #include #include #include -#include - -#include -#include - -#include -#include -#include -#include - #ifndef NDEBUG - #include +#include #endif -#include - /** Whether we can use memcpy instead of a loop with assignment to T from U. * It is Ok if types are the same. And if types are integral and of the same size, * example: char, signed char, unsigned char. @@ -35,12 +29,6 @@ constexpr bool memcpy_can_be_used_for_assignment = std::is_same_v namespace DB { -namespace ErrorCodes -{ - extern const int CANNOT_MPROTECT; - extern const int CANNOT_ALLOCATE_MEMORY; -} - /** A dynamic array for POD types. * Designed for a small number of large arrays (rather than a lot of small ones). * To be more precise - for use in ColumnVector. @@ -77,6 +65,19 @@ namespace ErrorCodes static constexpr size_t empty_pod_array_size = 1024; extern const char empty_pod_array[empty_pod_array_size]; +namespace PODArrayDetails +{ + +void protectMemoryRegion(void * addr, size_t len, int prot); + +/// The amount of memory occupied by the num_elements of the elements. +size_t byte_size(size_t num_elements, size_t element_size); /// NOLINT + +/// Minimum amount of memory to allocate for num_elements, including padding. +size_t minimum_memory_for_elements(size_t num_elements, size_t element_size, size_t pad_left, size_t pad_right); /// NOLINT + +}; + /** Base class that depend only on size of element, not on element itself. * You can static_cast to this class if you want to insert some data regardless to the actual type T. */ @@ -102,27 +103,9 @@ protected: char * c_end = null; char * c_end_of_storage = null; /// Does not include pad_right. - /// The amount of memory occupied by the num_elements of the elements. 
- static size_t byte_size(size_t num_elements) /// NOLINT - { - size_t amount; - if (__builtin_mul_overflow(num_elements, ELEMENT_SIZE, &amount)) - throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Amount of memory requested to allocate is more than allowed"); - return amount; - } - - /// Minimum amount of memory to allocate for num_elements, including padding. - static size_t minimum_memory_for_elements(size_t num_elements) - { - size_t amount; - if (__builtin_add_overflow(byte_size(num_elements), pad_left + pad_right, &amount)) - throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Amount of memory requested to allocate is more than allowed"); - return amount; - } - void alloc_for_num_elements(size_t num_elements) /// NOLINT { - alloc(minimum_memory_for_elements(num_elements)); + alloc(PODArrayDetails::minimum_memory_for_elements(num_elements, ELEMENT_SIZE, pad_left, pad_right)); } template @@ -188,7 +171,7 @@ protected: // The allocated memory should be multiplication of ELEMENT_SIZE to hold the element, otherwise, // memory issue such as corruption could appear in edge case. realloc(std::max(integerRoundUp(initial_bytes, ELEMENT_SIZE), - minimum_memory_for_elements(1)), + PODArrayDetails::minimum_memory_for_elements(1, ELEMENT_SIZE, pad_left, pad_right)), std::forward(allocator_params)...); } else @@ -208,8 +191,7 @@ protected: if (right_rounded_down > left_rounded_up) { size_t length = right_rounded_down - left_rounded_up; - if (0 != mprotect(left_rounded_up, length, prot)) - throwFromErrno("Cannot mprotect memory region", ErrorCodes::CANNOT_MPROTECT); + PODArrayDetails::protectMemoryRegion(left_rounded_up, length, prot); } } @@ -232,14 +214,14 @@ public: void reserve(size_t n, TAllocatorParams &&... allocator_params) { if (n > capacity()) - realloc(roundUpToPowerOfTwoOrZero(minimum_memory_for_elements(n)), std::forward(allocator_params)...); + realloc(roundUpToPowerOfTwoOrZero(PODArrayDetails::minimum_memory_for_elements(n, ELEMENT_SIZE, pad_left, pad_right)), std::forward(allocator_params)...); } template void reserve_exact(size_t n, TAllocatorParams &&... 
allocator_params) /// NOLINT { if (n > capacity()) - realloc(minimum_memory_for_elements(n), std::forward(allocator_params)...); + realloc(PODArrayDetails::minimum_memory_for_elements(n, ELEMENT_SIZE, pad_left, pad_right), std::forward(allocator_params)...); } template @@ -258,7 +240,7 @@ public: void resize_assume_reserved(const size_t n) /// NOLINT { - c_end = c_start + byte_size(n); + c_end = c_start + PODArrayDetails::byte_size(n, ELEMENT_SIZE); } const char * raw_data() const /// NOLINT @@ -339,7 +321,7 @@ public: explicit PODArray(size_t n) { this->alloc_for_num_elements(n); - this->c_end += this->byte_size(n); + this->c_end += PODArrayDetails::byte_size(n, sizeof(T)); } PODArray(size_t n, const T & x) @@ -411,9 +393,9 @@ public: if (n > old_size) { this->reserve(n); - memset(this->c_end, 0, this->byte_size(n - old_size)); + memset(this->c_end, 0, PODArrayDetails::byte_size(n - old_size, sizeof(T))); } - this->c_end = this->c_start + this->byte_size(n); + this->c_end = this->c_start + PODArrayDetails::byte_size(n, sizeof(T)); } void resize_fill(size_t n, const T & value) /// NOLINT @@ -424,7 +406,7 @@ public: this->reserve(n); std::fill(t_end(), t_end() + n - old_size, value); } - this->c_end = this->c_start + this->byte_size(n); + this->c_end = this->c_start + PODArrayDetails::byte_size(n, sizeof(T)); } template @@ -487,7 +469,7 @@ public: if (required_capacity > this->capacity()) this->reserve(roundUpToPowerOfTwoOrZero(required_capacity), std::forward(allocator_params)...); - size_t bytes_to_copy = this->byte_size(from_end - from_begin); + size_t bytes_to_copy = PODArrayDetails::byte_size(from_end - from_begin, sizeof(T)); if (bytes_to_copy) { memcpy(this->c_end, reinterpret_cast(rhs.begin() + from_begin), bytes_to_copy); @@ -502,7 +484,7 @@ public: static_assert(pad_right_ >= PADDING_FOR_SIMD - 1); static_assert(sizeof(T) == sizeof(*from_begin)); insertPrepare(from_begin, from_end, std::forward(allocator_params)...); - size_t bytes_to_copy = this->byte_size(from_end - from_begin); + size_t bytes_to_copy = PODArrayDetails::byte_size(from_end - from_begin, sizeof(T)); memcpySmallAllowReadWriteOverflow15(this->c_end, reinterpret_cast(&*from_begin), bytes_to_copy); this->c_end += bytes_to_copy; } @@ -513,11 +495,11 @@ public: { static_assert(memcpy_can_be_used_for_assignment, std::decay_t>); - size_t bytes_to_copy = this->byte_size(from_end - from_begin); + size_t bytes_to_copy = PODArrayDetails::byte_size(from_end - from_begin, sizeof(T)); if (!bytes_to_copy) return; - size_t bytes_to_move = this->byte_size(end() - it); + size_t bytes_to_move = PODArrayDetails::byte_size(end() - it, sizeof(T)); insertPrepare(from_begin, from_end); @@ -545,10 +527,10 @@ public: if (required_capacity > this->capacity()) this->reserve(roundUpToPowerOfTwoOrZero(required_capacity), std::forward(allocator_params)...); - size_t bytes_to_copy = this->byte_size(copy_size); + size_t bytes_to_copy = PODArrayDetails::byte_size(copy_size, sizeof(T)); if (bytes_to_copy) { - auto begin = this->c_start + this->byte_size(start_index); + auto begin = this->c_start + PODArrayDetails::byte_size(start_index, sizeof(T)); memcpy(this->c_end, reinterpret_cast(&*begin), bytes_to_copy); this->c_end += bytes_to_copy; } @@ -560,7 +542,7 @@ public: static_assert(memcpy_can_be_used_for_assignment, std::decay_t>); this->assertNotIntersects(from_begin, from_end); - size_t bytes_to_copy = this->byte_size(from_end - from_begin); + size_t bytes_to_copy = PODArrayDetails::byte_size(from_end - from_begin, sizeof(T)); if (bytes_to_copy) 
{ memcpy(this->c_end, reinterpret_cast(&*from_begin), bytes_to_copy); @@ -593,13 +575,13 @@ public: /// arr1 takes ownership of the heap memory of arr2. arr1.c_start = arr2.c_start; arr1.c_end_of_storage = arr1.c_start + heap_allocated - arr2.pad_right - arr2.pad_left; - arr1.c_end = arr1.c_start + this->byte_size(heap_size); + arr1.c_end = arr1.c_start + PODArrayDetails::byte_size(heap_size, sizeof(T)); /// Allocate stack space for arr2. arr2.alloc(stack_allocated, std::forward(allocator_params)...); /// Copy the stack content. - memcpy(arr2.c_start, stack_c_start, this->byte_size(stack_size)); - arr2.c_end = arr2.c_start + this->byte_size(stack_size); + memcpy(arr2.c_start, stack_c_start, PODArrayDetails::byte_size(stack_size, sizeof(T))); + arr2.c_end = arr2.c_start + PODArrayDetails::byte_size(stack_size, sizeof(T)); }; auto do_move = [&](PODArray & src, PODArray & dest) @@ -608,8 +590,8 @@ public: { dest.dealloc(); dest.alloc(src.allocated_bytes(), std::forward(allocator_params)...); - memcpy(dest.c_start, src.c_start, this->byte_size(src.size())); - dest.c_end = dest.c_start + this->byte_size(src.size()); + memcpy(dest.c_start, src.c_start, PODArrayDetails::byte_size(src.size(), sizeof(T))); + dest.c_end = dest.c_start + PODArrayDetails::byte_size(src.size(), sizeof(T)); src.c_start = Base::null; src.c_end = Base::null; @@ -666,8 +648,8 @@ public: this->c_end_of_storage = this->c_start + rhs_allocated - Base::pad_right - Base::pad_left; rhs.c_end_of_storage = rhs.c_start + lhs_allocated - Base::pad_right - Base::pad_left; - this->c_end = this->c_start + this->byte_size(rhs_size); - rhs.c_end = rhs.c_start + this->byte_size(lhs_size); + this->c_end = this->c_start + PODArrayDetails::byte_size(rhs_size, sizeof(T)); + rhs.c_end = rhs.c_start + PODArrayDetails::byte_size(lhs_size, sizeof(T)); } else if (this->isAllocatedFromStack() && !rhs.isAllocatedFromStack()) { @@ -702,7 +684,7 @@ public: if (required_capacity > this->capacity()) this->reserve_exact(required_capacity, std::forward(allocator_params)...); - size_t bytes_to_copy = this->byte_size(required_capacity); + size_t bytes_to_copy = PODArrayDetails::byte_size(required_capacity, sizeof(T)); if (bytes_to_copy) memcpy(this->c_start, reinterpret_cast(&*from_begin), bytes_to_copy); diff --git a/src/Common/PipeFDs.cpp b/src/Common/PipeFDs.cpp index 21a9ae59972..f2a913467a9 100644 --- a/src/Common/PipeFDs.cpp +++ b/src/Common/PipeFDs.cpp @@ -29,14 +29,14 @@ void LazyPipeFDs::open() #ifndef OS_DARWIN if (0 != pipe2(fds_rw, O_CLOEXEC)) - throwFromErrno("Cannot create pipe", ErrorCodes::CANNOT_PIPE); + throw ErrnoException(ErrorCodes::CANNOT_PIPE, "Cannot create pipe"); #else if (0 != pipe(fds_rw)) - throwFromErrno("Cannot create pipe", ErrorCodes::CANNOT_PIPE); + throw ErrnoException(ErrorCodes::CANNOT_PIPE, "Cannot create pipe"); if (0 != fcntl(fds_rw[0], F_SETFD, FD_CLOEXEC)) - throwFromErrno("Cannot setup auto-close on exec for read end of pipe", ErrorCodes::CANNOT_FCNTL); + throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot setup auto-close on exec for read end of pipe"); if (0 != fcntl(fds_rw[1], F_SETFD, FD_CLOEXEC)) - throwFromErrno("Cannot setup auto-close on exec for write end of pipe", ErrorCodes::CANNOT_FCNTL); + throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot setup auto-close on exec for write end of pipe"); #endif } @@ -47,7 +47,7 @@ void LazyPipeFDs::close() if (fd < 0) continue; if (0 != ::close(fd)) - throwFromErrno("Cannot close pipe", ErrorCodes::CANNOT_PIPE); + throw ErrnoException(ErrorCodes::CANNOT_PIPE, 
"Cannot close pipe"); fd = -1; } } @@ -74,18 +74,18 @@ void LazyPipeFDs::setNonBlockingWrite() { int flags = fcntl(fds_rw[1], F_GETFL, 0); if (-1 == flags) - throwFromErrno("Cannot get file status flags of pipe", ErrorCodes::CANNOT_FCNTL); + throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot get file status flags of pipe"); if (-1 == fcntl(fds_rw[1], F_SETFL, flags | O_NONBLOCK)) - throwFromErrno("Cannot set non-blocking mode of pipe", ErrorCodes::CANNOT_FCNTL); + throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot set non-blocking mode of pipe"); } void LazyPipeFDs::setNonBlockingRead() { int flags = fcntl(fds_rw[0], F_GETFL, 0); if (-1 == flags) - throwFromErrno("Cannot get file status flags of pipe", ErrorCodes::CANNOT_FCNTL); + throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot get file status flags of pipe"); if (-1 == fcntl(fds_rw[0], F_SETFL, flags | O_NONBLOCK)) - throwFromErrno("Cannot set non-blocking mode of pipe", ErrorCodes::CANNOT_FCNTL); + throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot set non-blocking mode of pipe"); } void LazyPipeFDs::setNonBlockingReadWrite() @@ -110,13 +110,13 @@ void LazyPipeFDs::tryIncreaseSize(int desired_size) /// It will work nevertheless. } else - throwFromErrno("Cannot get pipe capacity", ErrorCodes::CANNOT_FCNTL); + throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot get pipe capacity"); } else { for (errno = 0; errno != EPERM && pipe_size < desired_size; pipe_size *= 2) if (-1 == fcntl(fds_rw[1], F_SETPIPE_SZ, pipe_size * 2) && errno != EPERM) - throwFromErrno("Cannot increase pipe capacity to " + std::to_string(pipe_size * 2), ErrorCodes::CANNOT_FCNTL); + throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot increase pipe capacity to {}", pipe_size * 2); LOG_TRACE(log, "Pipe capacity is {}", ReadableSize(std::min(pipe_size, desired_size))); } diff --git a/src/Common/ProcfsMetricsProvider.cpp b/src/Common/ProcfsMetricsProvider.cpp index 7a94cecee5e..194053cd271 100644 --- a/src/Common/ProcfsMetricsProvider.cpp +++ b/src/Common/ProcfsMetricsProvider.cpp @@ -37,18 +37,15 @@ namespace { [[noreturn]] inline void throwWithFailedToOpenFile(const std::string & filename) { - throwFromErrno( - "Cannot open file " + filename, - errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); + ErrnoException::throwFromPath( + errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, filename, "Cannot open file {}", filename); } inline void emitErrorMsgWithFailedToCloseFile(const std::string & filename) { try { - throwFromErrno( - "File descriptor for \"" + filename + "\" could not be closed. " - "Something seems to have gone wrong. 
Inspect errno.", ErrorCodes::CANNOT_CLOSE_FILE); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_CLOSE_FILE, filename, "File descriptor for {} could not be closed", filename); } catch (const ErrnoException &) { @@ -69,9 +66,7 @@ ssize_t readFromFD(const int fd, const char * filename, char * buf, size_t buf_s if (errno == EINTR) continue; - throwFromErrno( - "Cannot read from file " + std::string(filename), - ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, filename, "Cannot read from file {}", filename); } assert(res >= 0); diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 1054eae4088..d6e5a77b64a 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -250,9 +250,9 @@ Number of times data after merge is not byte-identical to the data on another re 7. Manual modification of source data after server startup. 8. Manual modification of checksums stored in ZooKeeper. 9. Part format related settings like 'enable_mixed_granularity_parts' are different on different replicas. -The server successfully detected this situation and will download merged part from replica to force byte-identical result. +The server successfully detected this situation and will download merged part from the replica to force the byte-identical result. )") \ - M(DataAfterMutationDiffersFromReplica, "Number of times data after mutation is not byte-identical to the data on another replicas. In addition to the reasons described in 'DataAfterMergeDiffersFromReplica', it is also possible due to non-deterministic mutation.") \ + M(DataAfterMutationDiffersFromReplica, "Number of times data after mutation is not byte-identical to the data on other replicas. In addition to the reasons described in 'DataAfterMergeDiffersFromReplica', it is also possible due to non-deterministic mutation.") \ M(PolygonsAddedToPool, "A polygon has been added to the cache (pool) for the 'pointInPolygon' function.") \ M(PolygonsInPoolAllocatedBytes, "The number of bytes for polygons added to the cache (pool) for the 'pointInPolygon' function.") \ \ @@ -272,12 +272,12 @@ The server successfully detected this situation and will download merged part fr M(PartsLockWaitMicroseconds, "Total time spent waiting for data parts lock in MergeTree tables") \ \ M(RealTimeMicroseconds, "Total (wall clock) time spent in processing (queries and other tasks) threads (note that this is a sum).") \ - M(UserTimeMicroseconds, "Total time spent in processing (queries and other tasks) threads executing CPU instructions in user mode. This include time CPU pipeline was stalled due to main memory access, cache misses, branch mispredictions, hyper-threading, etc.") \ + M(UserTimeMicroseconds, "Total time spent in processing (queries and other tasks) threads executing CPU instructions in user mode. This includes time CPU pipeline was stalled due to main memory access, cache misses, branch mispredictions, hyper-threading, etc.") \ M(SystemTimeMicroseconds, "Total time spent in processing (queries and other tasks) threads executing CPU instructions in OS kernel mode. 
This is time spent in syscalls, excluding waiting time during blocking syscalls.") \ M(MemoryOvercommitWaitTimeMicroseconds, "Total time spent in waiting for memory to be freed in OvercommitTracker.") \ M(MemoryAllocatorPurge, "Total number of times memory allocator purge was requested") \ M(MemoryAllocatorPurgeTimeMicroseconds, "Total number of times memory allocator purge was requested") \ - M(SoftPageFaults, "The number of soft page faults in query execution threads. Soft page fault usually means a miss in the memory allocator cache which required a new memory mapping from the OS and subsequent allocation of a page of physical memory.") \ + M(SoftPageFaults, "The number of soft page faults in query execution threads. Soft page fault usually means a miss in the memory allocator cache, which requires a new memory mapping from the OS and subsequent allocation of a page of physical memory.") \ M(HardPageFaults, "The number of hard page faults in query execution threads. High values indicate either that you forgot to turn off swap on your server, or eviction of memory pages of the ClickHouse binary during very high memory pressure, or successful usage of the 'mmap' read method for the tables data.") \ \ M(OSIOWaitMicroseconds, "Total time a thread spent waiting for a result of IO operation, from the OS point of view. This is real IO that doesn't include page cache.") \ @@ -290,8 +290,8 @@ The server successfully detected this situation and will download merged part fr \ M(PerfCpuCycles, "Total cycles. Be wary of what happens during CPU frequency scaling.") \ M(PerfInstructions, "Retired instructions. Be careful, these can be affected by various issues, most notably hardware interrupt counts.") \ - M(PerfCacheReferences, "Cache accesses. Usually this indicates Last Level Cache accesses but this may vary depending on your CPU. This may include prefetches and coherency messages; again this depends on the design of your CPU.") \ - M(PerfCacheMisses, "Cache misses. Usually this indicates Last Level Cache misses; this is intended to be used in con‐junction with the PERFCOUNTHWCACHEREFERENCES event to calculate cache miss rates.") \ + M(PerfCacheReferences, "Cache accesses. Usually, this indicates Last Level Cache accesses, but this may vary depending on your CPU. This may include prefetches and coherency messages; again this depends on the design of your CPU.") \ + M(PerfCacheMisses, "Cache misses. Usually this indicates Last Level Cache misses; this is intended to be used in conjunction with the PERFCOUNTHWCACHEREFERENCES event to calculate cache miss rates.") \ M(PerfBranchInstructions, "Retired branch instructions. 
Prior to Linux 2.6.35, this used the wrong event on AMD processors.") \ M(PerfBranchMisses, "Mispredicted branch instructions.") \ M(PerfBusCycles, "Bus cycles, which can be different from total cycles.") \ @@ -451,25 +451,27 @@ The server successfully detected this situation and will download merged part fr M(ThreadpoolReaderSubmitReadSynchronously, "How many times we haven't scheduled a task on the thread pool and read synchronously instead") \ M(ThreadpoolReaderSubmitReadSynchronouslyBytes, "How many bytes were read synchronously") \ M(ThreadpoolReaderSubmitReadSynchronouslyMicroseconds, "How much time we spent reading synchronously") \ + M(ThreadpoolReaderSubmitLookupInCacheMicroseconds, "How much time we spent checking if content is cached") \ M(AsynchronousReaderIgnoredBytes, "Number of bytes ignored during asynchronous reading") \ \ M(FileSegmentWaitReadBufferMicroseconds, "Metric per file segment. Time spend waiting for internal read buffer (includes cache waiting)") \ M(FileSegmentReadMicroseconds, "Metric per file segment. Time spend reading from file") \ M(FileSegmentCacheWriteMicroseconds, "Metric per file segment. Time spend writing data to cache") \ - M(FileSegmentPredownloadMicroseconds, "Metric per file segment. Time spent predownloading data to cache (predownloading - finishing file segment download (after someone who failed to do that) up to the point current thread was requested to do)") \ + M(FileSegmentPredownloadMicroseconds, "Metric per file segment. Time spent pre-downloading data to cache (pre-downloading - finishing file segment download (after someone who failed to do that) up to the point current thread was requested to do)") \ M(FileSegmentUsedBytes, "Metric per file segment. How many bytes were actually used from current file segment") \ \ M(ReadBufferSeekCancelConnection, "Number of seeks which lead to new connection (s3, http)") \ \ M(SleepFunctionCalls, "Number of times a sleep function (sleep, sleepEachRow) has been called.") \ - M(SleepFunctionMicroseconds, "Time spent sleeping due to a sleep function call.") \ + M(SleepFunctionMicroseconds, "Time set to sleep in a sleep function (sleep, sleepEachRow).") \ + M(SleepFunctionElapsedMicroseconds, "Time spent sleeping in a sleep function (sleep, sleepEachRow).") \ \ - M(ThreadPoolReaderPageCacheHit, "Number of times the read inside ThreadPoolReader was done from page cache.") \ - M(ThreadPoolReaderPageCacheHitBytes, "Number of bytes read inside ThreadPoolReader when it was done from page cache.") \ + M(ThreadPoolReaderPageCacheHit, "Number of times the read inside ThreadPoolReader was done from the page cache.") \ + M(ThreadPoolReaderPageCacheHitBytes, "Number of bytes read inside ThreadPoolReader when it was done from the page cache.") \ M(ThreadPoolReaderPageCacheHitElapsedMicroseconds, "Time spent reading data from page cache in ThreadPoolReader.") \ M(ThreadPoolReaderPageCacheMiss, "Number of times the read inside ThreadPoolReader was not done from page cache and was hand off to thread pool.") \ M(ThreadPoolReaderPageCacheMissBytes, "Number of bytes read inside ThreadPoolReader when read was not done from page cache and was hand off to thread pool.") \ - M(ThreadPoolReaderPageCacheMissElapsedMicroseconds, "Time spent reading data inside the asynchronous job in ThreadPoolReader - when read was not done from page cache.") \ + M(ThreadPoolReaderPageCacheMissElapsedMicroseconds, "Time spent reading data inside the asynchronous job in ThreadPoolReader - when read was not done from the page cache.") \ \ 
M(AsynchronousReadWaitMicroseconds, "Time spent in waiting for asynchronous reads in asynchronous local read.") \ M(SynchronousReadWaitMicroseconds, "Time spent in waiting for synchronous reads in asynchronous local read.") \ @@ -510,7 +512,7 @@ The server successfully detected this situation and will download merged part fr M(SchemaInferenceCacheSchemaHits, "Number of times the schema is found in schema cache during schema inference") \ M(SchemaInferenceCacheNumRowsHits, "Number of times the number of rows is found in schema cache during count from files") \ M(SchemaInferenceCacheMisses, "Number of times the requested source is not in schema cache") \ - M(SchemaInferenceCacheSchemaMisses, "Number of times the requested source is in cache but the schema is not in cache while schema inference") \ + M(SchemaInferenceCacheSchemaMisses, "Number of times the requested source is in cache but the schema is not in cache during schema inference") \ M(SchemaInferenceCacheNumRowsMisses, "Number of times the requested source is in cache but the number of rows is not in cache while count from files") \ M(SchemaInferenceCacheEvictions, "Number of times a schema from cache was evicted due to overflow") \ M(SchemaInferenceCacheInvalidations, "Number of times a schema in cache became invalid due to changes in data") \ @@ -568,7 +570,7 @@ The server successfully detected this situation and will download merged part fr \ M(ReadTaskRequestsSent, "The number of callbacks requested from the remote server back to the initiator server to choose the read task (for s3Cluster table function and similar). Measured on the remote server side.") \ M(MergeTreeReadTaskRequestsSent, "The number of callbacks requested from the remote server back to the initiator server to choose the read task (for MergeTree tables). Measured on the remote server side.") \ - M(MergeTreeAllRangesAnnouncementsSent, "The number of announcement sent from the remote server to the initiator server about the set of data parts (for MergeTree tables). Measured on the remote server side.") \ + M(MergeTreeAllRangesAnnouncementsSent, "The number of announcements sent from the remote server to the initiator server about the set of data parts (for MergeTree tables). Measured on the remote server side.") \ M(ReadTaskRequestsSentElapsedMicroseconds, "Time spent in callbacks requested from the remote server back to the initiator server to choose the read task (for s3Cluster table function and similar). Measured on the remote server side.") \ M(MergeTreeReadTaskRequestsSentElapsedMicroseconds, "Time spent in callbacks requested from the remote server back to the initiator server to choose the read task (for MergeTree tables). Measured on the remote server side.") \ M(MergeTreeAllRangesAnnouncementsSentElapsedMicroseconds, "Time spent in sending the announcement from the remote server to the initiator server about the set of data parts (for MergeTree tables). 
Measured on the remote server side.") \ @@ -584,6 +586,21 @@ The server successfully detected this situation and will download merged part fr M(LogWarning, "Number of log messages with level Warning") \ M(LogError, "Number of log messages with level Error") \ M(LogFatal, "Number of log messages with level Fatal") \ + \ + M(InterfaceHTTPSendBytes, "Number of bytes sent through HTTP interfaces") \ + M(InterfaceHTTPReceiveBytes, "Number of bytes received through HTTP interfaces") \ + M(InterfaceNativeSendBytes, "Number of bytes sent through native interfaces") \ + M(InterfaceNativeReceiveBytes, "Number of bytes received through native interfaces") \ + M(InterfacePrometheusSendBytes, "Number of bytes sent through Prometheus interfaces") \ + M(InterfacePrometheusReceiveBytes, "Number of bytes received through Prometheus interfaces") \ + M(InterfaceInterserverSendBytes, "Number of bytes sent through interserver interfaces") \ + M(InterfaceInterserverReceiveBytes, "Number of bytes received through interserver interfaces") \ + M(InterfaceMySQLSendBytes, "Number of bytes sent through MySQL interfaces") \ + M(InterfaceMySQLReceiveBytes, "Number of bytes received through MySQL interfaces") \ + M(InterfacePostgreSQLSendBytes, "Number of bytes sent through PostgreSQL interfaces") \ + M(InterfacePostgreSQLReceiveBytes, "Number of bytes received through PostgreSQL interfaces") \ + \ + M(ParallelReplicasUsedCount, "Number of replicas used to execute a query with task-based parallel replicas") \ #ifdef APPLY_FOR_EXTERNAL_EVENTS #define APPLY_FOR_EVENTS(M) APPLY_FOR_BUILTIN_EVENTS(M) APPLY_FOR_EXTERNAL_EVENTS(M) diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index c656e7f992f..16c8d4e223f 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -141,7 +141,7 @@ void Timer::createIfNecessary(UInt64 thread_id, int clock_type, int pause_signal /// Also, it cannot be created if the server has too many threads. 
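The M(Name, "description") entries in the ProfileEvents list above only declare a counter and its documentation; code paths bump the counters with ProfileEvents::increment. A hedged usage sketch for one of the newly added interface counters (the surrounding function and variable are illustrative, the increment call follows the usual ClickHouse pattern):

#include <cstddef>
#include <Common/ProfileEvents.h>

namespace ProfileEvents
{
    /// Declared by the M(...) list above; referenced from the translation unit that increments it.
    extern const Event InterfaceHTTPSendBytes;
}

/// Hypothetical call site: account every chunk written to an HTTP client.
void onHTTPChunkSent(size_t bytes_sent)
{
    ProfileEvents::increment(ProfileEvents::InterfaceHTTPSendBytes, bytes_sent);
}
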
- throwFromErrno("Failed to create thread timer", ErrorCodes::CANNOT_CREATE_TIMER); + throw ErrnoException(ErrorCodes::CANNOT_CREATE_TIMER, "Failed to create thread timer"); } timer_id.emplace(local_timer_id); CurrentMetrics::add(CurrentMetrics::CreatedTimersInQueryProfiler); @@ -164,7 +164,7 @@ void Timer::set(UInt32 period) struct itimerspec timer_spec = {.it_interval = interval, .it_value = offset}; if (timer_settime(*timer_id, 0, &timer_spec, nullptr)) - throwFromErrno("Failed to set thread timer period", ErrorCodes::CANNOT_SET_TIMER_PERIOD); + throw ErrnoException(ErrorCodes::CANNOT_SET_TIMER_PERIOD, "Failed to set thread timer period"); CurrentMetrics::add(CurrentMetrics::ActiveTimersInQueryProfiler); } @@ -238,13 +238,13 @@ QueryProfilerBase::QueryProfilerBase(UInt64 thread_id, int clock_t sa.sa_flags = SA_SIGINFO | SA_RESTART; if (sigemptyset(&sa.sa_mask)) - throwFromErrno("Failed to clean signal mask for query profiler", ErrorCodes::CANNOT_MANIPULATE_SIGSET); + throw ErrnoException(ErrorCodes::CANNOT_MANIPULATE_SIGSET, "Failed to clean signal mask for query profiler"); if (sigaddset(&sa.sa_mask, pause_signal)) - throwFromErrno("Failed to add signal to mask for query profiler", ErrorCodes::CANNOT_MANIPULATE_SIGSET); + throw ErrnoException(ErrorCodes::CANNOT_MANIPULATE_SIGSET, "Failed to add signal to mask for query profiler"); if (sigaction(pause_signal, &sa, nullptr)) - throwFromErrno("Failed to setup signal handler for query profiler", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Failed to setup signal handler for query profiler"); try { diff --git a/src/Common/SensitiveDataMasker.cpp b/src/Common/SensitiveDataMasker.cpp index c4d6952f31d..2b21c223bd8 100644 --- a/src/Common/SensitiveDataMasker.cpp +++ b/src/Common/SensitiveDataMasker.cpp @@ -1,6 +1,5 @@ #include "SensitiveDataMasker.h" -#include #include #include #include @@ -95,28 +94,20 @@ public: SensitiveDataMasker::~SensitiveDataMasker() = default; std::unique_ptr SensitiveDataMasker::sensitive_data_masker = nullptr; -std::mutex SensitiveDataMasker::instance_mutex; void SensitiveDataMasker::setInstance(std::unique_ptr sensitive_data_masker_) { - if (!sensitive_data_masker_) throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: the 'sensitive_data_masker' is not set"); - std::lock_guard lock(instance_mutex); if (sensitive_data_masker_->rulesCount() > 0) { sensitive_data_masker = std::move(sensitive_data_masker_); } - else - { - sensitive_data_masker.reset(); - } } SensitiveDataMasker * SensitiveDataMasker::getInstance() { - std::lock_guard lock(instance_mutex); return sensitive_data_masker.get(); } diff --git a/src/Common/SensitiveDataMasker.h b/src/Common/SensitiveDataMasker.h index abb613043d1..de5cc125dcc 100644 --- a/src/Common/SensitiveDataMasker.h +++ b/src/Common/SensitiveDataMasker.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include @@ -46,7 +45,6 @@ class SensitiveDataMasker private: class MaskingRule; std::vector> all_masking_rules; - static std::mutex instance_mutex; static std::unique_ptr sensitive_data_masker; public: diff --git a/src/Common/ShellCommand.cpp b/src/Common/ShellCommand.cpp index 5550b68c824..f4efc9e3526 100644 --- a/src/Common/ShellCommand.cpp +++ b/src/Common/ShellCommand.cpp @@ -145,7 +145,7 @@ std::unique_ptr ShellCommand::executeImpl( #endif if (!real_vfork) - throwFromErrno("Cannot find symbol vfork in myself", ErrorCodes::CANNOT_DLSYM); + throw ErrnoException(ErrorCodes::CANNOT_DLSYM, "Cannot find symbol vfork in 
myself"); PipeFDs pipe_stdin; PipeFDs pipe_stdout; @@ -163,7 +163,7 @@ std::unique_ptr ShellCommand::executeImpl( pid_t pid = reinterpret_cast(real_vfork)(); if (pid == -1) - throwFromErrno("Cannot vfork", ErrorCodes::CANNOT_FORK); + throw ErrnoException(ErrorCodes::CANNOT_FORK, "Cannot vfork"); if (0 == pid) { @@ -305,7 +305,7 @@ int ShellCommand::tryWait() while (waitpid(pid, &status, 0) < 0) { if (errno != EINTR) - throwFromErrno("Cannot waitpid", ErrorCodes::CANNOT_WAITPID); + throw ErrnoException(ErrorCodes::CANNOT_WAITPID, "Cannot waitpid"); } LOG_TRACE(getLogger(), "Wait for shell command pid {} completed with status {}", pid, status); diff --git a/src/Common/StatusFile.cpp b/src/Common/StatusFile.cpp index a9ffce7ddf8..0a9aa2f2739 100644 --- a/src/Common/StatusFile.cpp +++ b/src/Common/StatusFile.cpp @@ -64,7 +64,7 @@ StatusFile::StatusFile(std::string path_, FillFunction fill_) fd = ::open(path.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0666); if (-1 == fd) - throwFromErrnoWithPath("Cannot open file " + path, path, ErrorCodes::CANNOT_OPEN_FILE); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_OPEN_FILE, path, "Cannot open file {}", path); try { @@ -74,14 +74,14 @@ StatusFile::StatusFile(std::string path_, FillFunction fill_) if (errno == EWOULDBLOCK) throw Exception(ErrorCodes::CANNOT_OPEN_FILE, "Cannot lock file {}. Another server instance in same directory is already running.", path); else - throwFromErrnoWithPath("Cannot lock file " + path, path, ErrorCodes::CANNOT_OPEN_FILE); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_OPEN_FILE, path, "Cannot lock file {}", path); } if (0 != ftruncate(fd, 0)) - throwFromErrnoWithPath("Cannot ftruncate " + path, path, ErrorCodes::CANNOT_TRUNCATE_FILE); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_TRUNCATE_FILE, path, "Cannot ftruncate file {}", path); if (0 != lseek(fd, 0, SEEK_SET)) - throwFromErrnoWithPath("Cannot lseek " + path, path, ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, path, "Cannot lseek file {}", path); /// Write information about current server instance to the file. WriteBufferFromFileDescriptor out(fd, 1024); diff --git a/src/Common/SystemLogBase.cpp b/src/Common/SystemLogBase.cpp index a0b3d411e38..d82b582fee6 100644 --- a/src/Common/SystemLogBase.cpp +++ b/src/Common/SystemLogBase.cpp @@ -188,6 +188,9 @@ typename SystemLogQueue::Index SystemLogQueue::pop(std:: bool & should_prepare_tables_anyway, bool & exit_this_thread) { + /// Call dtors and deallocate strings without holding the global lock + output.resize(0); + std::unique_lock lock(mutex); flush_event.wait_for(lock, std::chrono::milliseconds(settings.flush_interval_milliseconds), @@ -200,7 +203,6 @@ typename SystemLogQueue::Index SystemLogQueue::pop(std:: queue_front_index += queue.size(); // Swap with existing array from previous flush, to save memory // allocations. 
- output.resize(0); queue.swap(output); should_prepare_tables_anyway = is_force_prepare_tables; diff --git a/src/Common/TargetSpecific.h b/src/Common/TargetSpecific.h index fd6a57090b8..4ee29d3fc55 100644 --- a/src/Common/TargetSpecific.h +++ b/src/Common/TargetSpecific.h @@ -348,6 +348,25 @@ DECLARE_AVX512VBMI2_SPECIFIC_CODE( #if ENABLE_MULTITARGET_CODE && defined(__GNUC__) && defined(__x86_64__) +/// NOLINTNEXTLINE +#define MULTITARGET_FUNCTION_AVX2_SSE42(FUNCTION_HEADER, name, FUNCTION_BODY) \ + FUNCTION_HEADER \ + \ + AVX2_FUNCTION_SPECIFIC_ATTRIBUTE \ + name##AVX2 \ + FUNCTION_BODY \ + \ + FUNCTION_HEADER \ + \ + SSE42_FUNCTION_SPECIFIC_ATTRIBUTE \ + name##SSE42 \ + FUNCTION_BODY \ + \ + FUNCTION_HEADER \ + \ + name \ + FUNCTION_BODY \ + /// NOLINTNEXTLINE #define MULTITARGET_FUNCTION_AVX512BW_AVX512F_AVX2_SSE42(FUNCTION_HEADER, name, FUNCTION_BODY) \ FUNCTION_HEADER \ @@ -381,6 +400,14 @@ DECLARE_AVX512VBMI2_SPECIFIC_CODE( #else + /// NOLINTNEXTLINE +#define MULTITARGET_FUNCTION_AVX2_SSE42(FUNCTION_HEADER, name, FUNCTION_BODY) \ + FUNCTION_HEADER \ + \ + name \ + FUNCTION_BODY \ + + /// NOLINTNEXTLINE #define MULTITARGET_FUNCTION_AVX512BW_AVX512F_AVX2_SSE42(FUNCTION_HEADER, name, FUNCTION_BODY) \ FUNCTION_HEADER \ diff --git a/src/Common/TerminalSize.cpp b/src/Common/TerminalSize.cpp index c53494fe9a0..bc5b4474384 100644 --- a/src/Common/TerminalSize.cpp +++ b/src/Common/TerminalSize.cpp @@ -19,12 +19,12 @@ uint16_t getTerminalWidth() if (isatty(STDIN_FILENO)) { if (ioctl(STDIN_FILENO, TIOCGWINSZ, &terminal_size)) - DB::throwFromErrno("Cannot obtain terminal window size (ioctl TIOCGWINSZ)", DB::ErrorCodes::SYSTEM_ERROR); + throw DB::ErrnoException(DB::ErrorCodes::SYSTEM_ERROR, "Cannot obtain terminal window size (ioctl TIOCGWINSZ)"); } else if (isatty(STDERR_FILENO)) { if (ioctl(STDERR_FILENO, TIOCGWINSZ, &terminal_size)) - DB::throwFromErrno("Cannot obtain terminal window size (ioctl TIOCGWINSZ)", DB::ErrorCodes::SYSTEM_ERROR); + throw DB::ErrnoException(DB::ErrorCodes::SYSTEM_ERROR, "Cannot obtain terminal window size (ioctl TIOCGWINSZ)"); } /// Default - 0. 
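The new MULTITARGET_FUNCTION_AVX2_SSE42 macro above stamps out the same function body three times (an AVX2 clone, an SSE4.2 clone, and the default build target) when multi-target code is enabled on x86-64 with a GCC-style compiler, and only the plain version otherwise. A hand-written sketch of what that expansion amounts to, plus a runtime dispatch using the GCC/Clang CPU-feature builtins (the function names and the dispatch helper are illustrative, not the ClickHouse macros):

#include <cstddef>

__attribute__((target("avx2")))
float sumFloatsAVX2(const float * data, size_t size)
{
    float sum = 0;
    for (size_t i = 0; i < size; ++i)
        sum += data[i]; /// the compiler may auto-vectorize this loop with AVX2
    return sum;
}

__attribute__((target("sse4.2")))
float sumFloatsSSE42(const float * data, size_t size)
{
    float sum = 0;
    for (size_t i = 0; i < size; ++i)
        sum += data[i];
    return sum;
}

float sumFloats(const float * data, size_t size) /// default build target
{
    float sum = 0;
    for (size_t i = 0; i < size; ++i)
        sum += data[i];
    return sum;
}

float sumFloatsDispatch(const float * data, size_t size)
{
    if (__builtin_cpu_supports("avx2"))
        return sumFloatsAVX2(data, size);
    if (__builtin_cpu_supports("sse4.2"))
        return sumFloatsSSE42(data, size);
    return sumFloats(data, size);
}
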
return terminal_size.ws_col; diff --git a/src/Common/ThreadFuzzer.cpp b/src/Common/ThreadFuzzer.cpp index 433b8a76dba..0868613d880 100644 --- a/src/Common/ThreadFuzzer.cpp +++ b/src/Common/ThreadFuzzer.cpp @@ -258,10 +258,10 @@ void ThreadFuzzer::setup() const #if defined(OS_LINUX) if (sigemptyset(&sa.sa_mask)) - throwFromErrno("Failed to clean signal mask for thread fuzzer", ErrorCodes::CANNOT_MANIPULATE_SIGSET); + throw ErrnoException(ErrorCodes::CANNOT_MANIPULATE_SIGSET, "Failed to clean signal mask for thread fuzzer"); if (sigaddset(&sa.sa_mask, SIGPROF)) - throwFromErrno("Failed to add signal to mask for thread fuzzer", ErrorCodes::CANNOT_MANIPULATE_SIGSET); + throw ErrnoException(ErrorCodes::CANNOT_MANIPULATE_SIGSET, "Failed to add signal to mask for thread fuzzer"); #else // the two following functions always return 0 under mac sigemptyset(&sa.sa_mask); @@ -269,7 +269,7 @@ void ThreadFuzzer::setup() const #endif if (sigaction(SIGPROF, &sa, nullptr)) - throwFromErrno("Failed to setup signal handler for thread fuzzer", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Failed to setup signal handler for thread fuzzer"); static constexpr UInt32 timer_precision = 1000000; @@ -280,7 +280,7 @@ void ThreadFuzzer::setup() const struct itimerval timer = {.it_interval = interval, .it_value = interval}; if (0 != setitimer(ITIMER_PROF, &timer, nullptr)) - throwFromErrno("Failed to create profiling timer", ErrorCodes::CANNOT_CREATE_TIMER); + throw ErrnoException(ErrorCodes::CANNOT_CREATE_TIMER, "Failed to create profiling timer"); } diff --git a/src/Common/ThreadPool.cpp b/src/Common/ThreadPool.cpp index 8cba13373b9..3c2e6228421 100644 --- a/src/Common/ThreadPool.cpp +++ b/src/Common/ThreadPool.cpp @@ -28,6 +28,40 @@ namespace CurrentMetrics extern const Metric GlobalThreadScheduled; } +class JobWithPriority +{ +public: + using Job = std::function; + + Job job; + Priority priority; + CurrentMetrics::Increment metric_increment; + DB::OpenTelemetry::TracingContextOnThread thread_trace_context; + + /// Call stacks of all jobs' schedulings leading to this one + std::vector frame_pointers; + bool enable_job_stack_trace = false; + + JobWithPriority( + Job job_, Priority priority_, CurrentMetrics::Metric metric, + const DB::OpenTelemetry::TracingContextOnThread & thread_trace_context_, + bool capture_frame_pointers) + : job(job_), priority(priority_), metric_increment(metric), + thread_trace_context(thread_trace_context_), enable_job_stack_trace(capture_frame_pointers) + { + if (!capture_frame_pointers) + return; + /// Save all previous jobs call stacks and append with current + frame_pointers = DB::Exception::thread_frame_pointers; + frame_pointers.push_back(StackTrace().getFramePointers()); + } + + bool operator<(const JobWithPriority & rhs) const + { + return priority > rhs.priority; // Reversed for `priority_queue` max-heap to yield minimum value (i.e. highest priority) first + } +}; + static constexpr auto DEFAULT_THREAD_NAME = "ThreadPool"; template @@ -500,3 +534,10 @@ GlobalThreadPool & GlobalThreadPool::instance() return *the_instance; } +void GlobalThreadPool::shutdown() +{ + if (the_instance) + { + the_instance->finalize(); + } +} diff --git a/src/Common/ThreadPool.h b/src/Common/ThreadPool.h index c8eefedd838..31e4eabf63b 100644 --- a/src/Common/ThreadPool.h +++ b/src/Common/ThreadPool.h @@ -20,9 +20,10 @@ #include #include #include -#include #include +class JobWithPriority; + /** Very simple thread pool similar to boost::threadpool. 
* Advantages: * - catches exceptions and rethrows on wait. @@ -109,6 +110,8 @@ public: void addOnDestroyCallback(OnDestroyCallback && callback); private: + friend class GlobalThreadPool; + mutable std::mutex mutex; std::condition_variable job_finished; std::condition_variable new_job_or_shutdown; @@ -126,37 +129,6 @@ private: bool threads_remove_themselves = true; const bool shutdown_on_exception = true; - struct JobWithPriority - { - Job job; - Priority priority; - CurrentMetrics::Increment metric_increment; - DB::OpenTelemetry::TracingContextOnThread thread_trace_context; - - /// Call stacks of all jobs' schedulings leading to this one - std::vector frame_pointers; - bool enable_job_stack_trace = false; - - JobWithPriority( - Job job_, Priority priority_, CurrentMetrics::Metric metric, - const DB::OpenTelemetry::TracingContextOnThread & thread_trace_context_, - bool capture_frame_pointers) - : job(job_), priority(priority_), metric_increment(metric), - thread_trace_context(thread_trace_context_), enable_job_stack_trace(capture_frame_pointers) - { - if (!capture_frame_pointers) - return; - /// Save all previous jobs call stacks and append with current - frame_pointers = DB::Exception::thread_frame_pointers; - frame_pointers.push_back(StackTrace().getFramePointers()); - } - - bool operator<(const JobWithPriority & rhs) const - { - return priority > rhs.priority; // Reversed for `priority_queue` max-heap to yield minimum value (i.e. highest priority) first - } - }; - boost::heap::priority_queue jobs; std::list threads; std::exception_ptr first_exception; @@ -205,6 +177,7 @@ class GlobalThreadPool : public FreeThreadPool, private boost::noncopyable public: static void initialize(size_t max_threads = 10000, size_t max_free_threads = 1000, size_t queue_size = 10000); static GlobalThreadPool & instance(); + static void shutdown(); }; diff --git a/src/Common/ThreadStatus.cpp b/src/Common/ThreadStatus.cpp index 101a56cd620..c99823b2dfa 100644 --- a/src/Common/ThreadStatus.cpp +++ b/src/Common/ThreadStatus.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include diff --git a/src/Common/TimerDescriptor.cpp b/src/Common/TimerDescriptor.cpp index 2fb9618b60a..248febe226e 100644 --- a/src/Common/TimerDescriptor.cpp +++ b/src/Common/TimerDescriptor.cpp @@ -24,7 +24,7 @@ TimerDescriptor::TimerDescriptor(int clockid, int flags) throw Exception(ErrorCodes::CANNOT_CREATE_TIMER, "Cannot create timer_fd descriptor"); if (-1 == fcntl(timer_fd, F_SETFL, O_NONBLOCK)) - throwFromErrno("Cannot set O_NONBLOCK for timer_fd", ErrorCodes::CANNOT_FCNTL); + throw ErrnoException(ErrorCodes::CANNOT_FCNTL, "Cannot set O_NONBLOCK for timer_fd"); } TimerDescriptor::TimerDescriptor(TimerDescriptor && other) noexcept : timer_fd(other.timer_fd) @@ -57,7 +57,7 @@ void TimerDescriptor::reset() const spec.it_value.tv_nsec = 0; if (-1 == timerfd_settime(timer_fd, 0 /*relative timer */, &spec, nullptr)) - throwFromErrno("Cannot reset timer_fd", ErrorCodes::CANNOT_SET_TIMER_PERIOD); + throw ErrnoException(ErrorCodes::CANNOT_SET_TIMER_PERIOD, "Cannot reset timer_fd"); /// Drain socket. /// It may be possible that alarm happened and socket is readable. 
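TimerDescriptor above wraps a Linux timerfd: the descriptor is switched to non-blocking mode, armed with timerfd_settime, and drained by reading the 8-byte expiration counter while retrying on EINTR. A compact standalone sketch of that lifecycle (Linux-only; error handling reduced to std::runtime_error):

#include <sys/timerfd.h>
#include <fcntl.h>
#include <unistd.h>
#include <cerrno>
#include <cstdint>
#include <stdexcept>

/// Create a non-blocking timerfd and arm it as a one-shot relative timer of `usec` microseconds.
int makeRelativeTimerFD(uint64_t usec)
{
    int fd = timerfd_create(CLOCK_MONOTONIC, 0);
    if (fd == -1)
        throw std::runtime_error("Cannot create timer_fd descriptor");
    if (-1 == fcntl(fd, F_SETFL, O_NONBLOCK))
        throw std::runtime_error("Cannot set O_NONBLOCK for timer_fd");

    itimerspec spec{};
    spec.it_value.tv_sec = usec / 1000000;
    spec.it_value.tv_nsec = (usec % 1000000) * 1000;
    if (-1 == timerfd_settime(fd, 0 /* relative timer */, &spec, nullptr))
        throw std::runtime_error("Cannot set time for timer_fd");
    return fd;
}

/// Drain pending expirations so a later poll() doesn't wake up spuriously.
void drainTimerFD(int fd)
{
    uint64_t expirations = 0;
    while (true)
    {
        ssize_t res = ::read(fd, &expirations, sizeof(expirations));
        if (res < 0)
        {
            if (errno == EAGAIN)
                break;      /// non-blocking fd, nothing pending
            if (errno == EINTR)
                continue;   /// interrupted by a signal, retry
            throw std::runtime_error("Cannot drain timer_fd");
        }
        break;              /// counter read successfully
    }
}
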
@@ -78,7 +78,7 @@ void TimerDescriptor::drain() const break; if (errno != EINTR) - throwFromErrno("Cannot drain timer_fd", ErrorCodes::CANNOT_READ_FROM_SOCKET); + throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot drain timer_fd"); } } } @@ -94,7 +94,7 @@ void TimerDescriptor::setRelative(uint64_t usec) const spec.it_value.tv_nsec = (usec % TIMER_PRECISION) * 1'000; if (-1 == timerfd_settime(timer_fd, 0 /*relative timer */, &spec, nullptr)) - throwFromErrno("Cannot set time for timer_fd", ErrorCodes::CANNOT_SET_TIMER_PERIOD); + throw ErrnoException(ErrorCodes::CANNOT_SET_TIMER_PERIOD, "Cannot set time for timer_fd"); } void TimerDescriptor::setRelative(Poco::Timespan timespan) const diff --git a/src/Common/ZooKeeper/IKeeper.h b/src/Common/ZooKeeper/IKeeper.h index 80dee2b5c81..76cdfe9f230 100644 --- a/src/Common/ZooKeeper/IKeeper.h +++ b/src/Common/ZooKeeper/IKeeper.h @@ -471,7 +471,7 @@ private: /// Message must be a compile-time constant template requires std::is_convertible_v - Exception(T && message, const Error code_) : DB::Exception(DB::ErrorCodes::KEEPER_EXCEPTION, std::forward(message)), code(code_) + Exception(T && message, const Error code_) : DB::Exception(std::forward(message), DB::ErrorCodes::KEEPER_EXCEPTION, /* remote_= */ false), code(code_) { incrementErrorMetrics(code); } diff --git a/src/Common/ZooKeeper/TestKeeper.cpp b/src/Common/ZooKeeper/TestKeeper.cpp index a19892736ea..a25329ad7c0 100644 --- a/src/Common/ZooKeeper/TestKeeper.cpp +++ b/src/Common/ZooKeeper/TestKeeper.cpp @@ -99,6 +99,7 @@ struct TestKeeperExistsRequest final : ExistsRequest, TestKeeperRequest struct TestKeeperGetRequest final : GetRequest, TestKeeperRequest { TestKeeperGetRequest() = default; + explicit TestKeeperGetRequest(const GetRequest & base) : GetRequest(base) {} ResponsePtr createResponse() const override; std::pair process(TestKeeper::Container & container, int64_t zxid) const override; }; @@ -118,6 +119,8 @@ struct TestKeeperSetRequest final : SetRequest, TestKeeperRequest struct TestKeeperListRequest : ListRequest, TestKeeperRequest { + TestKeeperListRequest() = default; + explicit TestKeeperListRequest(const ListRequest & base) : ListRequest(base) {} ResponsePtr createResponse() const override; std::pair process(TestKeeper::Container & container, int64_t zxid) const override; }; @@ -176,6 +179,14 @@ struct TestKeeperMultiRequest final : MultiRequest, TestKeeperRequest { requests.push_back(std::make_shared(*concrete_request_check)); } + else if (const auto * concrete_request_get = dynamic_cast(generic_request.get())) + { + requests.push_back(std::make_shared(*concrete_request_get)); + } + else if (const auto * concrete_request_list = dynamic_cast(generic_request.get())) + { + requests.push_back(std::make_shared(*concrete_request_list)); + } else throw Exception::fromMessage(Error::ZBADARGUMENTS, "Illegal command as part of multi ZooKeeper request"); } diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp index 8d18494e964..70b8df5cd2c 100644 --- a/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/src/Common/ZooKeeper/ZooKeeper.cpp @@ -497,6 +497,17 @@ bool ZooKeeper::exists(const std::string & path, Coordination::Stat * stat, cons return existsWatch(path, stat, callbackForEvent(watch)); } +bool ZooKeeper::anyExists(const std::vector & paths) +{ + auto exists_multi_response = exists(paths); + for (size_t i = 0; i < exists_multi_response.size(); ++i) + { + if (exists_multi_response[i].error == Coordination::Error::ZOK) + return true; + } + return false; 
+} + bool ZooKeeper::existsWatch(const std::string & path, Coordination::Stat * stat, Coordination::WatchCallback watch_callback) { Coordination::Error code = existsImpl(path, stat, watch_callback); @@ -858,7 +869,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition & /// method is called. do { - /// Use getData insteand of exists to avoid watch leak. + /// Use getData instead of exists to avoid watch leak. impl->get(path, callback, std::make_shared(watch)); if (!state->event.tryWait(1000)) @@ -877,7 +888,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition & return false; } -void ZooKeeper::handleEphemeralNodeExistence(const std::string & path, const std::string & fast_delete_if_equal_value) +void ZooKeeper::deleteEphemeralNodeIfContentMatches(const std::string & path, const std::string & fast_delete_if_equal_value) { zkutil::EventPtr eph_node_disappeared = std::make_shared(); String content; @@ -1164,6 +1175,7 @@ std::future ZooKeeper::asyncRemove(const std::stri return future; } +/// Needs to match ZooKeeperWithInjection::asyncTryRemove implementation std::future ZooKeeper::asyncTryRemove(const std::string & path, int32_t version) { auto promise = std::make_shared>(); diff --git a/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h index 785842b94bd..1f29af0797b 100644 --- a/src/Common/ZooKeeper/ZooKeeper.h +++ b/src/Common/ZooKeeper/ZooKeeper.h @@ -33,7 +33,8 @@ namespace CurrentMetrics namespace DB { - class ZooKeeperLog; +class ZooKeeperLog; +class ZooKeeperWithFaultInjection; namespace ErrorCodes { @@ -194,6 +195,9 @@ private: /// Methods with names not starting at try- raise KeeperException on any error. class ZooKeeper { + /// ZooKeeperWithFaultInjection wants access to `impl` pointer to reimplement some async functions with faults + friend class DB::ZooKeeperWithFaultInjection; + public: using Ptr = std::shared_ptr; @@ -286,6 +290,8 @@ public: return exists(paths.begin(), paths.end()); } + bool anyExists(const std::vector & paths); + std::string get(const std::string & path, Coordination::Stat * stat = nullptr, const EventPtr & watch = nullptr); std::string getWatch(const std::string & path, Coordination::Stat * stat, Coordination::WatchCallback watch_callback); @@ -422,8 +428,9 @@ public: /// Performs several operations in a transaction. /// Throws on every error. Coordination::Responses multi(const Coordination::Requests & requests); - /// Throws only if some operation has returned an "unexpected" error - /// - an error that would cause the corresponding try- method to throw. + /// Throws only if some operation has returned an "unexpected" error - an error that would cause + /// the corresponding try- method to throw. + /// On exception, `responses` may or may not be populated. Coordination::Error tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses); /// Throws nothing (even session expired errors) Coordination::Error tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses); @@ -467,7 +474,7 @@ public: /// If the node exists and its value is equal to fast_delete_if_equal_value it will remove it /// If the node exists and its value is different, it will wait for it to disappear. It will throw a LOGICAL_ERROR if the node doesn't /// disappear automatically after 3x session_timeout. 
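The new ZooKeeper::anyExists(paths) above issues one batched exists() request and returns true as soon as any response comes back ZOK, instead of probing the paths one by one. A hedged usage sketch (the znode names and the wrapper function are made up for illustration; only the anyExists call itself comes from the diff):

#include <string>
#include <vector>
#include <Common/ZooKeeper/ZooKeeper.h>

/// Check whether any marker node is present under `zk_root` with a single round trip.
bool hasAnyMarker(zkutil::ZooKeeper & zookeeper, const std::string & zk_root)
{
    std::vector<std::string> paths = {
        zk_root + "/is_active",  /// hypothetical node names
        zk_root + "/is_lost",
    };
    return zookeeper.anyExists(paths);
}
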
- void handleEphemeralNodeExistence(const std::string & path, const std::string & fast_delete_if_equal_value); + void deleteEphemeralNodeIfContentMatches(const std::string & path, const std::string & fast_delete_if_equal_value); Coordination::ReconfigResponse reconfig( const std::string & joining, @@ -567,8 +574,11 @@ public: void setZooKeeperLog(std::shared_ptr zk_log_); UInt32 getSessionUptime() const { return static_cast(session_uptime.elapsedSeconds()); } + bool hasReachedDeadline() const { return impl->hasReachedDeadline(); } + uint64_t getSessionTimeoutMS() const { return args.session_timeout_ms; } + void setServerCompletelyStarted(); Int8 getConnectedHostIdx() const; @@ -640,8 +650,6 @@ private: ZooKeeperArgs args; - std::mutex mutex; - Poco::Logger * log = nullptr; std::shared_ptr zk_log; diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp index 9ec7208d3eb..d732b900d37 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -1,4 +1,5 @@ -#include "Common/ZooKeeper/ZooKeeperConstants.h" +#include +#include #include #include diff --git a/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.cpp b/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.cpp new file mode 100644 index 00000000000..72923ca0487 --- /dev/null +++ b/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.cpp @@ -0,0 +1,637 @@ +#include +#include + +namespace DB +{ + +ZooKeeperWithFaultInjection::ZooKeeperWithFaultInjection( + zkutil::ZooKeeper::Ptr const & keeper_, + double fault_injection_probability, + UInt64 fault_injection_seed, + std::string name_, + Poco::Logger * logger_) + : keeper(keeper_) + , fault_policy(std::make_unique(fault_injection_probability, fault_injection_seed)) + , name(std::move(name_)) + , logger(logger_) + , seed(fault_injection_seed) +{ +} + +void ZooKeeperWithFaultInjection::resetKeeper() +{ + /// When an error is injected, we need to reset keeper for several reasons + /// a) Avoid processing further requests in this keeper (in async code) + /// b) Simulate a fault as ZooKeeperImpl does, forcing a new session (which drops ephemeral nodes) + /// + /// Ideally we would call `keeper->finalize("Fault injection");` to force the session reload. + /// The problem with that is that many operations currently aren't able to cope with keeper faults correctly, + /// so they would fail. While this is what happens in production, it's not what we want in the CI. + /// + /// Until all the code can handle keeper session resets, we need to simulate it so the code that relies on its + /// behaviour keeps working. An example of such code is insert block ids: If keeper dies between the block id being + /// reserved (via ephemeral node) and the metadata being pushed, the reserved block id will be deleted automatically + /// in keeper (connection drop == delete all ephemeral nodes attached to that connection). This way retrying and + /// getting a new block id is ok. But without a connection reset (because ZooKeeperWithFaultInjection doesn't + /// enforce it yet), the old ephemeral nodes associated with "committing_blocks" will still be there and operations + /// such as block merges, mutations, etc., will think they are alive and wait for them to be ready (which will never + /// happen) + /// Our poor man session reload is to keep track of ephemeral nodes created by this Faulty keeper and delete + /// them manually when we force a fault. 
This is obviously limited as it will only apply for operations processed by + /// this instance, but let's trust more and more code can handle session reloads and we can eliminate the hack. + /// Until that time, the hack remains. + if (keeper) + { + for (const auto & path_created : session_ephemeral_nodes) + { + try + { + keeper->remove(path_created); + } + catch (const Coordination::Exception & e) + { + if (logger) + LOG_TRACE(logger, "Failed to delete ephemeral node ({}) during fault cleanup: {}", path_created, e.what()); + } + } + } + + session_ephemeral_nodes.clear(); + keeper.reset(); +} + +void ZooKeeperWithFaultInjection::multiResponseSaveEphemeralNodePaths( + const Coordination::Requests & requests, const Coordination::Responses & responses) +{ + if (responses.empty()) + return; + + chassert(requests.size() == responses.size()); + + for (size_t i = 0; i < requests.size(); i++) + { + const auto * create_req = dynamic_cast(requests[i].get()); + if (create_req && create_req->is_ephemeral) + { + const auto * create_resp = dynamic_cast(responses.at(i).get()); + chassert(create_resp); + session_ephemeral_nodes.emplace_back(create_resp->path_created); + } + } +} + +void ZooKeeperWithFaultInjection::injectFailureBeforeOperationThrow(const char * func_name, const String & path) +{ + if (unlikely(!keeper)) + { + /// This is ok for async requests, where you call several of them and one introduced a fault + /// In the faults we reset the pointer to mark the connection as failed and inject failures in any + /// subsequent async requests + if (logger) + LOG_TRACE(logger, "ZooKeeperWithFaultInjection called after fault: seed={}, func={} path={}", seed, func_name, path); + throw zkutil::KeeperException::fromMessage(RandomFaultInjection::error_before_op, RandomFaultInjection::msg_session_expired); + } + + if (unlikely(fault_policy) && fault_policy->beforeOperation()) + { + if (logger) + LOG_TRACE( + logger, + "ZooKeeperWithFaultInjection call FAILED: seed={} func={} path={} code={} message={} ", + seed, + func_name, + path, + RandomFaultInjection::error_before_op, + RandomFaultInjection::msg_before_op); + resetKeeper(); + throw zkutil::KeeperException::fromMessage(RandomFaultInjection::error_before_op, RandomFaultInjection::msg_before_op); + } +} + +void ZooKeeperWithFaultInjection::injectFailureAfterOperationThrow(const char * func_name, const String & path) +{ + if (unlikely(fault_policy) && fault_policy->afterOperation()) + { + if (logger) + LOG_TRACE( + logger, + "ZooKeeperWithFaultInjection call FAILED: seed={} func={} path={} code={} message={} ", + seed, + func_name, + path, + RandomFaultInjection::error_after_op, + RandomFaultInjection::msg_after_op); + resetKeeper(); + throw zkutil::KeeperException::fromMessage(RandomFaultInjection::error_after_op, RandomFaultInjection::msg_after_op); + } +} + + +template +std::invoke_result_t +ZooKeeperWithFaultInjection::executeWithFaultSync(const char * func_name, const std::string & path, Operation operation) +{ + injectFailureBeforeOperationThrow(func_name, path); + + if constexpr (!std::is_same_v, void>) + { + auto res = operation(); + injectFailureAfterOperationThrow(func_name, path); + return res; + } + else + { + operation(); + injectFailureAfterOperationThrow(func_name, path); + } +} + +template +bool ZooKeeperWithFaultInjection::injectFailureBeforeOperationPromise(const char * func_name, Promise & promise, const String & path) +{ + if (unlikely(!keeper)) + { + if (logger) + LOG_ERROR(logger, "ZooKeeperWithFaultInjection called after fault 
injection: seed={}, func={} path={}", seed, func_name, path); + promise->set_exception(std::make_exception_ptr( + zkutil::KeeperException::fromMessage(RandomFaultInjection::error_before_op, RandomFaultInjection::msg_session_expired))); + } + + if (unlikely(fault_policy) && fault_policy->beforeOperation()) + { + if (logger) + LOG_TRACE( + logger, "ZooKeeperWithFaultInjection injected fault before operation: seed={} func={} path={}", seed, func_name, path); + resetKeeper(); + promise->set_exception(std::make_exception_ptr( + zkutil::KeeperException::fromMessage(RandomFaultInjection::error_before_op, RandomFaultInjection::msg_before_op))); + return true; + } + return false; +} + +template +bool ZooKeeperWithFaultInjection::injectFailureAfterOperationPromise(const char * func_name, Promise & promise, const String & path) +{ + if (unlikely(fault_policy) && fault_policy->afterOperation()) + { + promise->set_exception(std::make_exception_ptr( + zkutil::KeeperException::fromMessage(RandomFaultInjection::error_after_op, RandomFaultInjection::msg_after_op))); + if (logger) + LOG_TRACE(logger, "ZooKeeperWithFaultInjection injected fault after operation: seed={} func={} path={}", seed, func_name, path); + resetKeeper(); + return true; + } + return false; +} + +Strings ZooKeeperWithFaultInjection::getChildren( + const std::string & path, Coordination::Stat * stat, const zkutil::EventPtr & watch, Coordination::ListRequestType list_request_type) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->getChildren(path, stat, watch, list_request_type); }); +} + +zkutil::ZooKeeper::MultiGetChildrenResponse +ZooKeeperWithFaultInjection::getChildren(const std::vector & paths, Coordination::ListRequestType list_request_type) +{ + return executeWithFaultSync( + __func__, !paths.empty() ? paths.front() : "", [&]() { return keeper->getChildren(paths, list_request_type); }); +} + +Coordination::Error ZooKeeperWithFaultInjection::tryGetChildren( + const std::string & path, + Strings & res, + Coordination::Stat * stat, + const zkutil::EventPtr & watch, + Coordination::ListRequestType list_request_type) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->tryGetChildren(path, res, stat, watch, list_request_type); }); +} + +zkutil::ZooKeeper::MultiTryGetChildrenResponse +ZooKeeperWithFaultInjection::tryGetChildren(const std::vector & paths, Coordination::ListRequestType list_request_type) +{ + return executeWithFaultSync( + __func__, !paths.empty() ? 
paths.front() : "", [&]() { return keeper->tryGetChildren(paths, list_request_type); }); +} + +Coordination::Error ZooKeeperWithFaultInjection::tryGetChildrenWatch( + const std::string & path, + Strings & res, + Coordination::Stat * stat, + Coordination::WatchCallback watch_callback, + Coordination::ListRequestType list_request_type) +{ + return executeWithFaultSync( + __func__, path, [&]() { return keeper->tryGetChildrenWatch(path, res, stat, watch_callback, list_request_type); }); +} + +Strings ZooKeeperWithFaultInjection::getChildrenWatch( + const std::string & path, + Coordination::Stat * stat, + Coordination::WatchCallback watch_callback, + Coordination::ListRequestType list_request_type) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->getChildrenWatch(path, stat, watch_callback, list_request_type); }); +} + +Strings ZooKeeperWithFaultInjection::getChildrenWatch( + const std::string & path, + Coordination::Stat * stat, + Coordination::WatchCallbackPtr watch_callback, + Coordination::ListRequestType list_request_type) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->getChildrenWatch(path, stat, watch_callback, list_request_type); }); +} + +bool ZooKeeperWithFaultInjection::tryGet( + const std::string & path, std::string & res, Coordination::Stat * stat, const zkutil::EventPtr & watch, Coordination::Error * code) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->tryGet(path, res, stat, watch, code); }); +} + +bool ZooKeeperWithFaultInjection::tryGetWatch( + const std::string & path, + std::string & res, + Coordination::Stat * stat, + Coordination::WatchCallback watch_callback, + Coordination::Error * code) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->tryGetWatch(path, res, stat, watch_callback, code); }); +} + +std::string ZooKeeperWithFaultInjection::get(const std::string & path, Coordination::Stat * stat, const zkutil::EventPtr & watch) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->get(path, stat, watch); }); +} + +zkutil::ZooKeeper::MultiGetResponse ZooKeeperWithFaultInjection::get(const std::vector & paths) +{ + return executeWithFaultSync(__func__, !paths.empty() ? paths.front() : "", [&]() { return keeper->get(paths); }); +} + +zkutil::ZooKeeper::MultiTryGetResponse ZooKeeperWithFaultInjection::tryGet(const std::vector & paths) +{ + return executeWithFaultSync(__func__, !paths.empty() ? paths.front() : "", [&]() { return keeper->tryGet(paths); }); +} + +void ZooKeeperWithFaultInjection::set(const String & path, const String & data, int32_t version, Coordination::Stat * stat) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->set(path, data, version, stat); }); +} + +void ZooKeeperWithFaultInjection::remove(const String & path, int32_t version) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->remove(path, version); }); +} + +bool ZooKeeperWithFaultInjection::exists(const std::string & path, Coordination::Stat * stat, const zkutil::EventPtr & watch) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->exists(path, stat, watch); }); +} + +bool ZooKeeperWithFaultInjection::anyExists(const std::vector & paths) +{ + return executeWithFaultSync(__func__, !paths.empty() ? paths.front() : "", [&]() { return keeper->anyExists(paths); }); +} + +zkutil::ZooKeeper::MultiExistsResponse ZooKeeperWithFaultInjection::exists(const std::vector & paths) +{ + return executeWithFaultSync(__func__, !paths.empty() ? 
paths.front() : "", [&]() { return keeper->exists(paths); }); +} + +std::string ZooKeeperWithFaultInjection::create(const std::string & path, const std::string & data, int32_t mode) +{ + return executeWithFaultSync( + __func__, + path, + [&]() + { + auto path_created = keeper->create(path, data, mode); + if (unlikely(fault_policy) && (mode == zkutil::CreateMode::EphemeralSequential || mode == zkutil::CreateMode::Ephemeral)) + session_ephemeral_nodes.emplace_back(path_created); + return path_created; + }); +} + +Coordination::Error +ZooKeeperWithFaultInjection::tryCreate(const std::string & path, const std::string & data, int32_t mode, std::string & path_created) +{ + return executeWithFaultSync( + __func__, + path, + [&]() + { + Coordination::Error code = keeper->tryCreate(path, data, mode, path_created); + if (unlikely(fault_policy) && code == Coordination::Error::ZOK + && (mode == zkutil::CreateMode::EphemeralSequential || mode == zkutil::CreateMode::Ephemeral)) + session_ephemeral_nodes.emplace_back(path_created); + return code; + }); +} + +Coordination::Error ZooKeeperWithFaultInjection::tryCreate(const std::string & path, const std::string & data, int32_t mode) +{ + std::string path_created; + return tryCreate(path, data, mode, path_created); +} + +Coordination::Responses ZooKeeperWithFaultInjection::multi(const Coordination::Requests & requests) +{ + return executeWithFaultSync( + __func__, + !requests.empty() ? requests.front()->getPath() : "", + [&]() + { + auto responses = keeper->multi(requests); + if (unlikely(fault_policy)) + multiResponseSaveEphemeralNodePaths(requests, responses); + return responses; + }); +} + +void ZooKeeperWithFaultInjection::createIfNotExists(const std::string & path, const std::string & data) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->createIfNotExists(path, data); }); +} + +void ZooKeeperWithFaultInjection::createOrUpdate(const std::string & path, const std::string & data, int32_t mode) +{ + chassert(mode != zkutil::CreateMode::EphemeralSequential && mode != zkutil::CreateMode::Ephemeral); + return executeWithFaultSync(__func__, path, [&]() { return keeper->createOrUpdate(path, data, mode); }); +} + +void ZooKeeperWithFaultInjection::createAncestors(const std::string & path) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->createAncestors(path); }); +} + +Coordination::Error ZooKeeperWithFaultInjection::tryRemove(const std::string & path, int32_t version) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->tryRemove(path, version); }); +} + +void ZooKeeperWithFaultInjection::removeRecursive(const std::string & path) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->removeRecursive(path); }); +} + +void ZooKeeperWithFaultInjection::tryRemoveRecursive(const std::string & path) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->tryRemoveRecursive(path); }); +} + +void ZooKeeperWithFaultInjection::removeChildren(const std::string & path) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->removeChildren(path); }); +} + +bool ZooKeeperWithFaultInjection::tryRemoveChildrenRecursive( + const std::string & path, bool probably_flat, zkutil::RemoveException keep_child) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->tryRemoveChildrenRecursive(path, probably_flat, keep_child); }); +} + +bool ZooKeeperWithFaultInjection::waitForDisappear(const std::string & path, const zkutil::ZooKeeper::WaitCondition & 
condition) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->waitForDisappear(path, condition); }); +} + +std::string ZooKeeperWithFaultInjection::sync(const std::string & path) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->sync(path); }); +} + +Coordination::Error +ZooKeeperWithFaultInjection::trySet(const std::string & path, const std::string & data, int32_t version, Coordination::Stat * stat) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->trySet(path, data, version, stat); }); +} + +void ZooKeeperWithFaultInjection::checkExistsAndGetCreateAncestorsOps(const std::string & path, Coordination::Requests & requests) +{ + return executeWithFaultSync(__func__, path, [&]() { return keeper->checkExistsAndGetCreateAncestorsOps(path, requests); }); +} + +void ZooKeeperWithFaultInjection::deleteEphemeralNodeIfContentMatches( + const std::string & path, const std::string & fast_delete_if_equal_value) +{ + return executeWithFaultSync( + __func__, path, [&]() { return keeper->deleteEphemeralNodeIfContentMatches(path, fast_delete_if_equal_value); }); +} + +Coordination::Error ZooKeeperWithFaultInjection::tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses) +{ + return executeWithFaultSync( + __func__, + !requests.empty() ? requests.front()->getPath() : "", + [&]() + { + auto code = keeper->tryMulti(requests, responses); + if (unlikely(fault_policy) && code == Coordination::Error::ZOK) + multiResponseSaveEphemeralNodePaths(requests, responses); + return code; + }); +} + +Coordination::Error +ZooKeeperWithFaultInjection::tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses) +{ + try + { + return tryMulti(requests, responses); + } + catch (const Coordination::Exception & e) + { + return e.code; + } +} + +zkutil::ZooKeeper::FutureExists ZooKeeperWithFaultInjection::asyncExists(std::string path, Coordination::WatchCallback watch_callback) +{ + auto promise = std::make_shared>(); + auto future = promise->get_future(); + if (injectFailureBeforeOperationPromise(__func__, promise, path)) + return future; + + const char * function_name = __func__; + auto callback = [&, promise](const Coordination::ExistsResponse & response) mutable + { + if (injectFailureAfterOperationPromise(function_name, promise, path)) + return; + + if (response.error != Coordination::Error::ZOK && response.error != Coordination::Error::ZNONODE) + promise->set_exception(std::make_exception_ptr(zkutil::KeeperException::fromPath(response.error, path))); + else + promise->set_value(response); + }; + + keeper->impl->exists( + path, + std::move(callback), + watch_callback ? 
std::make_shared(watch_callback) : Coordination::WatchCallbackPtr{}); + return future; +} + +zkutil::ZooKeeper::FutureGet ZooKeeperWithFaultInjection::asyncTryGet(std::string path) +{ + auto promise = std::make_shared>(); + auto future = promise->get_future(); + if (injectFailureBeforeOperationPromise(__func__, promise, path)) + return future; + + const char * function_name = __func__; + auto callback = [&, promise](const Coordination::GetResponse & response) mutable + { + if (injectFailureAfterOperationPromise(function_name, promise, path)) + return; + + if (response.error != Coordination::Error::ZOK && response.error != Coordination::Error::ZNONODE) + promise->set_exception(std::make_exception_ptr(zkutil::KeeperException::fromPath(response.error, path))); + else + promise->set_value(response); + }; + + keeper->impl->get(path, std::move(callback), {}); + return future; +} + + +zkutil::ZooKeeper::FutureMulti ZooKeeperWithFaultInjection::asyncTryMultiNoThrow(const Coordination::Requests & ops) +{ +#ifndef NDEBUG + /// asyncTryMultiNoThrow is not setup to handle faults with ephemeral nodes + /// To do it we'd need to look at ops and save the indexes BEFORE the callback, as the ops are not + /// guaranteed to live until then + for (const auto & op : ops) + { + const auto * create_req = dynamic_cast(op.get()); + if (create_req) + chassert(!create_req->is_ephemeral); + } +#endif + + auto promise = std::make_shared>(); + auto future = promise->get_future(); + size_t request_size = ops.size(); + String path = ops.empty() ? "" : ops.front()->getPath(); + + if (!keeper || (unlikely(fault_policy) && fault_policy->beforeOperation())) + { + if (logger) + LOG_TRACE(logger, "ZooKeeperWithFaultInjection injected fault before operation: seed={} func={} path={}", seed, __func__, path); + resetKeeper(); + Coordination::MultiResponse errors; + for (size_t i = 0; i < request_size; i++) + { + auto r = std::make_shared(); + r->error = RandomFaultInjection::error_before_op; + errors.responses.emplace_back(std::move(r)); + } + promise->set_value(errors); + return future; + } + + const char * function_name = __func__; + auto callback = [&, promise](const Coordination::MultiResponse & response) mutable + { + if (unlikely(fault_policy) && fault_policy->afterOperation()) + { + if (logger) + LOG_TRACE( + logger, + "ZooKeeperWithFaultInjection injected fault after operation: seed={} func={} path={}", + seed, + function_name, + path); + resetKeeper(); + Coordination::MultiResponse errors; + for (size_t i = 0; i < request_size; i++) + { + auto r = std::make_shared(); + r->error = RandomFaultInjection::error_after_op; + errors.responses.emplace_back(std::move(r)); + } + promise->set_value(errors); + } + else + { + promise->set_value(response); + } + }; + + keeper->impl->multi(ops, std::move(callback)); + return future; +} + +/// Needs to match ZooKeeper::asyncTryRemove implementation +zkutil::ZooKeeper::FutureRemove ZooKeeperWithFaultInjection::asyncTryRemove(std::string path, int32_t version) +{ + auto promise = std::make_shared>(); + auto future = promise->get_future(); + if (injectFailureBeforeOperationPromise(__func__, promise, path)) + return future; + + const char * function_name = __func__; + auto callback = [&, promise](const Coordination::RemoveResponse & response) mutable + { + if (injectFailureAfterOperationPromise(function_name, promise, path)) + return; + + if (response.error != Coordination::Error::ZOK && response.error != Coordination::Error::ZNONODE + && response.error != Coordination::Error::ZBADVERSION 
&& response.error != Coordination::Error::ZNOTEMPTY) + { + promise->set_exception(std::make_exception_ptr(zkutil::KeeperException::fromPath(response.error, path))); + } + else + promise->set_value(response); + }; + + keeper->impl->remove(path, version, std::move(callback)); + return future; +} + +zkutil::ZooKeeper::FutureRemove ZooKeeperWithFaultInjection::asyncTryRemoveNoThrow(const std::string & path, int32_t version) +{ + auto promise = std::make_shared>(); + auto future = promise->get_future(); + + if (!keeper || (unlikely(fault_policy) && fault_policy->beforeOperation())) + { + if (logger) + LOG_TRACE(logger, "ZooKeeperWithFaultInjection injected fault before operation: seed={} func={} path={}", seed, __func__, path); + resetKeeper(); + Coordination::RemoveResponse r; + r.error = RandomFaultInjection::error_before_op; + promise->set_value(r); + return future; + } + + const char * function_name = __func__; + auto callback = [&, promise](const Coordination::RemoveResponse & response) mutable + { + if (unlikely(fault_policy) && fault_policy->afterOperation()) + { + if (logger) + LOG_TRACE( + logger, + "ZooKeeperWithFaultInjection injected fault after operation: seed={} func={} path={}", + seed, + function_name, + path); + resetKeeper(); + Coordination::RemoveResponse r; + r.error = RandomFaultInjection::error_after_op; + promise->set_value(r); + } + else + { + promise->set_value(response); + } + }; + + keeper->impl->remove(path, version, std::move(callback)); + + return future; +} +} diff --git a/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.h b/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.h index 0b0a033808f..57e1f0f3b87 100644 --- a/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.h +++ b/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.h @@ -12,10 +12,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} class RandomFaultInjection { @@ -23,23 +19,33 @@ public: bool must_fail_after_op = false; bool must_fail_before_op = false; + static constexpr auto msg_session_expired = "Called after fault injection"; + static constexpr auto error_before_op = Coordination::Error::ZSESSIONEXPIRED; + static constexpr auto msg_before_op = "Fault injection before operation"; + static constexpr auto error_after_op = Coordination::Error::ZOPERATIONTIMEOUT; + static constexpr auto msg_after_op = "Fault injection after operation"; + RandomFaultInjection(double probability, UInt64 seed_) : rndgen(seed_), distribution(probability) { } - void beforeOperation() + + bool beforeOperation() { - if (distribution(rndgen) || must_fail_before_op) + if (must_fail_before_op || distribution(rndgen)) { must_fail_before_op = false; - throw zkutil::KeeperException::fromMessage(Coordination::Error::ZSESSIONEXPIRED, "Fault injection before operation"); + return true; } + return false; } - void afterOperation() + + bool afterOperation() { - if (distribution(rndgen) || must_fail_after_op) + if (must_fail_after_op || distribution(rndgen)) { must_fail_after_op = false; - throw zkutil::KeeperException::fromMessage(Coordination::Error::ZOPERATIONTIMEOUT, "Fault injection after operation"); + return true; } + return false; } private: @@ -52,563 +58,209 @@ private: /// class ZooKeeperWithFaultInjection { - template - friend class ReplicatedMergeTreeSinkImpl; + zkutil::ZooKeeper::Ptr keeper; - using zk = zkutil::ZooKeeper; - - zk::Ptr keeper; - zk::Ptr keeper_prev; std::unique_ptr fault_policy; std::string name; Poco::Logger * logger = nullptr; - UInt64 calls_total = 0; - UInt64 
calls_without_fault_injection = 0; const UInt64 seed = 0; - std::vector ephemeral_nodes; + std::vector session_ephemeral_nodes; - ZooKeeperWithFaultInjection( - zk::Ptr const & keeper_, - double fault_injection_probability, - UInt64 fault_injection_seed, - std::string name_, - Poco::Logger * logger_) - : keeper(keeper_), name(std::move(name_)), logger(logger_), seed(fault_injection_seed) - { - fault_policy = std::make_unique(fault_injection_probability, fault_injection_seed); + template + std::invoke_result_t executeWithFaultSync(const char * func_name, const std::string & path, Operation); + void injectFailureBeforeOperationThrow(const char * func_name, const String & path); + void injectFailureAfterOperationThrow(const char * func_name, const String & path); + template + bool injectFailureBeforeOperationPromise(const char * func_name, Promise & promise, const String & path); + template + bool injectFailureAfterOperationPromise(const char * func_name, Promise & promise, const String & path); - if (unlikely(logger)) - LOG_TRACE( - logger, - "ZooKeeperWithFaultInjection created: name={} seed={} fault_probability={}", - name, - seed, - fault_injection_probability); - } + void resetKeeper(); + void multiResponseSaveEphemeralNodePaths(const Coordination::Requests & requests, const Coordination::Responses & responses); public: using Ptr = std::shared_ptr; + ZooKeeperWithFaultInjection( + zkutil::ZooKeeper::Ptr const & keeper_, + double fault_injection_probability, + UInt64 fault_injection_seed, + std::string name_, + Poco::Logger * logger_); + + explicit ZooKeeperWithFaultInjection(zkutil::ZooKeeper::Ptr const & keeper_) : keeper(keeper_) { } static ZooKeeperWithFaultInjection::Ptr createInstance( - double fault_injection_probability, UInt64 fault_injection_seed, const zk::Ptr & zookeeper, std::string name, Poco::Logger * logger) + double fault_injection_probability, + UInt64 fault_injection_seed, + zkutil::ZooKeeper::Ptr const & zookeeper, + std::string name, + Poco::Logger * logger) { /// validate all parameters here, constructor just accept everything - if (fault_injection_probability < 0.0) fault_injection_probability = .0; else if (fault_injection_probability > 1.0) fault_injection_probability = 1.0; - if (0 == fault_injection_seed) + if (fault_injection_seed == 0) fault_injection_seed = randomSeed(); if (fault_injection_probability > 0.0) - return std::shared_ptr( - new ZooKeeperWithFaultInjection(zookeeper, fault_injection_probability, fault_injection_seed, std::move(name), logger)); + return std::make_shared( + zookeeper, fault_injection_probability, fault_injection_seed, std::move(name), logger); /// if no fault injection provided, create instance which will not log anything return std::make_shared(zookeeper); } - explicit ZooKeeperWithFaultInjection(zk::Ptr const & keeper_) : keeper(keeper_) { } + void setKeeper(zkutil::ZooKeeper::Ptr const & keeper_) { keeper = keeper_; } + zkutil::ZooKeeper::Ptr getKeeper() const { return keeper; } + bool isNull() const { return keeper.get() == nullptr; } + bool expired() const { return !keeper || keeper->expired(); } + bool isFeatureEnabled(KeeperFeatureFlag feature_flag) const { return keeper->isFeatureEnabled(feature_flag); } - ~ZooKeeperWithFaultInjection() + void forceFailureBeforeOperation() { - if (unlikely(logger)) - LOG_TRACE( - logger, - "ZooKeeperWithFaultInjection report: name={} seed={} calls_total={} calls_succeeded={} calls_failed={} failure_rate={}", - name, - seed, - calls_total, - calls_without_fault_injection, - calls_total - 
calls_without_fault_injection, - float(calls_total - calls_without_fault_injection) / calls_total); + if (!fault_policy) + fault_policy = std::make_unique(0, 0); + fault_policy->must_fail_before_op = true; } - void setKeeper(zk::Ptr const & keeper_) { keeper = keeper_; } - bool isNull() const { return keeper.get() == nullptr; } - bool expired() { return keeper->expired(); } + void forceFailureAfterOperation() + { + if (!fault_policy) + fault_policy = std::make_unique(0, 0); + fault_policy->must_fail_after_op = true; + } /// - /// mirror ZooKeeper interface + /// mirror ZooKeeper interface: Sync functions /// Strings getChildren( const std::string & path, Coordination::Stat * stat = nullptr, const zkutil::EventPtr & watch = nullptr, - Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL) - { - return access("getChildren", path, [&]() { return keeper->getChildren(path, stat, watch, list_request_type); }); - } + Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL); + + zkutil::ZooKeeper::MultiGetChildrenResponse getChildren( + const std::vector & paths, Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL); Coordination::Error tryGetChildren( const std::string & path, Strings & res, Coordination::Stat * stat = nullptr, const zkutil::EventPtr & watch = nullptr, - Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL) - { - return access("tryGetChildren", path, [&]() { return keeper->tryGetChildren(path, res, stat, watch, list_request_type); }); - } + Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL); - zk::FutureExists asyncExists(const std::string & path, Coordination::WatchCallback watch_callback = {}) - { - return access("asyncExists", path, [&]() { return keeper->asyncExists(path, watch_callback); }); - } + zkutil::ZooKeeper::MultiTryGetChildrenResponse tryGetChildren( + const std::vector & paths, Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL); - zk::FutureGet asyncTryGet(const std::string & path) - { - return access("asyncTryGet", path, [&]() { return keeper->asyncTryGet(path); }); - } + Coordination::Error tryGetChildrenWatch( + const std::string & path, + Strings & res, + Coordination::Stat * stat, + Coordination::WatchCallback watch_callback, + Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL); + + Strings getChildrenWatch( + const std::string & path, + Coordination::Stat * stat, + Coordination::WatchCallback watch_callback, + Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL); + + Strings getChildrenWatch( + const std::string & path, + Coordination::Stat * stat, + Coordination::WatchCallbackPtr watch_callback, + Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL); bool tryGet( const std::string & path, std::string & res, Coordination::Stat * stat = nullptr, const zkutil::EventPtr & watch = nullptr, - Coordination::Error * code = nullptr) - { - return access("tryGet", path, [&]() { return keeper->tryGet(path, res, stat, watch, code); }); - } + Coordination::Error * code = nullptr); - Coordination::Error tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses) - { - constexpr auto method = "tryMulti"; - auto error = access( - method, - !requests.empty() ? 
requests.front()->getPath() : "", - [&]() { return keeper->tryMulti(requests, responses); }, - [&](const Coordination::Error & original_error) - { - if (original_error == Coordination::Error::ZOK) - faultInjectionPostAction(method, requests, responses); - }, - [&]() - { - responses.clear(); - for (size_t i = 0; i < requests.size(); ++i) - responses.emplace_back(std::make_shared()); - }); - - - /// collect ephemeral nodes when no fault was injected (to clean up on demand) - if (unlikely(fault_policy) && Coordination::Error::ZOK == error) - { - doForEachCreatedEphemeralNode( - method, requests, responses, [&](const String & path_created) { ephemeral_nodes.push_back(path_created); }); - } - return error; - } - - Coordination::Error tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses) - { - constexpr auto method = "tryMultiNoThrow"; - constexpr auto no_throw = true; - constexpr auto inject_failure_before_op = false; - auto error = access( - method, - !requests.empty() ? requests.front()->getPath() : "", - [&]() { return keeper->tryMultiNoThrow(requests, responses); }, - [&](const Coordination::Error & original_error) - { - if (original_error == Coordination::Error::ZOK) - faultInjectionPostAction(method, requests, responses); - }, - [&]() - { - responses.clear(); - for (size_t i = 0; i < requests.size(); ++i) - responses.emplace_back(std::make_shared()); - }); - - /// collect ephemeral nodes when no fault was injected (to clean up later) - if (unlikely(fault_policy) && Coordination::Error::ZOK == error) - { - doForEachCreatedEphemeralNode( - method, requests, responses, [&](const String & path_created) { ephemeral_nodes.push_back(path_created); }); - } - return error; - } - - std::string get(const std::string & path, Coordination::Stat * stat = nullptr, const zkutil::EventPtr & watch = nullptr) - { - return access("get", path, [&]() { return keeper->get(path, stat, watch); }); - } - - zkutil::ZooKeeper::MultiGetResponse get(const std::vector & paths) - { - return access("get", !paths.empty() ? paths.front() : "", [&]() { return keeper->get(paths); }); - } - - zkutil::ZooKeeper::MultiTryGetResponse tryGet(const std::vector & paths) - { - return access("tryGet", !paths.empty() ? paths.front() : "", [&]() { return keeper->tryGet(paths); }); - } - - bool exists(const std::string & path, Coordination::Stat * stat = nullptr, const zkutil::EventPtr & watch = nullptr) - { - return access("exists", path, [&]() { return keeper->exists(path, stat, watch); }); - } - - bool existsNoFailureInjection(const std::string & path, Coordination::Stat * stat = nullptr, const zkutil::EventPtr & watch = nullptr) - { - return access("exists", path, [&]() { return keeper->exists(path, stat, watch); }); - } - - zkutil::ZooKeeper::MultiExistsResponse exists(const std::vector & paths) - { - return access("exists", !paths.empty() ? 
paths.front() : "", [&]() { return keeper->exists(paths); }); - } - - std::string create(const std::string & path, const std::string & data, int32_t mode) - { - std::string path_created; - auto code = tryCreate(path, data, mode, path_created); - - if (code != Coordination::Error::ZOK) - throw zkutil::KeeperException::fromPath(code, path); - - return path_created; - } - - Coordination::Error tryCreate(const std::string & path, const std::string & data, int32_t mode, std::string & path_created) - { - path_created.clear(); - - auto error = access( - "tryCreate", - path, - [&]() { return keeper->tryCreate(path, data, mode, path_created); }, - [&](Coordination::Error & code) - { - try - { - if (!path_created.empty() && (mode == zkutil::CreateMode::EphemeralSequential || mode == zkutil::CreateMode::Ephemeral)) - { - keeper->remove(path_created); - if (unlikely(logger)) - LOG_TRACE(logger, "ZooKeeperWithFaultInjection cleanup: seed={} func={} path={} path_created={} code={}", - seed, "tryCreate", path, path_created, code); - } - } - catch (const zkutil::KeeperException & e) - { - if (unlikely(logger)) - LOG_TRACE( - logger, - "ZooKeeperWithFaultInjection cleanup FAILED: seed={} func={} path={} path_created={} code={} message={} ", - seed, - "tryCreate", - path, - path_created, - e.code, - e.message()); - } - }); - - /// collect ephemeral nodes when no fault was injected (to clean up later) - if (unlikely(fault_policy)) - { - if (!path_created.empty() && (mode == zkutil::CreateMode::EphemeralSequential || mode == zkutil::CreateMode::Ephemeral)) - ephemeral_nodes.push_back(path_created); - } - - return error; - } - - Coordination::Error tryCreate(const std::string & path, const std::string & data, int32_t mode) - { - String path_created; - return tryCreate(path, data, mode, path_created); - } - - void createIfNotExists(const std::string & path, const std::string & data) - { - std::string path_created; - auto code = tryCreate(path, data, zkutil::CreateMode::Persistent, path_created); - - if (code == Coordination::Error::ZOK || code == Coordination::Error::ZNODEEXISTS) - return; - - throw zkutil::KeeperException::fromPath(code, path); - } - - Coordination::Responses multi(const Coordination::Requests & requests) - { - constexpr auto method = "multi"; - auto result = access( - method, - !requests.empty() ? 
requests.front()->getPath() : "", - [&]() { return keeper->multi(requests); }, - [&](Coordination::Responses & responses) { faultInjectionPostAction(method, requests, responses); }); - - /// collect ephemeral nodes to clean up - if (unlikely(fault_policy)) - { - doForEachCreatedEphemeralNode( - method, requests, result, [&](const String & path_created) { ephemeral_nodes.push_back(path_created); }); - } - return result; - } - - void createAncestors(const std::string & path) - { - access("createAncestors", path, [&]() { return keeper->createAncestors(path); }); - } - - Coordination::Error tryRemove(const std::string & path, int32_t version = -1) - { - return access("tryRemove", path, [&]() { return keeper->tryRemove(path, version); }); - } - - void removeRecursive(const std::string & path) - { - return access("removeRecursive", path, [&]() { return keeper->removeRecursive(path); }); - } - - std::string sync(const std::string & path) - { - return access("sync", path, [&]() { return keeper->sync(path); }); - } - - Coordination::Error trySet(const std::string & path, const std::string & data, int32_t version = -1, Coordination::Stat * stat = nullptr) - { - return access("trySet", path, [&]() { return keeper->trySet(path, data, version, stat); }); - } - - void checkExistsAndGetCreateAncestorsOps(const std::string & path, Coordination::Requests & requests) - { - return access("checkExistsAndGetCreateAncestorsOps", path, [&]() { return keeper->checkExistsAndGetCreateAncestorsOps(path, requests); }); - } - - void handleEphemeralNodeExistenceNoFailureInjection(const std::string & path, const std::string & fast_delete_if_equal_value) - { - return access("handleEphemeralNodeExistence", path, [&]() { return keeper->handleEphemeralNodeExistence(path, fast_delete_if_equal_value); }); - } - - void cleanupEphemeralNodes() - { - for (const auto & path : ephemeral_nodes) - { - try - { - if (keeper_prev) - keeper_prev->tryRemove(path); - } - catch (...) 
- { - if (unlikely(logger)) - tryLogCurrentException(logger, "Exception during ephemeral nodes clean up"); - } - } - - ephemeral_nodes.clear(); - } - - bool isFeatureEnabled(KeeperFeatureFlag feature_flag) const - { - return keeper->isFeatureEnabled(feature_flag); - } - -private: - void faultInjectionBefore(std::function fault_cleanup) - { - try - { - if (unlikely(fault_policy)) - fault_policy->beforeOperation(); - } - catch (const zkutil::KeeperException &) - { - fault_cleanup(); - throw; - } - } - void faultInjectionAfter(std::function fault_cleanup) - { - try - { - if (unlikely(fault_policy)) - fault_policy->afterOperation(); - } - catch (const zkutil::KeeperException &) - { - fault_cleanup(); - throw; - } - } - - void doForEachCreatedEphemeralNode( - const char * method, const Coordination::Requests & requests, const Coordination::Responses & responses, auto && action) - { - if (responses.empty()) - return; - - if (responses.size() != requests.size()) - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "Number of responses doesn't match number of requests: method={} requests={} responses={}", - method, - requests.size(), - responses.size()); - - /// find create request with ephemeral flag - std::vector> create_requests; - for (size_t i = 0; i < requests.size(); ++i) - { - const auto * create_req = dynamic_cast(requests[i].get()); - if (create_req && create_req->is_ephemeral) - create_requests.emplace_back(i, create_req); - } - - for (auto && [i, req] : create_requests) - { - const auto * create_resp = dynamic_cast(responses.at(i).get()); - if (!create_resp) - throw Exception( - ErrorCodes::LOGICAL_ERROR, "Response should be CreateResponse: method={} index={} path={}", method, i, req->path); - - action(create_resp->path_created); - } - } - - void faultInjectionPostAction(const char * method, const Coordination::Requests & requests, Coordination::Responses & responses) - { - doForEachCreatedEphemeralNode(method, requests, responses, [&](const String & path_created) { keeper->remove(path_created); }); - } - - template - struct FaultCleanupTypeImpl - { - using Type = std::function; - }; - - template <> - struct FaultCleanupTypeImpl - { - using Type = std::function; - }; - - template - using FaultCleanupType = typename FaultCleanupTypeImpl::Type; - - template < - bool no_throw_access = false, - bool inject_failure_before_op = true, - int inject_failure_after_op = true, - typename Operation, - typename Result = std::invoke_result_t> - Result access( - const char * func_name, + bool tryGetWatch( const std::string & path, - Operation operation, - FaultCleanupType fault_after_op_cleanup = {}, - FaultCleanupType fault_before_op_cleanup = {}) - { - try - { - ++calls_total; + std::string & res, + Coordination::Stat * stat, + Coordination::WatchCallback watch_callback, + Coordination::Error * code = nullptr); - if (!keeper) - throw zkutil::KeeperException::fromMessage(Coordination::Error::ZSESSIONEXPIRED, - "Session is considered to be expired due to fault injection"); + std::string get(const std::string & path, Coordination::Stat * stat = nullptr, const zkutil::EventPtr & watch = nullptr); - if constexpr (inject_failure_before_op) - { - faultInjectionBefore( - [&] - { - if (fault_before_op_cleanup) - fault_before_op_cleanup(); - }); - } + zkutil::ZooKeeper::MultiGetResponse get(const std::vector & paths); - if constexpr (!std::is_same_v) - { - Result res = operation(); + zkutil::ZooKeeper::MultiTryGetResponse tryGet(const std::vector & paths); - /// if connectivity error occurred w/o fault injection 
-> just return it - if constexpr (std::is_same_v) - { - if (Coordination::isHardwareError(res)) - return res; - } + void set(const String & path, const String & data, int32_t version = -1, Coordination::Stat * stat = nullptr); - if constexpr (inject_failure_after_op) - { - faultInjectionAfter( - [&] - { - if (fault_after_op_cleanup) - fault_after_op_cleanup(res); - }); - } + void remove(const String & path, int32_t version = -1); - ++calls_without_fault_injection; + bool exists(const std::string & path, Coordination::Stat * stat = nullptr, const zkutil::EventPtr & watch = nullptr); - if (unlikely(logger)) - LOG_TRACE(logger, "ZooKeeperWithFaultInjection call SUCCEEDED: seed={} func={} path={}", seed, func_name, path); + zkutil::ZooKeeper::MultiExistsResponse exists(const std::vector & paths); - return res; - } - else - { - operation(); + bool anyExists(const std::vector & paths); - if constexpr (inject_failure_after_op) - { - faultInjectionAfter( - [&fault_after_op_cleanup] - { - if (fault_after_op_cleanup) - fault_after_op_cleanup(); - }); - } + std::string create(const std::string & path, const std::string & data, int32_t mode); - ++calls_without_fault_injection; + Coordination::Error tryCreate(const std::string & path, const std::string & data, int32_t mode, std::string & path_created); - if (unlikely(logger)) - LOG_TRACE(logger, "ZooKeeperWithFaultInjection call SUCCEEDED: seed={} func={} path={}", seed, func_name, path); - } - } - catch (const zkutil::KeeperException & e) - { - if (unlikely(logger)) - LOG_TRACE( - logger, - "ZooKeeperWithFaultInjection call FAILED: seed={} func={} path={} code={} message={} ", - seed, - func_name, - path, - e.code, - e.message()); + Coordination::Error tryCreate(const std::string & path, const std::string & data, int32_t mode); - /// save valid pointer to clean up ephemeral nodes later if necessary - if (keeper) - keeper_prev = keeper; - keeper.reset(); + Coordination::Responses multi(const Coordination::Requests & requests); - /// for try*NoThrow() methods - if constexpr (no_throw_access) - return e.code; + void createIfNotExists(const std::string & path, const std::string & data); - if constexpr (std::is_same_v) - { - /// try*() methods throws at least on hardware error and return only on user errors - /// todo: the methods return only on subset of user errors, and throw on another errors - /// to mimic the methods exactly - we need to specify errors on which to return for each such method - if (Coordination::isHardwareError(e.code)) - throw; + void createOrUpdate(const std::string & path, const std::string & data, int32_t mode); - return e.code; - } + void createAncestors(const std::string & path); - throw; - } - } + Coordination::Error tryRemove(const std::string & path, int32_t version = -1); + + void removeRecursive(const std::string & path); + + void tryRemoveRecursive(const std::string & path); + + void removeChildren(const std::string & path); + + bool tryRemoveChildrenRecursive( + const std::string & path, bool probably_flat = false, zkutil::RemoveException keep_child = zkutil::RemoveException{}); + + bool waitForDisappear(const std::string & path, const zkutil::ZooKeeper::WaitCondition & condition = {}); + + std::string sync(const std::string & path); + + Coordination::Error + trySet(const std::string & path, const std::string & data, int32_t version = -1, Coordination::Stat * stat = nullptr); + + void checkExistsAndGetCreateAncestorsOps(const std::string & path, Coordination::Requests & requests); + + void 
deleteEphemeralNodeIfContentMatches(const std::string & path, const std::string & fast_delete_if_equal_value); + + Coordination::Error tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses); + + Coordination::Error tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses); + + /// + /// mirror ZooKeeper interface: Async functions + /// Note that there is not guarantees that the parameters will live until the internal callback is called + /// so we might need to copy them + /// + + zkutil::ZooKeeper::FutureExists asyncExists(std::string path, Coordination::WatchCallback watch_callback = {}); + + zkutil::ZooKeeper::FutureGet asyncTryGet(std::string path); + + zkutil::ZooKeeper::FutureMulti asyncTryMultiNoThrow(const Coordination::Requests & ops); + + zkutil::ZooKeeper::FutureRemove asyncTryRemove(std::string path, int32_t version = -1); + + zkutil::ZooKeeper::FutureRemove asyncTryRemoveNoThrow(const std::string & path, int32_t version = -1); }; using ZooKeeperWithFaultInjectionPtr = ZooKeeperWithFaultInjection::Ptr; diff --git a/src/Common/assertProcessUserMatchesDataOwner.cpp b/src/Common/assertProcessUserMatchesDataOwner.cpp index f2557a4aaaf..a2f87825257 100644 --- a/src/Common/assertProcessUserMatchesDataOwner.cpp +++ b/src/Common/assertProcessUserMatchesDataOwner.cpp @@ -31,7 +31,8 @@ namespace const auto error = getpwuid_r(user_id, &passwd_entry, buffer.data(), buffer_size, &result); if (error) - throwFromErrno("Failed to find user name for " + std::to_string(user_id), ErrorCodes::FAILED_TO_GETPWUID, error); + ErrnoException::throwWithErrno( + ErrorCodes::FAILED_TO_GETPWUID, error, "Failed to find user name for {}", std::to_string(user_id)); else if (result) return result->pw_name; return std::to_string(user_id); diff --git a/src/Common/atomicRename.cpp b/src/Common/atomicRename.cpp index 69e077e38f5..44e02995858 100644 --- a/src/Common/atomicRename.cpp +++ b/src/Common/atomicRename.cpp @@ -87,10 +87,12 @@ static bool renameat2(const std::string & old_path, const std::string & new_path return false; if (errno == EEXIST) - throwFromErrno(fmt::format("Cannot rename {} to {} because the second path already exists", old_path, new_path), ErrorCodes::ATOMIC_RENAME_FAIL); + throw ErrnoException( + ErrorCodes::ATOMIC_RENAME_FAIL, "Cannot rename {} to {} because the second path already exists", old_path, new_path); if (errno == ENOENT) - throwFromErrno(fmt::format("Paths cannot be exchanged because {} or {} does not exist", old_path, new_path), ErrorCodes::ATOMIC_RENAME_FAIL); - throwFromErrnoWithPath(fmt::format("Cannot rename {} to {}", old_path, new_path), new_path, ErrorCodes::SYSTEM_ERROR); + throw ErrnoException( + ErrorCodes::ATOMIC_RENAME_FAIL, "Paths cannot be exchanged because {} or {} does not exist", old_path, new_path); + ErrnoException::throwFromPath(ErrorCodes::SYSTEM_ERROR, new_path, "Cannot rename {} to {}", old_path, new_path); } bool supportsAtomicRename() @@ -139,11 +141,12 @@ static bool renameat2(const std::string & old_path, const std::string & new_path if (errnum == ENOTSUP || errnum == EINVAL) return false; if (errnum == EEXIST) - throwFromErrno(fmt::format("Cannot rename {} to {} because the second path already exists", old_path, new_path), ErrorCodes::ATOMIC_RENAME_FAIL); + throw ErrnoException( + ErrorCodes::ATOMIC_RENAME_FAIL, "Cannot rename {} to {} because the second path already exists", old_path, new_path); if (errnum == ENOENT) - throwFromErrno(fmt::format("Paths cannot be exchanged because {} 
or {} does not exist", old_path, new_path), ErrorCodes::ATOMIC_RENAME_FAIL); - throwFromErrnoWithPath( - fmt::format("Cannot rename {} to {}: {}", old_path, new_path, strerror(errnum)), new_path, ErrorCodes::SYSTEM_ERROR); + throw ErrnoException( + ErrorCodes::ATOMIC_RENAME_FAIL, "Paths cannot be exchanged because {} or {} does not exist", old_path, new_path); + ErrnoException::throwFromPath(ErrorCodes::SYSTEM_ERROR, new_path, "Cannot rename {} to {}", old_path, new_path); } diff --git a/src/Common/checkStackSize.cpp b/src/Common/checkStackSize.cpp index 5ab8d124fe4..8c2a0aaed7f 100644 --- a/src/Common/checkStackSize.cpp +++ b/src/Common/checkStackSize.cpp @@ -54,7 +54,7 @@ static size_t getStackSize(void ** out_address) # if defined(OS_FREEBSD) || defined(OS_SUNOS) pthread_attr_init(&attr); if (0 != pthread_attr_get_np(pthread_self(), &attr)) - throwFromErrno("Cannot pthread_attr_get_np", ErrorCodes::CANNOT_PTHREAD_ATTR); + throw ErrnoException(ErrorCodes::CANNOT_PTHREAD_ATTR, "Cannot pthread_attr_get_np"); # else if (0 != pthread_getattr_np(pthread_self(), &attr)) { @@ -64,14 +64,14 @@ static size_t getStackSize(void ** out_address) return 0; } else - throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR); + throw ErrnoException(ErrorCodes::CANNOT_PTHREAD_ATTR, "Cannot pthread_getattr_np"); } # endif SCOPE_EXIT({ pthread_attr_destroy(&attr); }); if (0 != pthread_attr_getstack(&attr, &address, &size)) - throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR); + throw ErrnoException(ErrorCodes::CANNOT_PTHREAD_ATTR, "Cannot pthread_attr_getstack"); #ifdef USE_MUSL /// Adjust stack size for the main thread under musl. diff --git a/src/Common/config.h.in b/src/Common/config.h.in index c4869094010..f84e28942c5 100644 --- a/src/Common/config.h.in +++ b/src/Common/config.h.in @@ -62,6 +62,7 @@ #cmakedefine01 FIU_ENABLE #cmakedefine01 USE_BCRYPT #cmakedefine01 USE_LIBARCHIVE +#cmakedefine01 USE_POCKETFFT /// This is needed for .incbin in assembly. For some reason, include paths don't work there in presence of LTO. /// That's why we use absolute paths. 
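The hunks in atomicRename.cpp and checkStackSize.cpp above, and the ones that follow for createHardLink.cpp and filesystemHelpers.cpp, all make the same mechanical change: the free functions throwFromErrno and throwFromErrnoWithPath are replaced either by throwing ErrnoException directly or by static helpers such as ErrnoException::throwFromPath that fold the offending path into the message. The sketch below is a minimal standalone approximation of that calling pattern, not ClickHouse's actual ErrnoException (which integrates with DB::Exception, ErrorCodes and fmt-style formatting); ErrnoLikeException and its members here are illustrative names only.

#include <cerrno>
#include <cstring>
#include <iostream>
#include <stdexcept>
#include <string>
#include <sys/stat.h>

/// Minimal stand-in for an errno-carrying exception: it captures errno once,
/// keeps a logical error code, and appends strerror() to the message.
class ErrnoLikeException : public std::runtime_error
{
public:
    ErrnoLikeException(int code_, int saved_errno_, const std::string & message)
        : std::runtime_error(message + ", errno: " + std::to_string(saved_errno_)
                             + ", strerror: " + std::strerror(saved_errno_))
        , code(code_)
        , saved_errno(saved_errno_)
    {
    }

    /// Analogue of the throwFromPath helper used in the patch: the path is folded
    /// into the message so call sites stay one-liners.
    [[noreturn]] static void throwFromPath(int code_, const std::string & path, const std::string & message)
    {
        throw ErrnoLikeException(code_, errno, message + " (path: " + path + ")");
    }

    int code;        /// stands in for an ErrorCodes::* constant
    int saved_errno; /// errno captured at the failure site
};

int main()
{
    const std::string path = "/definitely/missing/path";
    struct stat st{};
    try
    {
        /// Same shape as the rewritten call sites: check the syscall, then throw via the helper.
        if (0 != lstat(path.c_str(), &st))
            ErrnoLikeException::throwFromPath(/*CANNOT_STAT*/ 1, path, "Cannot stat " + path);
    }
    catch (const ErrnoLikeException & e)
    {
        std::cerr << e.what() << '\n';
        return e.code;
    }
    return 0;
}

The point of the pattern is that errno is captured exactly once, at the failure site, before any logging or allocation can clobber it, and the logical error code travels with the exception instead of being reconstructed by every caller.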
diff --git a/src/Common/createHardLink.cpp b/src/Common/createHardLink.cpp index 238851d7f01..f8a9dfa97c1 100644 --- a/src/Common/createHardLink.cpp +++ b/src/Common/createHardLink.cpp @@ -26,19 +26,21 @@ void createHardLink(const String & source_path, const String & destination_path) struct stat destination_descr; if (0 != lstat(source_path.c_str(), &source_descr)) - throwFromErrnoWithPath("Cannot stat " + source_path, source_path, ErrorCodes::CANNOT_STAT); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_STAT, source_path, "Cannot stat {}", source_path); if (0 != lstat(destination_path.c_str(), &destination_descr)) - throwFromErrnoWithPath("Cannot stat " + destination_path, destination_path, ErrorCodes::CANNOT_STAT); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_STAT, destination_path, "Cannot stat {}", destination_path); if (source_descr.st_ino != destination_descr.st_ino) - throwFromErrnoWithPath( - "Destination file " + destination_path + " is already exist and have different inode.", - destination_path, ErrorCodes::CANNOT_LINK, link_errno); + ErrnoException::throwFromPathWithErrno( + ErrorCodes::CANNOT_LINK, + destination_path, + link_errno, + "Destination file {} already exists and has a different inode", + destination_path); } else - throwFromErrnoWithPath("Cannot link " + source_path + " to " + destination_path, destination_path, - ErrorCodes::CANNOT_LINK); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_LINK, destination_path, "Cannot link {} to {}", source_path, destination_path); } } diff --git a/src/Common/examples/arena_with_free_lists.cpp b/src/Common/examples/arena_with_free_lists.cpp index 3f1b3e88328..63c2f231261 100644 --- a/src/Common/examples/arena_with_free_lists.cpp +++ b/src/Common/examples/arena_with_free_lists.cpp @@ -248,7 +248,7 @@ int main(int argc, char ** argv) rusage resource_usage; if (0 != getrusage(RUSAGE_SELF, &resource_usage)) - throwFromErrno("Cannot getrusage", ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot getrusage"); size_t allocated_bytes = resource_usage.ru_maxrss * 1024; std::cerr << "Current memory usage: " << allocated_bytes << " bytes.\n"; diff --git a/src/Common/examples/shell_command_inout.cpp b/src/Common/examples/shell_command_inout.cpp index 615700cd042..a646dfba311 100644 --- a/src/Common/examples/shell_command_inout.cpp +++ b/src/Common/examples/shell_command_inout.cpp @@ -6,6 +6,7 @@ #include #include #include +#include /** This example shows how we can proxy stdin to ShellCommand and obtain stdout in streaming fashion. 
*/ diff --git a/src/Common/examples/thread_creation_latency.cpp b/src/Common/examples/thread_creation_latency.cpp index 60fb27dc345..d511cab9a0e 100644 --- a/src/Common/examples/thread_creation_latency.cpp +++ b/src/Common/examples/thread_creation_latency.cpp @@ -82,9 +82,9 @@ int main(int argc, char ** argv) { pthread_t thread; if (pthread_create(&thread, nullptr, g, nullptr)) - DB::throwFromErrno("Cannot create thread.", DB::ErrorCodes::PTHREAD_ERROR); + throw DB::ErrnoException(DB::ErrorCodes::PTHREAD_ERROR, "Cannot create thread"); if (pthread_join(thread, nullptr)) - DB::throwFromErrno("Cannot join thread.", DB::ErrorCodes::PTHREAD_ERROR); + throw DB::ErrnoException(DB::ErrorCodes::PTHREAD_ERROR, "Cannot join thread"); }); test(n, "Create and destroy std::thread each iteration", [] diff --git a/src/Common/filesystemHelpers.cpp b/src/Common/filesystemHelpers.cpp index eabc7bdafbb..0d3b5cb83c8 100644 --- a/src/Common/filesystemHelpers.cpp +++ b/src/Common/filesystemHelpers.cpp @@ -49,7 +49,7 @@ struct statvfs getStatVFS(const String & path) { if (errno == EINTR) continue; - throwFromErrnoWithPath("Could not calculate available disk space (statvfs)", path, ErrorCodes::CANNOT_STATVFS); + DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_STATVFS, path, "Could not calculate available disk space (statvfs)"); } return fs; } @@ -79,7 +79,7 @@ String getBlockDeviceId([[maybe_unused]] const String & path) #if defined(OS_LINUX) struct stat sb; if (lstat(path.c_str(), &sb)) - throwFromErrnoWithPath("Cannot lstat " + path, path, ErrorCodes::CANNOT_STAT); + DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_STAT, path, "Cannot lstat {}", path); WriteBufferFromOwnString ss; ss << major(sb.st_dev) << ":" << minor(sb.st_dev); return ss.str(); @@ -164,7 +164,7 @@ std::filesystem::path getMountPoint(std::filesystem::path absolute_path) { struct stat st; if (stat(p.c_str(), &st)) /// NOTE: man stat does not list EINTR as possible error - throwFromErrnoWithPath("Cannot stat " + p.string(), p.string(), ErrorCodes::SYSTEM_ERROR); + DB::ErrnoException::throwFromPath(DB::ErrorCodes::SYSTEM_ERROR, p.string(), "Cannot stat {}", p.string()); return st.st_dev; }; @@ -250,10 +250,8 @@ size_t getSizeFromFileDescriptor(int fd, const String & file_name) int res = fstat(fd, &buf); if (-1 == res) { - throwFromErrnoWithPath( - "Cannot execute fstat" + (file_name.empty() ? "" : " file: " + file_name), - file_name, - ErrorCodes::CANNOT_FSTAT); + DB::ErrnoException::throwFromPath( + DB::ErrorCodes::CANNOT_FSTAT, file_name, "Cannot execute fstat{}", file_name.empty() ? 
"" : " file: " + file_name); } return buf.st_size; } @@ -263,10 +261,7 @@ Int64 getINodeNumberFromPath(const String & path) struct stat file_stat; if (stat(path.data(), &file_stat)) { - throwFromErrnoWithPath( - "Cannot execute stat for file " + path, - path, - ErrorCodes::CANNOT_STAT); + DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_STAT, path, "Cannot execute stat for file {}", path); } return file_stat.st_ino; } @@ -302,7 +297,7 @@ bool createFile(const std::string & path) close(n); return true; } - DB::throwFromErrnoWithPath("Cannot create file: " + path, path, DB::ErrorCodes::CANNOT_CREATE_FILE); + DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_CREATE_FILE, path, "Cannot create file: {}", path); } bool exists(const std::string & path) @@ -317,7 +312,7 @@ bool canRead(const std::string & path) return true; if (errno == EACCES) return false; - DB::throwFromErrnoWithPath("Cannot check read access to file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED); + DB::ErrnoException::throwFromPath(DB::ErrorCodes::PATH_ACCESS_DENIED, path, "Cannot check read access to file: {}", path); } bool canWrite(const std::string & path) @@ -327,7 +322,7 @@ bool canWrite(const std::string & path) return true; if (errno == EACCES) return false; - DB::throwFromErrnoWithPath("Cannot check write access to file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED); + DB::ErrnoException::throwFromPath(DB::ErrorCodes::PATH_ACCESS_DENIED, path, "Cannot check write access to file: {}", path); } bool canExecute(const std::string & path) @@ -337,7 +332,7 @@ bool canExecute(const std::string & path) return true; if (errno == EACCES) return false; - DB::throwFromErrnoWithPath("Cannot check write access to file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED); + DB::ErrnoException::throwFromPath(DB::ErrorCodes::PATH_ACCESS_DENIED, path, "Cannot check execute access to file: {}", path); } time_t getModificationTime(const std::string & path) @@ -369,7 +364,7 @@ void setModificationTime(const std::string & path, time_t time) tb.actime = time; tb.modtime = time; if (utime(path.c_str(), &tb) != 0) - DB::throwFromErrnoWithPath("Cannot set modification time for file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED); + DB::ErrnoException::throwFromPath(DB::ErrorCodes::PATH_ACCESS_DENIED, path, "Cannot set modification time to file: {}", path); } bool isSymlink(const fs::path & path) diff --git a/src/Common/format.h b/src/Common/format.h index b72c4f15125..27018f64064 100644 --- a/src/Common/format.h +++ b/src/Common/format.h @@ -123,7 +123,7 @@ namespace Format throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot switch from automatic field numbering to manual field specification"); is_plain_numbering = true; if (index_if_plain >= argument_number) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument is too big for formatting"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Not enough arguments to fill the placeholders in the format string"); index_positions.back() = index_if_plain++; } else diff --git a/src/Common/hasLinuxCapability.cpp b/src/Common/hasLinuxCapability.cpp index 5d823b4ecaf..bf236eb5c56 100644 --- a/src/Common/hasLinuxCapability.cpp +++ b/src/Common/hasLinuxCapability.cpp @@ -27,7 +27,7 @@ static __user_cap_data_struct getCapabilities() /// Avoid dependency on 'libcap'. 
if (0 != syscall(SYS_capget, &request, &response)) - throwFromErrno("Cannot do 'capget' syscall", ErrorCodes::NETLINK_ERROR); + throw ErrnoException(ErrorCodes::NETLINK_ERROR, "Cannot do 'capget' syscall"); return response; } diff --git a/src/Common/isLocalAddress.cpp b/src/Common/isLocalAddress.cpp index ac6daf620d0..399de9d89a0 100644 --- a/src/Common/isLocalAddress.cpp +++ b/src/Common/isLocalAddress.cpp @@ -28,9 +28,7 @@ struct NetworkInterfaces : public boost::noncopyable NetworkInterfaces() { if (getifaddrs(&ifaddr) == -1) - { - throwFromErrno("Cannot getifaddrs", ErrorCodes::SYSTEM_ERROR); - } + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot getifaddrs"); } bool hasAddress(const Poco::Net::IPAddress & address) const diff --git a/src/Common/memcpySmall.h b/src/Common/memcpySmall.h index 5eaa1203f05..0c2aee96250 100644 --- a/src/Common/memcpySmall.h +++ b/src/Common/memcpySmall.h @@ -1,6 +1,7 @@ #pragma once #include +#include /// ssize_t #ifdef __SSE2__ # include diff --git a/src/Common/mysqlxx/Query.cpp b/src/Common/mysqlxx/Query.cpp index 42c35d26ecf..e30ed2b75c8 100644 --- a/src/Common/mysqlxx/Query.cpp +++ b/src/Common/mysqlxx/Query.cpp @@ -6,7 +6,7 @@ #include #endif -#include +#include #include #include @@ -52,8 +52,7 @@ void Query::executeImpl() { MYSQL* mysql_driver = conn->getDriver(); - auto & logger = Poco::Logger::get("mysqlxx::Query"); - logger.trace("Running MySQL query using connection %lu", mysql_thread_id(mysql_driver)); + LOG_TRACE(&Poco::Logger::get("mysqlxx::Query"), "Running MySQL query using connection {}", mysql_thread_id(mysql_driver)); if (mysql_real_query(mysql_driver, query.data(), query.size())) { const auto err_no = mysql_errno(mysql_driver); diff --git a/src/Common/parseRemoteDescription.cpp b/src/Common/parseRemoteDescription.cpp index 8ea3f4a0aa5..7b2045b9de1 100644 --- a/src/Common/parseRemoteDescription.cpp +++ b/src/Common/parseRemoteDescription.cpp @@ -184,7 +184,7 @@ std::vector> parseRemoteDescriptionForExternalDataba } else { - result.emplace_back(std::make_pair(address.substr(0, colon), DB::parseFromString(address.substr(colon + 1)))); + result.emplace_back(std::make_pair(address.substr(0, colon), parseFromString(address.substr(colon + 1)))); } } diff --git a/src/Common/parseRemoteDescription.h b/src/Common/parseRemoteDescription.h index d97558c4728..12435bc68a0 100644 --- a/src/Common/parseRemoteDescription.h +++ b/src/Common/parseRemoteDescription.h @@ -1,8 +1,12 @@ #pragma once + #include #include + + namespace DB { + /* Parse a string that generates shards and replicas. Separator - one of two characters '|' or ',' * depending on whether shards or replicas are generated. * For example: diff --git a/src/Common/randomSeed.cpp b/src/Common/randomSeed.cpp index e10ef87283f..e9616abf7ca 100644 --- a/src/Common/randomSeed.cpp +++ b/src/Common/randomSeed.cpp @@ -24,7 +24,7 @@ DB::UInt64 randomSeed() { struct timespec times; if (clock_gettime(CLOCK_MONOTONIC, ×)) - DB::throwFromErrno("Cannot clock_gettime.", DB::ErrorCodes::CANNOT_CLOCK_GETTIME); + throw DB::ErrnoException(DB::ErrorCodes::CANNOT_CLOCK_GETTIME, "Cannot clock_gettime"); /// Not cryptographically secure as time, pid and stack address can be predictable. 
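The randomSeed.cpp hunk above only swaps the error-reporting call, but the comment next to it ("Not cryptographically secure as time, pid and stack address can be predictable") describes how such a seed is composed. Below is a simplified, self-contained sketch of that idea, combining a monotonic clock reading, the pid and a stack address and then running the result through a mixer; the SplitMix64-style finalizer and the exact way the inputs are combined are assumptions for illustration, not ClickHouse's actual implementation.

#include <cstdint>
#include <ctime>
#include <iostream>
#include <stdexcept>
#include <unistd.h>

/// SplitMix64-style finalizer: cheap avalanching, good enough for test seeds.
static uint64_t mix64(uint64_t x)
{
    x ^= x >> 30; x *= 0xbf58476d1ce4e5b9ULL;
    x ^= x >> 27; x *= 0x94d049bb133111ebULL;
    x ^= x >> 31;
    return x;
}

uint64_t randomSeedSketch()
{
    timespec times{};
    if (clock_gettime(CLOCK_MONOTONIC, &times))
        throw std::runtime_error("Cannot clock_gettime"); /// the patch throws ErrnoException here

    int stack_marker = 0; /// its address varies between runs because of ASLR
    uint64_t seed = static_cast<uint64_t>(times.tv_nsec)
        ^ (static_cast<uint64_t>(times.tv_sec) << 32)
        ^ (static_cast<uint64_t>(getpid()) << 16)
        ^ static_cast<uint64_t>(reinterpret_cast<uintptr_t>(&stack_marker));
    return mix64(seed); /// inputs are predictable, so this is not cryptographically secure
}

int main()
{
    std::cout << randomSeedSketch() << '\n';
}

Because every input is observable or guessable, a seed like this is fine for fault injection, test shuffling and similar uses, but it must not be relied on for anything security-sensitive.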
diff --git a/src/Common/remapExecutable.cpp b/src/Common/remapExecutable.cpp index 206314ea295..911447d3adc 100644 --- a/src/Common/remapExecutable.cpp +++ b/src/Common/remapExecutable.cpp @@ -120,7 +120,7 @@ __attribute__((__noinline__)) void remapToHugeStep1(void * begin, size_t size) void * scratch = mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (MAP_FAILED == scratch) - throwFromErrno(fmt::format("Cannot mmap {} bytes", size), ErrorCodes::CANNOT_ALLOCATE_MEMORY); + throw ErrnoException(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Cannot mmap {} bytes", size); memcpy(scratch, begin, size); diff --git a/src/Common/setThreadName.cpp b/src/Common/setThreadName.cpp index f90398825af..aae80272206 100644 --- a/src/Common/setThreadName.cpp +++ b/src/Common/setThreadName.cpp @@ -28,25 +28,31 @@ namespace ErrorCodes static thread_local char thread_name[THREAD_NAME_SIZE]{}; -void setThreadName(const char * name) +void setThreadName(const char * name, bool truncate) { - if (strlen(name) > THREAD_NAME_SIZE - 1) + size_t name_len = strlen(name); + if (!truncate && name_len > THREAD_NAME_SIZE - 1) throw DB::Exception(DB::ErrorCodes::PTHREAD_ERROR, "Thread name cannot be longer than 15 bytes"); + size_t name_capped_len = std::min(1 + name_len, THREAD_NAME_SIZE - 1); + char name_capped[THREAD_NAME_SIZE]; + memcpy(name_capped, name, name_capped_len); + name_capped[name_capped_len] = '\0'; + #if defined(OS_FREEBSD) - pthread_set_name_np(pthread_self(), name); + pthread_set_name_np(pthread_self(), name_capped); if ((false)) #elif defined(OS_DARWIN) - if (0 != pthread_setname_np(name)) + if (0 != pthread_setname_np(name_capped)) #elif defined(OS_SUNOS) - if (0 != pthread_setname_np(pthread_self(), name)) + if (0 != pthread_setname_np(pthread_self(), name_capped)) #else - if (0 != prctl(PR_SET_NAME, name, 0, 0, 0)) + if (0 != prctl(PR_SET_NAME, name_capped, 0, 0, 0)) #endif if (errno != ENOSYS && errno != EPERM) /// It's ok if the syscall is unsupported or not allowed in some environments. - DB::throwFromErrno("Cannot set thread name with prctl(PR_SET_NAME, ...)", DB::ErrorCodes::PTHREAD_ERROR); + throw DB::ErrnoException(DB::ErrorCodes::PTHREAD_ERROR, "Cannot set thread name with prctl(PR_SET_NAME, ...)"); - memcpy(thread_name, name, std::min(1 + strlen(name), THREAD_NAME_SIZE - 1)); + memcpy(thread_name, name_capped, name_capped_len); } const char * getThreadName() @@ -64,7 +70,7 @@ const char * getThreadName() #else if (0 != prctl(PR_GET_NAME, thread_name, 0, 0, 0)) if (errno != ENOSYS && errno != EPERM) /// It's ok if the syscall is unsupported or not allowed in some environments. - DB::throwFromErrno("Cannot get thread name with prctl(PR_GET_NAME)", DB::ErrorCodes::PTHREAD_ERROR); + throw DB::ErrnoException(DB::ErrorCodes::PTHREAD_ERROR, "Cannot get thread name with prctl(PR_GET_NAME)"); #endif return thread_name; diff --git a/src/Common/setThreadName.h b/src/Common/setThreadName.h index 1834ea9696f..fdb2717925f 100644 --- a/src/Common/setThreadName.h +++ b/src/Common/setThreadName.h @@ -4,7 +4,9 @@ /** Sets the thread name (maximum length is 15 bytes), * which will be visible in ps, gdb, /proc, * for convenience of observation and debugging. 
+ * + * @param truncate - if true, will truncate to 15 automatically, otherwise throw */ -void setThreadName(const char * name); +void setThreadName(const char * name, bool truncate = false); const char * getThreadName(); diff --git a/src/Common/tests/gtest_async_loader.cpp b/src/Common/tests/gtest_async_loader.cpp index f2e741e4b9f..5c54dedbbde 100644 --- a/src/Common/tests/gtest_async_loader.cpp +++ b/src/Common/tests/gtest_async_loader.cpp @@ -902,66 +902,6 @@ TEST(AsyncLoader, SetMaxThreads) t.loader.wait(); } -TEST(AsyncLoader, DynamicPools) -{ - const size_t max_threads[] { 2, 10 }; - const int jobs_in_chain = 16; - AsyncLoaderTest t({ - {.max_threads = max_threads[0], .priority{0}}, - {.max_threads = max_threads[1], .priority{-1}}, - }); - - t.loader.start(); - - std::atomic executing[2] { 0, 0 }; // Number of currently executing jobs per pool - - for (int concurrency = 1; concurrency <= 12; concurrency++) - { - std::atomic boosted{false}; // Visible concurrency was increased - std::atomic left{concurrency * jobs_in_chain / 2}; // Number of jobs to start before `prioritize()` call - std::shared_mutex prioritization_mutex; // To slow down job execution during prioritization to avoid race condition - - LoadJobSet jobs_to_prioritize; - - auto job_func = [&] (AsyncLoader & loader, const LoadJobPtr & self) - { - auto pool_id = self->executionPool(); - executing[pool_id]++; - if (executing[pool_id] > max_threads[0]) - boosted = true; - ASSERT_LE(executing[pool_id], max_threads[pool_id]); - - // Dynamic prioritization - if (--left == 0) - { - std::unique_lock lock{prioritization_mutex}; - for (const auto & job : jobs_to_prioritize) - loader.prioritize(job, 1); - } - - std::shared_lock lock{prioritization_mutex}; - t.randomSleepUs(100, 200, 100); - - ASSERT_LE(executing[pool_id], max_threads[pool_id]); - executing[pool_id]--; - }; - - std::vector tasks; - tasks.reserve(concurrency); - for (int i = 0; i < concurrency; i++) - tasks.push_back(makeLoadTask(t.loader, t.chainJobSet(jobs_in_chain, job_func, fmt::format("c{}-j", i)))); - jobs_to_prioritize = getGoals(tasks); // All jobs - scheduleLoad(tasks); - waitLoad(tasks); - - ASSERT_EQ(executing[0], 0); - ASSERT_EQ(executing[1], 0); - ASSERT_EQ(boosted, concurrency > 2); - boosted = false; - } - -} - TEST(AsyncLoader, SubJobs) { AsyncLoaderTest t(1); @@ -1000,7 +940,7 @@ TEST(AsyncLoader, SubJobs) std::atomic jobs_left; // It is a good practice to keep load task inside the component: // 1) to make sure it outlives its load jobs; - // 2) to avoid removing load jobs from `system.async_loader` while we use the component + // 2) to avoid removing load jobs from `system.asynchronous_loader` while we use the component LoadTaskPtr load_task; }; @@ -1070,7 +1010,7 @@ TEST(AsyncLoader, RecursiveJob) std::atomic jobs_left; // It is a good practice to keep load task inside the component: // 1) to make sure it outlives its load jobs; - // 2) to avoid removing load jobs from `system.async_loader` while we use the component + // 2) to avoid removing load jobs from `system.asynchronous_loader` while we use the component LoadTaskPtr load_task; }; diff --git a/src/Common/tests/gtest_config_dot.cpp b/src/Common/tests/gtest_config_dot.cpp new file mode 100644 index 00000000000..d88d896677b --- /dev/null +++ b/src/Common/tests/gtest_config_dot.cpp @@ -0,0 +1,30 @@ +#include +#include +#include +#include + +#include + + +using namespace DB; + +TEST(Common, ConfigWithDotInKeys) +{ + std::string xml(R"CONFIG( + 1 +)CONFIG"); + + Poco::XML::DOMParser dom_parser; + 
Poco::AutoPtr document = dom_parser.parseString(xml); + Poco::AutoPtr config = new Poco::Util::XMLConfiguration(document); + + /// directly + EXPECT_EQ(ConfigHelper::getBool(*config, "foo.bar", false, false), false); + EXPECT_EQ(ConfigHelper::getBool(*config, "foo\\.bar", false, false), true); + + /// via keys() + Poco::Util::AbstractConfiguration::Keys keys; + config->keys("", keys); + ASSERT_EQ(1, keys.size()); + ASSERT_EQ("foo\\.bar", keys[0]); +} diff --git a/src/Common/tests/gtest_rw_lock.cpp b/src/Common/tests/gtest_rw_lock.cpp index 16ba01d02c6..08a14aba8fb 100644 --- a/src/Common/tests/gtest_rw_lock.cpp +++ b/src/Common/tests/gtest_rw_lock.cpp @@ -31,11 +31,13 @@ namespace public: Events() : start_time(std::chrono::steady_clock::now()) {} - void add(String && event) + void add(String && event, std::chrono::milliseconds correction = std::chrono::milliseconds::zero()) { String timepoint = std::to_string(std::chrono::duration_cast(std::chrono::steady_clock::now() - start_time).count()); if (timepoint.length() < 5) timepoint.insert(0, 5 - timepoint.length(), ' '); + if (correction.count()) + std::this_thread::sleep_for(correction); std::lock_guard lock{mutex}; //std::cout << timepoint << " : " << event << std::endl; events.emplace_back(std::move(event)); @@ -324,6 +326,22 @@ TEST(Common, RWLockNotUpgradeableWithNoQuery) TEST(Common, RWLockWriteLockTimeoutDuringRead) { + /// 0 100 200 300 400 + /// <---------------------------------------- ra ----------------------------------------------> + /// <----- wc (acquiring lock, failed by timeout) -----> + /// + /// + /// 0 : Locking ra + /// 0 : Locked ra + /// 100 : Locking wc + /// 300 : Failed to lock wc + /// 400 : Unlocking ra + /// 400 : Unlocked ra + /// 400 : Locking wd + /// 400 : Locked wd + /// 400 : Unlocking wd + /// 400 : Unlocked wd + static auto rw_lock = RWLockImpl::create(); Events events; @@ -379,6 +397,27 @@ TEST(Common, RWLockWriteLockTimeoutDuringRead) TEST(Common, RWLockWriteLockTimeoutDuringTwoReads) { + /// 0 100 200 300 400 500 + /// <---------------------------------------- ra -----------------------------------------------> + /// <------ wc (acquiring lock, failed by timeout) -------> + /// <-- rb (acquiring lock) --><---------- rb (locked) ------------> + /// + /// + /// 0 : Locking ra + /// 0 : Locked ra + /// 100 : Locking wc + /// 200 : Locking rb + /// 300 : Failed to lock wc + /// 300 : Locked rb + /// 400 : Unlocking ra + /// 400 : Unlocked ra + /// 500 : Unlocking rb + /// 500 : Unlocked rb + /// 501 : Locking wd + /// 501 : Locked wd + /// 501 : Unlocking wd + /// 501 : Unlocked wd + static auto rw_lock = RWLockImpl::create(); Events events; @@ -402,10 +441,14 @@ TEST(Common, RWLockWriteLockTimeoutDuringTwoReads) events.add("Locking rb"); auto rb = rw_lock->getLock(RWLockImpl::Read, "rb"); - events.add(rb ? "Locked rb" : "Failed to lock rb"); + + /// `correction` is used here to add an event to `events` a little later. + /// (Because the event "Locked rb" happens at nearly the same time as "Failed to lock wc" and we don't want our test to be flaky.) + auto correction = std::chrono::duration(50); + events.add(rb ? 
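// gtest_config_dot.cpp above pins down how a key that literally contains a dot is addressed:
// '.' is the hierarchy separator, so the literal key is escaped as "foo\.bar" both when looking it
// up and when it comes back from keys(). A standalone sketch of splitting a config path on
// unescaped dots (split_config_path is illustrative, not the ClickHouse/Poco implementation):

#include <cassert>
#include <string>
#include <vector>

std::vector<std::string> split_config_path(const std::string & path)
{
    std::vector<std::string> parts(1);
    for (size_t i = 0; i < path.size(); ++i)
    {
        if (path[i] == '\\' && i + 1 < path.size() && path[i + 1] == '.')
        {
            parts.back() += '.'; /// "\." is a literal dot inside one key
            ++i;
        }
        else if (path[i] == '.')
            parts.emplace_back(); /// an unescaped dot starts the next level
        else
            parts.back() += path[i];
    }
    return parts;
}

int main()
{
    assert(split_config_path("foo.bar").size() == 2);   /// two levels: <foo><bar>
    assert(split_config_path("foo\\.bar").size() == 1); /// one level whose key is literally "foo.bar"
    assert(split_config_path("foo\\.bar")[0] == "foo.bar");
    return 0;
}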
"Locked rb" : "Failed to lock rb", correction); EXPECT_NE(rb, nullptr); - std::this_thread::sleep_for(std::chrono::duration(200)); + std::this_thread::sleep_for(std::chrono::duration(200) - correction); events.add("Unlocking rb"); rb.reset(); events.add("Unlocked rb"); @@ -454,6 +497,25 @@ TEST(Common, RWLockWriteLockTimeoutDuringTwoReads) TEST(Common, RWLockWriteLockTimeoutDuringWriteWithWaitingRead) { + /// 0 100 200 300 400 500 + /// <--------------------------------------------------- wa --------------------------------------------------------> + /// <------ wb (acquiring lock, failed by timeout) ------> + /// <-- rc (acquiring lock, failed by timeout) --> + /// + /// + /// 0 : Locking wa + /// 0 : Locked wa + /// 100 : Locking wb + /// 200 : Locking rc + /// 300 : Failed to lock wb + /// 400 : Failed to lock rc + /// 500 : Unlocking wa + /// 500 : Unlocked wa + /// 501 : Locking wd + /// 501 : Locked wd + /// 501 : Unlocking wd + /// 501 : Unlocked wd + static auto rw_lock = RWLockImpl::create(); Events events; diff --git a/src/Compression/CompressionCodecDeflateQpl.cpp b/src/Compression/CompressionCodecDeflateQpl.cpp index 25d809c9726..ee0356adde5 100644 --- a/src/Compression/CompressionCodecDeflateQpl.cpp +++ b/src/Compression/CompressionCodecDeflateQpl.cpp @@ -11,6 +11,7 @@ #include "libaccel_config.h" #include #include +#include #include diff --git a/src/Compression/examples/compressed_buffer.cpp b/src/Compression/examples/compressed_buffer.cpp index aef2cf4ab90..74646ff0f28 100644 --- a/src/Compression/examples/compressed_buffer.cpp +++ b/src/Compression/examples/compressed_buffer.cpp @@ -23,7 +23,7 @@ int main(int, char **) Stopwatch stopwatch; { - DB::WriteBufferFromFile buf("test1", DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_CREAT | O_TRUNC); + DB::WriteBufferFromFile buf("test1", DB::DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_CREAT | O_TRUNC); DB::CompressedWriteBuffer compressed_buf(buf); stopwatch.restart(); diff --git a/src/Coordination/CoordinationSettings.h b/src/Coordination/CoordinationSettings.h index 13ef304b353..a58f2b04797 100644 --- a/src/Coordination/CoordinationSettings.h +++ b/src/Coordination/CoordinationSettings.h @@ -43,7 +43,6 @@ struct Settings; M(UInt64, max_requests_batch_bytes_size, 100*1024, "Max size in bytes of batch of requests that can be sent to RAFT", 0) \ M(UInt64, max_flush_batch_size, 1000, "Max size of batch of requests that can be flushed together", 0) \ M(UInt64, max_requests_quick_batch_size, 100, "Max size of batch of requests to try to get before proceeding with RAFT. 
Keeper will not wait for requests but take only requests that are already in queue" , 0) \ - M(UInt64, max_memory_usage_soft_limit, 0, "Soft limit in bytes of keeper memory usage", 0) \ M(Bool, quorum_reads, false, "Execute read requests as writes through whole RAFT consesus with similar speed", 0) \ M(Bool, force_sync, true, "Call fsync on each change in RAFT changelog", 0) \ M(Bool, compress_logs, false, "Write compressed coordination logs in ZSTD format", 0) \ diff --git a/src/Coordination/KeeperAsynchronousMetrics.cpp b/src/Coordination/KeeperAsynchronousMetrics.cpp index 890079e98f7..4471012e917 100644 --- a/src/Coordination/KeeperAsynchronousMetrics.cpp +++ b/src/Coordination/KeeperAsynchronousMetrics.cpp @@ -113,6 +113,12 @@ KeeperAsynchronousMetrics::KeeperAsynchronousMetrics( { } +KeeperAsynchronousMetrics::~KeeperAsynchronousMetrics() +{ + /// NOTE: stop() from base class is not enough, since this leads to leak on vptr + stop(); +} + void KeeperAsynchronousMetrics::updateImpl(AsynchronousMetricValues & new_values, TimePoint /*update_time*/, TimePoint /*current_time*/) { #if USE_NURAFT diff --git a/src/Coordination/KeeperAsynchronousMetrics.h b/src/Coordination/KeeperAsynchronousMetrics.h index 14092c11c15..457a7112507 100644 --- a/src/Coordination/KeeperAsynchronousMetrics.h +++ b/src/Coordination/KeeperAsynchronousMetrics.h @@ -14,6 +14,7 @@ class KeeperAsynchronousMetrics : public AsynchronousMetrics public: KeeperAsynchronousMetrics( ContextPtr context_, int update_period_seconds, const ProtocolServerMetricsFunc & protocol_server_metrics_func_); + ~KeeperAsynchronousMetrics() override; private: ContextPtr context; diff --git a/src/Coordination/KeeperContext.cpp b/src/Coordination/KeeperContext.cpp index 7e0b75a6353..6bb5b066d9f 100644 --- a/src/Coordination/KeeperContext.cpp +++ b/src/Coordination/KeeperContext.cpp @@ -59,6 +59,8 @@ void KeeperContext::initialize(const Poco::Util::AbstractConfiguration & config, } } + updateKeeperMemorySoftLimit(config); + digest_enabled = config.getBool("keeper_server.digest_enabled", false); ignore_system_path_on_startup = config.getBool("keeper_server.ignore_system_path_on_startup", false); @@ -375,4 +377,10 @@ void KeeperContext::initializeFeatureFlags(const Poco::Util::AbstractConfigurati feature_flags.logFlags(&Poco::Logger::get("KeeperContext")); } +void KeeperContext::updateKeeperMemorySoftLimit(const Poco::Util::AbstractConfiguration & config) +{ + if (config.hasProperty("keeper_server.max_memory_usage_soft_limit")) + memory_soft_limit = config.getUInt64("keeper_server.max_memory_usage_soft_limit"); +} + } diff --git a/src/Coordination/KeeperContext.h b/src/Coordination/KeeperContext.h index 1af34b19ccf..c1c34db2c4b 100644 --- a/src/Coordination/KeeperContext.h +++ b/src/Coordination/KeeperContext.h @@ -53,6 +53,9 @@ public: constexpr KeeperDispatcher * getDispatcher() const { return dispatcher; } + UInt64 getKeeperMemorySoftLimit() const { return memory_soft_limit; } + void updateKeeperMemorySoftLimit(const Poco::Util::AbstractConfiguration & config); + /// set to true when we have preprocessed or committed all the logs /// that were already present locally during startup std::atomic local_logs_preprocessed = false; @@ -92,6 +95,8 @@ private: KeeperFeatureFlags feature_flags; KeeperDispatcher * dispatcher{nullptr}; + + std::atomic memory_soft_limit = 0; }; using KeeperContextPtr = std::shared_ptr; diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index 1299e9c9f20..dcd22552fe3 100644 --- 
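// The KeeperContext hunks above move the memory soft limit out of CoordinationSettings: it is now
// read from keeper_server.max_memory_usage_soft_limit, stored in an atomic, refreshed on config
// reload, and (in the following KeeperDispatcher hunk) used to refuse memory-increasing requests.
// A standalone sketch of that "reloadable soft limit" shape (names are illustrative, not the
// Keeper classes):

#include <atomic>
#include <cstdint>
#include <iostream>

std::atomic<uint64_t> memory_soft_limit{0}; /// 0 means "no limit"
std::atomic<int64_t> tracked_memory{0};     /// stands in for total_memory_tracker.get()

/// Called on startup and on every configuration reload.
void update_memory_soft_limit(uint64_t value_from_config)
{
    memory_soft_limit.store(value_from_config, std::memory_order_relaxed);
}

/// Decide whether a request that may allocate more memory should be rejected.
bool should_reject_request()
{
    uint64_t limit = memory_soft_limit.load(std::memory_order_relaxed);
    return limit > 0 && tracked_memory.load(std::memory_order_relaxed) >= static_cast<int64_t>(limit);
}

int main()
{
    update_memory_soft_limit(8ULL * 1024 * 1024 * 1024); /// e.g. 8 GiB from <max_memory_usage_soft_limit>
    tracked_memory = 9LL * 1024 * 1024 * 1024;
    std::cout << (should_reject_request() ? "reject" : "accept") << '\n'; /// prints "reject"
    return 0;
}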
a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -143,7 +143,7 @@ void KeeperDispatcher::requestThread() if (shutdown_called) break; - Int64 mem_soft_limit = configuration_and_settings->coordination_settings->max_memory_usage_soft_limit; + Int64 mem_soft_limit = keeper_context->getKeeperMemorySoftLimit(); if (configuration_and_settings->standalone_keeper && mem_soft_limit > 0 && total_memory_tracker.get() >= mem_soft_limit && checkIfRequestIncreaseMem(request.request)) { LOG_TRACE(log, "Processing requests refused because of max_memory_usage_soft_limit {}, the total used memory is {}, request type is {}", mem_soft_limit, total_memory_tracker.get(), request.request->getOpNum()); @@ -930,6 +930,8 @@ void KeeperDispatcher::updateConfiguration(const Poco::Util::AbstractConfigurati throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push configuration update to queue"); snapshot_s3.updateS3Configuration(config, macros); + + keeper_context->updateKeeperMemorySoftLimit(config); } void KeeperDispatcher::updateKeeperStatLatency(uint64_t process_time_ms) diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index bc5e3a723f2..fb56d58cb72 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -660,6 +660,12 @@ nuraft::cb_func::ReturnCode KeeperServer::callbackFunc(nuraft::cb_func::Type typ switch (type) { + case nuraft::cb_func::PreAppendLogLeader: + { + /// we cannot preprocess anything new as leader because we don't have up-to-date in-memory state + /// until we preprocess all stored logs + return nuraft::cb_func::ReturnCode::ReturnNull; + } case nuraft::cb_func::InitialBatchCommited: { preprocess_logs(); @@ -859,6 +865,10 @@ nuraft::cb_func::ReturnCode KeeperServer::callbackFunc(nuraft::cb_func::Type typ initial_batch_committed = true; return nuraft::cb_func::ReturnCode::Ok; } + case nuraft::cb_func::PreAppendLogLeader: + { + return nuraft::cb_func::ReturnCode::ReturnNull; + } case nuraft::cb_func::PreAppendLogFollower: { const auto & entry = *static_cast(param->ctx); diff --git a/src/Coordination/KeeperSnapshotManagerS3.cpp b/src/Coordination/KeeperSnapshotManagerS3.cpp index 56f64d58e2f..910615bf6ef 100644 --- a/src/Coordination/KeeperSnapshotManagerS3.cpp +++ b/src/Coordination/KeeperSnapshotManagerS3.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -76,7 +77,7 @@ void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractCo LOG_INFO(log, "S3 configuration was updated"); - auto credentials = Aws::Auth::AWSCredentials(auth_settings.access_key_id, auth_settings.secret_access_key); + auto credentials = Aws::Auth::AWSCredentials(auth_settings.access_key_id, auth_settings.secret_access_key, auth_settings.session_token); auto headers = auth_settings.headers; static constexpr size_t s3_max_redirects = 10; @@ -98,10 +99,15 @@ void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractCo client_configuration.endpointOverride = new_uri.endpoint; + S3::ClientSettings client_settings{ + .use_virtual_addressing = new_uri.is_virtual_hosted_style, + .disable_checksum = false, + .gcs_issue_compose_request = false, + }; + auto client = S3::ClientFactory::instance().create( client_configuration, - new_uri.is_virtual_hosted_style, - /* disable_checksum= */ false, + client_settings, credentials.GetAWSAccessKeyId(), credentials.GetAWSSecretKey(), auth_settings.server_side_encryption_customer_key_base64, @@ -208,6 +214,9 @@ void 
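// ClientFactory::create() above now takes an S3::ClientSettings struct instead of a growing list
// of positional bool parameters, filled with designated initializers at the call site. A standalone
// C++20 sketch of that refactoring step (the struct and function below are illustrative):

#include <iostream>

struct ClientSettings
{
    bool use_virtual_addressing = false;
    bool disable_checksum = false;
    bool gcs_issue_compose_request = false;
};

void create_client(const ClientSettings & settings)
{
    std::cout << "virtual addressing: " << settings.use_virtual_addressing << '\n';
}

int main()
{
    /// Self-describing at the call site, and adding a new field does not break existing callers.
    create_client(ClientSettings{
        .use_virtual_addressing = true,
        .disable_checksum = false,
        .gcs_issue_compose_request = false,
    });
    return 0;
}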
KeeperSnapshotManagerS3::uploadSnapshotImpl(const SnapshotFileInfo & snapsh return; } + /// To avoid reference to binding + const auto & snapshot_path_ref = snapshot_path; + SCOPE_EXIT( { LOG_INFO(log, "Removing lock file"); @@ -223,7 +232,7 @@ void KeeperSnapshotManagerS3::uploadSnapshotImpl(const SnapshotFileInfo & snapsh } catch (...) { - LOG_INFO(log, "Failed to delete lock file for {} from S3", snapshot_file_info.path); + LOG_INFO(log, "Failed to delete lock file for {} from S3", snapshot_path_ref); tryLogCurrentException(__PRETTY_FUNCTION__); } }); diff --git a/src/Coordination/KeeperStorage.cpp b/src/Coordination/KeeperStorage.cpp index 0d1d07ec7c5..41e6f5b5e2b 100644 --- a/src/Coordination/KeeperStorage.cpp +++ b/src/Coordination/KeeperStorage.cpp @@ -914,7 +914,7 @@ void KeeperStorage::unregisterEphemeralPath(int64_t session_id, const std::strin { auto ephemerals_it = ephemerals.find(session_id); if (ephemerals_it == ephemerals.end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Session {} is missing ephemeral path"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Session {} is missing ephemeral path", session_id); ephemerals_it->second.erase(path); if (ephemerals_it->second.empty()) diff --git a/src/Coordination/Standalone/Context.cpp b/src/Coordination/Standalone/Context.cpp index dba4a8934b9..374610769c4 100644 --- a/src/Coordination/Standalone/Context.cpp +++ b/src/Coordination/Standalone/Context.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include @@ -34,6 +35,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int UNSUPPORTED_METHOD; } struct ContextSharedPart : boost::noncopyable @@ -62,6 +64,11 @@ struct ContextSharedPart : boost::noncopyable mutable std::unique_ptr asynchronous_local_fs_reader; mutable std::unique_ptr synchronous_local_fs_reader; +#if USE_LIBURING + mutable OnceFlag io_uring_reader_initialized; + mutable std::unique_ptr io_uring_reader; +#endif + mutable OnceFlag threadpool_writer_initialized; mutable std::unique_ptr threadpool_writer; @@ -225,6 +232,17 @@ IAsynchronousReader & Context::getThreadPoolReader(FilesystemReaderType type) co } } +#if USE_LIBURING +IOUringReader & Context::getIOURingReader() const +{ + callOnce(shared->io_uring_reader_initialized, [&] { + shared->io_uring_reader = std::make_unique(512); + }); + + return *shared->io_uring_reader; +} +#endif + std::shared_ptr Context::getFilesystemCacheLog() const { return nullptr; @@ -359,4 +377,9 @@ void Context::updateKeeperConfiguration([[maybe_unused]] const Poco::Util::Abstr shared->keeper_dispatcher->updateConfiguration(getConfigRef(), getMacros()); } +std::shared_ptr Context::getZooKeeper() const +{ + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Cannot connect to ZooKeeper from Keeper"); +} + } diff --git a/src/Coordination/Standalone/Context.h b/src/Coordination/Standalone/Context.h index 7555618233d..a6199864422 100644 --- a/src/Coordination/Standalone/Context.h +++ b/src/Coordination/Standalone/Context.h @@ -20,6 +20,13 @@ #include +#include "config.h" +namespace zkutil +{ + class ZooKeeper; + using ZooKeeperPtr = std::shared_ptr; +} + namespace DB { @@ -28,6 +35,7 @@ class Macros; class FilesystemCacheLog; class FilesystemReadPrefetchesLog; class BlobStorageLog; +class IOUringReader; /// A small class which owns ContextShared. /// We don't use something like unique_ptr directly to allow ContextShared type to be incomplete. 
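// The KeeperStorage fix above supplies the session_id argument that the "Session {} is missing
// ephemeral path" message referenced but never received. With compile-time-checked format strings
// such a mismatch cannot even compile; a standalone C++20 sketch using std::format (ClickHouse's
// Exception uses the fmt library, this only illustrates the failure mode):

#include <cstdint>
#include <format>
#include <iostream>

int main()
{
    int64_t session_id = 42;

    std::cout << std::format("Session {} is missing ephemeral path", session_id) << '\n';

    /// The pre-fix call shape would not compile here: a "{}" placeholder with no matching
    /// argument is rejected at compile time.
    // std::cout << std::format("Session {} is missing ephemeral path") << '\n';

    return 0;
}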
@@ -127,6 +135,9 @@ public: ApplicationType getApplicationType() const { return ApplicationType::KEEPER; } IAsynchronousReader & getThreadPoolReader(FilesystemReaderType type) const; +#if USE_LIBURING + IOUringReader & getIOURingReader() const; +#endif std::shared_ptr getAsyncReadCounters() const; ThreadPool & getThreadPoolWriter() const; @@ -147,6 +158,8 @@ public: void initializeKeeperDispatcher(bool start_async) const; void shutdownKeeperDispatcher() const; void updateKeeperConfiguration(const Poco::Util::AbstractConfiguration & config); + + zkutil::ZooKeeperPtr getZooKeeper() const; }; } diff --git a/src/Coordination/tests/gtest_coordination.cpp b/src/Coordination/tests/gtest_coordination.cpp index 2b5fd3424c0..dd19f0b9967 100644 --- a/src/Coordination/tests/gtest_coordination.cpp +++ b/src/Coordination/tests/gtest_coordination.cpp @@ -1000,7 +1000,7 @@ TEST_P(CoordinationTest, ChangelogTestReadAfterBrokenTruncate) EXPECT_TRUE(fs::exists("./logs/changelog_31_35.bin" + params.extension)); DB::WriteBufferFromFile plain_buf( - "./logs/changelog_11_15.bin" + params.extension, DBMS_DEFAULT_BUFFER_SIZE, O_APPEND | O_CREAT | O_WRONLY); + "./logs/changelog_11_15.bin" + params.extension, DB::DBMS_DEFAULT_BUFFER_SIZE, O_APPEND | O_CREAT | O_WRONLY); plain_buf.truncate(0); DB::KeeperLogStore changelog_reader( @@ -1073,7 +1073,7 @@ TEST_P(CoordinationTest, ChangelogTestReadAfterBrokenTruncate2) EXPECT_TRUE(fs::exists("./logs/changelog_21_40.bin" + params.extension)); DB::WriteBufferFromFile plain_buf( - "./logs/changelog_1_20.bin" + params.extension, DBMS_DEFAULT_BUFFER_SIZE, O_APPEND | O_CREAT | O_WRONLY); + "./logs/changelog_1_20.bin" + params.extension, DB::DBMS_DEFAULT_BUFFER_SIZE, O_APPEND | O_CREAT | O_WRONLY); plain_buf.truncate(30); DB::KeeperLogStore changelog_reader( @@ -1130,7 +1130,7 @@ TEST_F(CoordinationTest, ChangelogTestReadAfterBrokenTruncate3) EXPECT_TRUE(fs::exists("./logs/changelog_21_40.bin")); DB::WriteBufferFromFile plain_buf( - "./logs/changelog_1_20.bin", DBMS_DEFAULT_BUFFER_SIZE, O_APPEND | O_CREAT | O_WRONLY); + "./logs/changelog_1_20.bin", DB::DBMS_DEFAULT_BUFFER_SIZE, O_APPEND | O_CREAT | O_WRONLY); plain_buf.truncate(plain_buf.size() - 30); DB::KeeperLogStore changelog_reader( @@ -1733,7 +1733,7 @@ TEST_P(CoordinationTest, TestStorageSnapshotBroken) /// Let's corrupt file DB::WriteBufferFromFile plain_buf( - "./snapshots/snapshot_50.bin" + params.extension, DBMS_DEFAULT_BUFFER_SIZE, O_APPEND | O_CREAT | O_WRONLY); + "./snapshots/snapshot_50.bin" + params.extension, DB::DBMS_DEFAULT_BUFFER_SIZE, O_APPEND | O_CREAT | O_WRONLY); plain_buf.truncate(34); plain_buf.sync(); @@ -2770,7 +2770,7 @@ TEST_P(CoordinationTest, TestDurableState) { SCOPED_TRACE("Read from corrupted file"); state_manager.reset(); - DB::WriteBufferFromFile write_buf("./state", DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY); + DB::WriteBufferFromFile write_buf("./state", DB::DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY); write_buf.seek(20, SEEK_SET); DB::writeIntBinary(31, write_buf); write_buf.sync(); @@ -2787,7 +2787,7 @@ TEST_P(CoordinationTest, TestDurableState) SCOPED_TRACE("Read from file with invalid size"); state_manager.reset(); - DB::WriteBufferFromFile write_buf("./state", DBMS_DEFAULT_BUFFER_SIZE, O_TRUNC | O_CREAT | O_WRONLY); + DB::WriteBufferFromFile write_buf("./state", DB::DBMS_DEFAULT_BUFFER_SIZE, O_TRUNC | O_CREAT | O_WRONLY); DB::writeIntBinary(20, write_buf); write_buf.sync(); write_buf.close(); diff --git a/src/Core/BackgroundSchedulePool.cpp b/src/Core/BackgroundSchedulePool.cpp index 
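// Context::getIOURingReader() above lazily constructs the reader on first use behind a OnceFlag,
// so the standalone Keeper build only pays for io_uring when a read method actually asks for it.
// A standalone sketch of the same thread-safe lazy initialisation using the standard library
// (std::once_flag instead of ClickHouse's OnceFlag/callOnce; the class names just mirror the hunk):

#include <cstddef>
#include <iostream>
#include <memory>
#include <mutex>

struct IOUringReader
{
    explicit IOUringReader(size_t queue_size) { std::cout << "created with queue " << queue_size << '\n'; }
};

class Context
{
public:
    IOUringReader & getIOURingReader() const
    {
        std::call_once(io_uring_reader_initialized, [&]
        {
            io_uring_reader = std::make_unique<IOUringReader>(512);
        });
        return *io_uring_reader;
    }

private:
    mutable std::once_flag io_uring_reader_initialized;
    mutable std::unique_ptr<IOUringReader> io_uring_reader;
};

int main()
{
    Context ctx;
    ctx.getIOURingReader(); /// constructed exactly once here
    ctx.getIOURingReader(); /// reused afterwards
    return 0;
}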
ec1ae047d05..fa892bc3c84 100644 --- a/src/Core/BackgroundSchedulePool.cpp +++ b/src/Core/BackgroundSchedulePool.cpp @@ -31,7 +31,7 @@ bool BackgroundSchedulePoolTaskInfo::schedule() return true; } -bool BackgroundSchedulePoolTaskInfo::scheduleAfter(size_t milliseconds, bool overwrite) +bool BackgroundSchedulePoolTaskInfo::scheduleAfter(size_t milliseconds, bool overwrite, bool only_if_scheduled) { std::lock_guard lock(schedule_mutex); @@ -39,6 +39,8 @@ bool BackgroundSchedulePoolTaskInfo::scheduleAfter(size_t milliseconds, bool ove return false; if (delayed && !overwrite) return false; + if (!delayed && only_if_scheduled) + return false; pool.scheduleDelayedTask(shared_from_this(), milliseconds, lock); return true; diff --git a/src/Core/BackgroundSchedulePool.h b/src/Core/BackgroundSchedulePool.h index e97b02e976f..eca93353283 100644 --- a/src/Core/BackgroundSchedulePool.h +++ b/src/Core/BackgroundSchedulePool.h @@ -106,8 +106,10 @@ public: bool schedule(); /// Schedule for execution after specified delay. - /// If overwrite is set then the task will be re-scheduled (if it was already scheduled, i.e. delayed == true). - bool scheduleAfter(size_t milliseconds, bool overwrite = true); + /// If overwrite is set, and the task is already scheduled with a delay (delayed == true), + /// the task will be re-scheduled with the new delay. + /// If only_if_scheduled is set, don't do anything unless the task is already scheduled with a delay. + bool scheduleAfter(size_t milliseconds, bool overwrite = true, bool only_if_scheduled = false); /// Further attempts to schedule become no-op. Will wait till the end of the current execution of the task. void deactivate(); diff --git a/src/Core/Defines.h b/src/Core/Defines.h index e2ffc2b7d7a..a3ab76c0b93 100644 --- a/src/Core/Defines.h +++ b/src/Core/Defines.h @@ -3,66 +3,70 @@ #include #include -#define DBMS_DEFAULT_PORT 9000 -#define DBMS_DEFAULT_SECURE_PORT 9440 -#define DBMS_DEFAULT_CONNECT_TIMEOUT_SEC 10 -#define DBMS_DEFAULT_SEND_TIMEOUT_SEC 300 -#define DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC 300 +namespace DB +{ + +static constexpr auto DBMS_DEFAULT_PORT = 9000; +static constexpr auto DBMS_DEFAULT_SECURE_PORT = 9440; +static constexpr auto DBMS_DEFAULT_CONNECT_TIMEOUT_SEC = 10; +static constexpr auto DBMS_DEFAULT_SEND_TIMEOUT_SEC = 300; +static constexpr auto DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC = 300; /// Timeout for synchronous request-result protocol call (like Ping or TablesStatus). -#define DBMS_DEFAULT_SYNC_REQUEST_TIMEOUT_SEC 5 -#define DBMS_DEFAULT_POLL_INTERVAL 10 +static constexpr auto DBMS_DEFAULT_SYNC_REQUEST_TIMEOUT_SEC = 5; +static constexpr auto DBMS_DEFAULT_POLL_INTERVAL = 10; /// The size of the I/O buffer by default. -#define DBMS_DEFAULT_BUFFER_SIZE 1048576ULL +static constexpr auto DBMS_DEFAULT_BUFFER_SIZE = 1048576ULL; -#define PADDING_FOR_SIMD 64 +static constexpr auto PADDING_FOR_SIMD = 64; /** Which blocks by default read the data (by number of rows). * Smaller values give better cache locality, less consumption of RAM, but more overhead to process the query. */ -#define DEFAULT_BLOCK_SIZE 65409 /// 65536 - PADDING_FOR_SIMD - (PADDING_FOR_SIMD - 1) bytes padding that we usually have in arrays +static constexpr auto DEFAULT_BLOCK_SIZE + = 65409; /// 65536 - PADDING_FOR_SIMD - (PADDING_FOR_SIMD - 1) bytes padding that we usually have in = arrays /** Which blocks should be formed for insertion into the table, if we control the formation of blocks. 
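// scheduleAfter() above grows a third flag: `overwrite` re-arms an already-delayed task with a new
// delay, while `only_if_scheduled` turns the call into a no-op unless the task is already waiting.
// A standalone sketch of just that decision table (TaskState is illustrative, not the pool itself):

#include <cassert>

struct TaskState
{
    bool deactivated = false;
    bool delayed = false; /// already scheduled with a delay
};

/// Returns true if the task should be (re)armed with the new delay.
bool should_schedule_after(const TaskState & task, bool overwrite, bool only_if_scheduled)
{
    if (task.deactivated)
        return false;
    if (task.delayed && !overwrite)
        return false; /// keep the existing deadline
    if (!task.delayed && only_if_scheduled)
        return false; /// caller only wants to adjust an existing schedule
    return true;
}

int main()
{
    assert(should_schedule_after({.delayed = false}, /*overwrite=*/ true, /*only_if_scheduled=*/ false));  /// plain scheduleAfter
    assert(!should_schedule_after({.delayed = true}, /*overwrite=*/ false, /*only_if_scheduled=*/ false)); /// don't clobber the existing delay
    assert(!should_schedule_after({.delayed = false}, /*overwrite=*/ true, /*only_if_scheduled=*/ true));  /// nothing scheduled -> no-op
    return 0;
}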
* (Sometimes the blocks are inserted exactly such blocks that have been read / transmitted from the outside, and this parameter does not affect their size.) * More than DEFAULT_BLOCK_SIZE, because in some tables a block of data on the disk is created for each block (quite a big thing), * and if the parts were small, then it would be costly then to combine them. */ -#define DEFAULT_INSERT_BLOCK_SIZE \ - 1048449 /// 1048576 - PADDING_FOR_SIMD - (PADDING_FOR_SIMD - 1) bytes padding that we usually have in arrays +static constexpr auto DEFAULT_INSERT_BLOCK_SIZE + = 1048449; /// 1048576 - PADDING_FOR_SIMD - (PADDING_FOR_SIMD - 1) bytes padding that we usually have in arrays -#define DEFAULT_PERIODIC_LIVE_VIEW_REFRESH_SEC 60 -#define SHOW_CHARS_ON_SYNTAX_ERROR ptrdiff_t(160) +static constexpr auto DEFAULT_PERIODIC_LIVE_VIEW_REFRESH_SEC = 60; +static constexpr auto SHOW_CHARS_ON_SYNTAX_ERROR = ptrdiff_t(160); /// each period reduces the error counter by 2 times /// too short a period can cause errors to disappear immediately after creation. -#define DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_DECREASE_ERROR_PERIOD 60 +static constexpr auto DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_DECREASE_ERROR_PERIOD = 60; /// replica error max cap, this is to prevent replica from accumulating too many errors and taking to long to recover. -#define DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT 1000 +static constexpr auto DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT = 1000; /// The boundary on which the blocks for asynchronous file operations should be aligned. -#define DEFAULT_AIO_FILE_BLOCK_SIZE 4096 +static constexpr auto DEFAULT_AIO_FILE_BLOCK_SIZE = 4096; -#define DEFAULT_HTTP_READ_BUFFER_TIMEOUT 30 -#define DEFAULT_HTTP_READ_BUFFER_CONNECTION_TIMEOUT 1 +static constexpr auto DEFAULT_HTTP_READ_BUFFER_TIMEOUT = 30; +static constexpr auto DEFAULT_HTTP_READ_BUFFER_CONNECTION_TIMEOUT = 1; /// Maximum number of http-connections between two endpoints /// the number is unmotivated -#define DEFAULT_COUNT_OF_HTTP_CONNECTIONS_PER_ENDPOINT 15 +static constexpr auto DEFAULT_COUNT_OF_HTTP_CONNECTIONS_PER_ENDPOINT = 15; -#define DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT 30 +static constexpr auto DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT = 30; -#define DBMS_DEFAULT_PATH "/var/lib/clickhouse/" +static constexpr auto DBMS_DEFAULT_PATH = "/var/lib/clickhouse/"; /// Actually, there may be multiple acquisitions of different locks for a given table within one query. /// Check with IStorage class for the list of possible locks -#define DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC 120 +static constexpr auto DBMS_DEFAULT_LOCK_ACQUIRE_TIMEOUT_SEC = 120; /// Default limit on recursion depth of recursive descend parser. -#define DBMS_DEFAULT_MAX_PARSER_DEPTH 1000 +static constexpr auto DBMS_DEFAULT_MAX_PARSER_DEPTH = 1000; /// Default limit on query size. 
-#define DBMS_DEFAULT_MAX_QUERY_SIZE 262144 +static constexpr auto DBMS_DEFAULT_MAX_QUERY_SIZE = 262144; /// Max depth of hierarchical dictionary -#define DBMS_HIERARCHICAL_DICTIONARY_MAX_DEPTH 1000 +static constexpr auto DBMS_HIERARCHICAL_DICTIONARY_MAX_DEPTH = 1000; /// Default maximum (total and entry) sizes and policies of various caches static constexpr auto DEFAULT_UNCOMPRESSED_CACHE_POLICY = "SLRU"; @@ -95,7 +99,9 @@ static constexpr auto DEFAULT_QUERY_CACHE_MAX_ENTRY_SIZE_IN_ROWS = 30'000'000uz; /// /// Look at compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h #if !defined(SANITIZER) -#define QUERY_PROFILER_DEFAULT_SAMPLE_RATE_NS 1000000000 +static constexpr auto QUERY_PROFILER_DEFAULT_SAMPLE_RATE_NS = 1000000000; #else -#define QUERY_PROFILER_DEFAULT_SAMPLE_RATE_NS 0 +static constexpr auto QUERY_PROFILER_DEFAULT_SAMPLE_RATE_NS = 0; #endif + +} diff --git a/src/Core/ExternalResultDescription.cpp b/src/Core/ExternalResultDescription.cpp index 0700200a9ec..f7e8a69d355 100644 --- a/src/Core/ExternalResultDescription.cpp +++ b/src/Core/ExternalResultDescription.cpp @@ -20,6 +20,11 @@ namespace ErrorCodes extern const int UNKNOWN_TYPE; } +ExternalResultDescription::ExternalResultDescription(const Block & sample_block_) +{ + init(sample_block_); +} + void ExternalResultDescription::init(const Block & sample_block_) { sample_block = sample_block_; diff --git a/src/Core/ExternalResultDescription.h b/src/Core/ExternalResultDescription.h index a9ffe8b2ed2..b7d852b99cf 100644 --- a/src/Core/ExternalResultDescription.h +++ b/src/Core/ExternalResultDescription.h @@ -41,6 +41,9 @@ struct ExternalResultDescription Block sample_block; std::vector> types; + ExternalResultDescription() = default; + explicit ExternalResultDescription(const Block & sample_block_); + void init(const Block & sample_block_); }; diff --git a/src/Core/Field.h b/src/Core/Field.h index e77217abc03..6afa98ed9c0 100644 --- a/src/Core/Field.h +++ b/src/Core/Field.h @@ -122,7 +122,7 @@ struct CustomType bool isSecret() const { return impl->isSecret(); } const char * getTypeName() const { return impl->getTypeName(); } String toString(bool show_secrets = true) const { return impl->toString(show_secrets); } - const CustomTypeImpl & getImpl() { return *impl; } + const CustomTypeImpl & getImpl() const { return *impl; } bool operator < (const CustomType & rhs) const { return *impl < *rhs.impl; } bool operator <= (const CustomType & rhs) const { return *impl <= *rhs.impl; } @@ -292,7 +292,7 @@ concept not_field_or_bool_or_stringlike /** 32 is enough. Round number is used for alignment and for better arithmetic inside std::vector. * NOTE: Actually, sizeof(std::string) is 32 when using libc++, so Field is 40 bytes. */ -#define DBMS_MIN_FIELD_SIZE 32 +static constexpr auto DBMS_MIN_FIELD_SIZE = 32; /** Discriminated union of several types. 
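// Defines.h and Field.h above convert #define constants into `static constexpr auto` values inside
// namespace DB: they become typed, scoped symbols (visible to the debugger, usable as template
// arguments, no textual-substitution surprises), which is also why call sites elsewhere in this
// diff now spell DB::DBMS_DEFAULT_BUFFER_SIZE. A standalone sketch of the difference:

#include <iostream>

namespace db_sketch /// illustrative namespace, not ClickHouse's DB
{
    static constexpr auto DEFAULT_BUFFER_SIZE = 1048576ULL;   /// has a real type: unsigned long long
    static constexpr auto DEFAULT_PATH = "/var/lib/example/"; /// const char *
}

/// With a macro this would pick up whatever the preprocessor pasted in;
/// with a constexpr value the name is found through normal (qualified) lookup.
template <unsigned long long buffer_size>
struct Buffer
{
    unsigned char data[buffer_size];
};

int main()
{
    Buffer<db_sketch::DEFAULT_BUFFER_SIZE> buf;
    std::cout << sizeof(buf.data) << ' ' << db_sketch::DEFAULT_PATH << '\n';
    return 0;
}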
diff --git a/src/Core/InterpolateDescription.cpp b/src/Core/InterpolateDescription.cpp index e7b74716b79..d828c2e85e9 100644 --- a/src/Core/InterpolateDescription.cpp +++ b/src/Core/InterpolateDescription.cpp @@ -3,10 +3,16 @@ #include #include #include +#include +#include +#include +#include +#include +#include + namespace DB { - InterpolateDescription::InterpolateDescription(ActionsDAGPtr actions_, const Aliases & aliases) : actions(actions_) { @@ -28,5 +34,4 @@ namespace DB result_columns_order.push_back(name); } } - } diff --git a/src/Core/InterpolateDescription.h b/src/Core/InterpolateDescription.h index 8aabce1470e..62d7120508b 100644 --- a/src/Core/InterpolateDescription.h +++ b/src/Core/InterpolateDescription.h @@ -2,20 +2,18 @@ #include #include -#include #include -#include -#include -#include -#include -#include -#include -#include +#include +#include namespace DB { +class ActionsDAG; +using ActionsDAGPtr = std::shared_ptr; +using Aliases = std::unordered_map; + /// Interpolate description struct InterpolateDescription { diff --git a/src/Core/Joins.cpp b/src/Core/Joins.cpp index 9c8ece82224..77568223d71 100644 --- a/src/Core/Joins.cpp +++ b/src/Core/Joins.cpp @@ -13,6 +13,7 @@ const char * toString(JoinKind kind) case JoinKind::Full: return "FULL"; case JoinKind::Cross: return "CROSS"; case JoinKind::Comma: return "COMMA"; + case JoinKind::Paste: return "PASTE"; } }; diff --git a/src/Core/Joins.h b/src/Core/Joins.h index 6884e8dfd9a..cc69f07263d 100644 --- a/src/Core/Joins.h +++ b/src/Core/Joins.h @@ -13,7 +13,8 @@ enum class JoinKind Right, Full, Cross, /// Direct product. Strictness and condition doesn't matter. - Comma /// Same as direct product. Intended to be converted to INNER JOIN with conditions from WHERE. + Comma, /// Same as direct product. Intended to be converted to INNER JOIN with conditions from WHERE. + Paste, /// Used to join parts without `ON` clause. }; const char * toString(JoinKind kind); @@ -27,6 +28,7 @@ inline constexpr bool isRightOrFull(JoinKind kind) { return kind == JoinKind::R inline constexpr bool isLeftOrFull(JoinKind kind) { return kind == JoinKind::Left || kind == JoinKind::Full; } inline constexpr bool isInnerOrRight(JoinKind kind) { return kind == JoinKind::Inner || kind == JoinKind::Right; } inline constexpr bool isInnerOrLeft(JoinKind kind) { return kind == JoinKind::Inner || kind == JoinKind::Left; } +inline constexpr bool isPaste(JoinKind kind) { return kind == JoinKind::Paste; } /// Allows more optimal JOIN for typical cases. 
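// Joins.h/Joins.cpp above add JoinKind::Paste and wire it into toString(); because the switch has
// no default case, forgetting the new enumerator would trip -Wswitch/-Werror rather than silently
// fall through. A standalone sketch of that pattern (the enum is a copy for illustration, not
// DB::JoinKind itself):

#include <iostream>

enum class JoinKindSketch
{
    Inner,
    Left,
    Right,
    Full,
    Cross,
    Comma,
    Paste, /// join parts row-by-row without an ON clause
};

const char * toString(JoinKindSketch kind)
{
    switch (kind) /// no default: the compiler flags any enumerator missing from this switch
    {
        case JoinKindSketch::Inner: return "INNER";
        case JoinKindSketch::Left:  return "LEFT";
        case JoinKindSketch::Right: return "RIGHT";
        case JoinKindSketch::Full:  return "FULL";
        case JoinKindSketch::Cross: return "CROSS";
        case JoinKindSketch::Comma: return "COMMA";
        case JoinKindSketch::Paste: return "PASTE";
    }
    return "UNKNOWN";
}

int main()
{
    std::cout << toString(JoinKindSketch::Paste) << '\n';
    return 0;
}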
enum class JoinStrictness diff --git a/src/Core/MySQL/MySQLCharset.cpp b/src/Core/MySQL/MySQLCharset.cpp index 0acf3f130a6..d8e68565f3d 100644 --- a/src/Core/MySQL/MySQLCharset.cpp +++ b/src/Core/MySQL/MySQLCharset.cpp @@ -5,13 +5,16 @@ #if USE_ICU #include -#define CHUNK_SIZE 1024 -static const char * TARGET_CHARSET = "utf8"; #endif namespace DB { +#if USE_ICU +static constexpr auto CHUNK_SIZE = 1024; +static constexpr auto TARGET_CHARSET = "utf8"; +#endif + namespace ErrorCodes { extern const int UNKNOWN_EXCEPTION; diff --git a/src/Core/PostgreSQL/insertPostgreSQLValue.cpp b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp index d2e8071c5de..2f041134f06 100644 --- a/src/Core/PostgreSQL/insertPostgreSQLValue.cpp +++ b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp @@ -36,7 +36,7 @@ void insertDefaultPostgreSQLValue(IColumn & column, const IColumn & sample_colum void insertPostgreSQLValue( IColumn & column, std::string_view value, const ExternalResultDescription::ValueType type, const DataTypePtr data_type, - std::unordered_map & array_info, size_t idx) + const std::unordered_map & array_info, size_t idx) { switch (type) { @@ -125,8 +125,8 @@ void insertPostgreSQLValue( pqxx::array_parser parser{value}; std::pair parsed = parser.get_next(); - size_t dimension = 0, max_dimension = 0, expected_dimensions = array_info[idx].num_dimensions; - const auto parse_value = array_info[idx].pqxx_parser; + size_t dimension = 0, max_dimension = 0, expected_dimensions = array_info.at(idx).num_dimensions; + const auto parse_value = array_info.at(idx).pqxx_parser; std::vector dimensions(expected_dimensions + 1); while (parsed.first != pqxx::array_parser::juncture::done) @@ -138,7 +138,7 @@ void insertPostgreSQLValue( dimensions[dimension].emplace_back(parse_value(parsed.second)); else if (parsed.first == pqxx::array_parser::juncture::null_value) - dimensions[dimension].emplace_back(array_info[idx].default_value); + dimensions[dimension].emplace_back(array_info.at(idx).default_value); else if (parsed.first == pqxx::array_parser::juncture::row_end) { diff --git a/src/Core/PostgreSQL/insertPostgreSQLValue.h b/src/Core/PostgreSQL/insertPostgreSQLValue.h index b842d86ed47..3bc83292b96 100644 --- a/src/Core/PostgreSQL/insertPostgreSQLValue.h +++ b/src/Core/PostgreSQL/insertPostgreSQLValue.h @@ -23,7 +23,7 @@ struct PostgreSQLArrayInfo void insertPostgreSQLValue( IColumn & column, std::string_view value, const ExternalResultDescription::ValueType type, const DataTypePtr data_type, - std::unordered_map & array_info, size_t idx); + const std::unordered_map & array_info, size_t idx); void preparePostgreSQLArrayInfo( std::unordered_map & array_info, size_t column_idx, const DataTypePtr data_type); diff --git a/src/Core/ProtocolDefines.h b/src/Core/ProtocolDefines.h index 0e2e5b3dc60..058c6fdc903 100644 --- a/src/Core/ProtocolDefines.h +++ b/src/Core/ProtocolDefines.h @@ -1,77 +1,80 @@ #pragma once -#define DBMS_MIN_REVISION_WITH_CLIENT_INFO 54032 -#define DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE 54058 -#define DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO 54060 -#define DBMS_MIN_REVISION_WITH_TABLES_STATUS 54226 -#define DBMS_MIN_REVISION_WITH_TIME_ZONE_PARAMETER_IN_DATETIME_DATA_TYPE 54337 -#define DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME 54372 -#define DBMS_MIN_REVISION_WITH_VERSION_PATCH 54401 -#define DBMS_MIN_REVISION_WITH_SERVER_LOGS 54406 +namespace DB +{ + +static constexpr auto DBMS_MIN_REVISION_WITH_CLIENT_INFO = 54032; +static constexpr auto DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE = 54058; +static constexpr 
auto DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO = 54060; +static constexpr auto DBMS_MIN_REVISION_WITH_TABLES_STATUS = 54226; +static constexpr auto DBMS_MIN_REVISION_WITH_TIME_ZONE_PARAMETER_IN_DATETIME_DATA_TYPE = 54337; +static constexpr auto DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME = 54372; +static constexpr auto DBMS_MIN_REVISION_WITH_VERSION_PATCH = 54401; +static constexpr auto DBMS_MIN_REVISION_WITH_SERVER_LOGS = 54406; /// Minimum revision with exactly the same set of aggregation methods and rules to select them. /// Two-level (bucketed) aggregation is incompatible if servers are inconsistent in these rules /// (keys will be placed in different buckets and result will not be fully aggregated). -#define DBMS_MIN_REVISION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD 54448 -#define DBMS_MIN_MAJOR_VERSION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD 21 -#define DBMS_MIN_MINOR_VERSION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD 4 -#define DBMS_MIN_REVISION_WITH_COLUMN_DEFAULTS_METADATA 54410 +static constexpr auto DBMS_MIN_REVISION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD = 54448; +static constexpr auto DBMS_MIN_MAJOR_VERSION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD = 21; +static constexpr auto DBMS_MIN_MINOR_VERSION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD = 4; +static constexpr auto DBMS_MIN_REVISION_WITH_COLUMN_DEFAULTS_METADATA = 54410; -#define DBMS_MIN_REVISION_WITH_LOW_CARDINALITY_TYPE 54405 -#define DBMS_MIN_REVISION_WITH_CLIENT_WRITE_INFO 54420 +static constexpr auto DBMS_MIN_REVISION_WITH_LOW_CARDINALITY_TYPE = 54405; +static constexpr auto DBMS_MIN_REVISION_WITH_CLIENT_WRITE_INFO = 54420; /// Minimum revision supporting SettingsBinaryFormat::STRINGS. -#define DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS 54429 -#define DBMS_MIN_REVISION_WITH_SCALARS 54429 +static constexpr auto DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS = 54429; +static constexpr auto DBMS_MIN_REVISION_WITH_SCALARS = 54429; /// Minimum revision supporting OpenTelemetry -#define DBMS_MIN_REVISION_WITH_OPENTELEMETRY 54442 +static constexpr auto DBMS_MIN_REVISION_WITH_OPENTELEMETRY = 54442; -#define DBMS_MIN_REVISION_WITH_AGGREGATE_FUNCTIONS_VERSIONING 54452 +static constexpr auto DBMS_MIN_REVISION_WITH_AGGREGATE_FUNCTIONS_VERSIONING = 54452; -#define DBMS_CLUSTER_PROCESSING_PROTOCOL_VERSION 1 +static constexpr auto DBMS_CLUSTER_PROCESSING_PROTOCOL_VERSION = 1; -#define DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION 3 -#define DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS 54453 +static constexpr auto DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION = 3; +static constexpr auto DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS = 54453; -#define DBMS_MERGE_TREE_PART_INFO_VERSION 1 +static constexpr auto DBMS_MERGE_TREE_PART_INFO_VERSION = 1; -#define DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET 54441 +static constexpr auto DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET = 54441; -#define DBMS_MIN_REVISION_WITH_X_FORWARDED_FOR_IN_CLIENT_INFO 54443 -#define DBMS_MIN_REVISION_WITH_REFERER_IN_CLIENT_INFO 54447 +static constexpr auto DBMS_MIN_REVISION_WITH_X_FORWARDED_FOR_IN_CLIENT_INFO = 54443; +static constexpr auto DBMS_MIN_REVISION_WITH_REFERER_IN_CLIENT_INFO = 54447; -#define DBMS_MIN_PROTOCOL_VERSION_WITH_DISTRIBUTED_DEPTH 54448 +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_DISTRIBUTED_DEPTH = 54448; -#define DBMS_MIN_PROTOCOL_VERSION_WITH_INCREMENTAL_PROFILE_EVENTS 54451 +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_INCREMENTAL_PROFILE_EVENTS = 54451; -#define 
DBMS_MIN_REVISION_WITH_CUSTOM_SERIALIZATION 54454 +static constexpr auto DBMS_MIN_REVISION_WITH_CUSTOM_SERIALIZATION = 54454; -#define DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME 54449 +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME = 54449; -#define DBMS_MIN_PROTOCOL_VERSION_WITH_PROFILE_EVENTS_IN_INSERT 54456 +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_PROFILE_EVENTS_IN_INSERT = 54456; -#define DBMS_MIN_PROTOCOL_VERSION_WITH_VIEW_IF_PERMITTED 54457 +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_VIEW_IF_PERMITTED = 54457; -#define DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM 54458 +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM = 54458; -#define DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY 54458 +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY = 54458; -#define DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS 54459 +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS = 54459; /// The server will send query elapsed run time in the Progress packet. -#define DBMS_MIN_PROTOCOL_VERSION_WITH_SERVER_QUERY_TIME_IN_PROGRESS 54460 +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_SERVER_QUERY_TIME_IN_PROGRESS = 54460; -#define DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES 54461 +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES = 54461; -#define DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET_V2 54462 +static constexpr auto DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET_V2 = 54462; -#define DBMS_MIN_PROTOCOL_VERSION_WITH_TOTAL_BYTES_IN_PROGRESS 54463 +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_TOTAL_BYTES_IN_PROGRESS = 54463; -#define DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES 54464 +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_TIMEZONE_UPDATES = 54464; -#define DBMS_MIN_REVISION_WITH_SPARSE_SERIALIZATION 54465 +static constexpr auto DBMS_MIN_REVISION_WITH_SPARSE_SERIALIZATION = 54465; -#define DBMS_MIN_REVISION_WITH_SSH_AUTHENTICATION 54466 +static constexpr auto DBMS_MIN_REVISION_WITH_SSH_AUTHENTICATION = 54466; /// Version of ClickHouse TCP protocol. /// @@ -80,4 +83,6 @@ /// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing common with VERSION_REVISION, /// later is just a number for server version (one number instead of commit SHA) /// for simplicity (sometimes it may be more convenient in some use cases). 
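// ProtocolDefines.h above turns the DBMS_MIN_REVISION_WITH_* macros into constexpr constants in
// namespace DB; they are still used the same way, by comparing the peer's revision against the
// minimum revision that introduced a feature. A standalone sketch of that gating pattern (the
// constant's value is copied from the hunk, the surrounding function is illustrative):

#include <cstdint>
#include <iostream>

static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS = 54459;

void send_query(uint64_t client_revision)
{
    /// Only newer peers understand the optional query-parameters block, so it is gated on revision.
    if (client_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS)
        std::cout << "sending query parameters block\n";
    else
        std::cout << "peer too old, skipping query parameters block\n";
}

int main()
{
    send_query(54458);
    send_query(54466);
    return 0;
}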
-#define DBMS_TCP_PROTOCOL_VERSION 54466 +static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54466; + +} diff --git a/src/Core/ServerSettings.h b/src/Core/ServerSettings.h index 6785eea26ea..85e3d33f80b 100644 --- a/src/Core/ServerSettings.h +++ b/src/Core/ServerSettings.h @@ -79,6 +79,9 @@ namespace DB \ M(UInt64, max_table_size_to_drop, 50000000000lu, "If size of a table is greater than this value (in bytes) than table could not be dropped with any DROP query.", 0) \ M(UInt64, max_partition_size_to_drop, 50000000000lu, "Same as max_table_size_to_drop, but for the partitions.", 0) \ + M(UInt64, max_table_num_to_warn, 5000lu, "If number of tables is greater than this value, server will create a warning that will be displayed to user.", 0) \ + M(UInt64, max_database_num_to_warn, 1000lu, "If number of databases is greater than this value, server will create a warning that will be displayed to user.", 0) \ + M(UInt64, max_part_num_to_warn, 100000lu, "If number of parts is greater than this value, server will create a warning that will be displayed to user.", 0) \ M(UInt64, concurrent_threads_soft_limit_num, 0, "Sets how many concurrent thread can be allocated before applying CPU pressure. Zero means unlimited.", 0) \ M(UInt64, concurrent_threads_soft_limit_ratio_to_cores, 0, "Same as concurrent_threads_soft_limit_num, but with ratio to cores.", 0) \ \ @@ -97,6 +100,10 @@ namespace DB M(Bool, async_load_databases, false, "Enable asynchronous loading of databases and tables to speedup server startup. Queries to not yet loaded entity will be blocked until load is finished.", 0) \ M(Bool, display_secrets_in_show_and_select, false, "Allow showing secrets in SHOW and SELECT queries via a format setting and a grant", 0) \ \ + M(Seconds, keep_alive_timeout, DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT, "The number of seconds that ClickHouse waits for incoming requests before closing the connection.", 0) \ + M(Seconds, replicated_fetches_http_connection_timeout, 0, "HTTP connection timeout for part fetch requests. Inherited from default profile `http_connection_timeout` if not set explicitly.", 0) \ + M(Seconds, replicated_fetches_http_send_timeout, 0, "HTTP send timeout for part fetch requests. Inherited from default profile `http_send_timeout` if not set explicitly.", 0) \ + M(Seconds, replicated_fetches_http_receive_timeout, 0, "HTTP receive timeout for fetch part requests. Inherited from default profile `http_receive_timeout` if not set explicitly.", 0) \ M(UInt64, total_memory_profiler_step, 0, "Whenever server memory usage becomes larger than every next step in number of bytes the memory profiler will collect the allocating stack trace. Zero means disabled memory profiler. Values lower than a few megabytes will slow down server.", 0) \ M(Double, total_memory_tracker_sample_probability, 0, "Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type. The probability is for every alloc/free regardless to the size of the allocation (can be changed with `memory_profiler_sample_min_allocation_size` and `memory_profiler_sample_max_allocation_size`). Note that sampling happens only when the amount of untracked memory exceeds 'max_untracked_memory'. You may want to set 'max_untracked_memory' to 0 for extra fine grained sampling.", 0) \ M(UInt64, total_memory_profiler_sample_min_allocation_size, 0, "Collect random allocations of size greater or equal than specified value with probability equal to `total_memory_profiler_sample_probability`. 0 means disabled.
You may want to set 'max_untracked_memory' to 0 to make this threshold to work as expected.", 0) \ diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 33db7660abd..a38197b9eeb 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -90,7 +90,8 @@ void Settings::checkNoSettingNamesAtTopLevel(const Poco::Util::AbstractConfigura for (const auto & setting : settings.all()) { const auto & name = setting.getName(); - if (config.has(name) && !setting.isObsolete()) + bool should_skip_check = name == "max_table_size_to_drop" || name == "max_partition_size_to_drop"; + if (config.has(name) && !setting.isObsolete() && !should_skip_check) { throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, "A setting '{}' appeared at top level in config {}." " But it is user-level setting that should be located in users.xml inside section for specific profile." @@ -106,9 +107,7 @@ std::vector Settings::getAllRegisteredNames() const { std::vector all_settings; for (const auto & setting_field : all()) - { all_settings.push_back(setting_field.getName()); - } return all_settings; } diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 08add1af7c5..6586a9d6b2c 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -186,7 +186,7 @@ class IColumn; M(Bool, parallel_replicas_for_non_replicated_merge_tree, false, "If true, ClickHouse will use parallel replicas algorithm also for non-replicated MergeTree tables", 0) \ M(UInt64, parallel_replicas_min_number_of_rows_per_replica, 0, "Limit the number of replicas used in a query to (estimated rows to read / min_number_of_rows_per_replica). The max is still limited by 'max_parallel_replicas'", 0) \ \ - M(Bool, skip_unavailable_shards, false, "If true, ClickHouse silently skips unavailable shards and nodes unresolvable through DNS. Shard is marked as unavailable when none of the replicas can be reached.", 0) \ + M(Bool, skip_unavailable_shards, false, "If true, ClickHouse silently skips unavailable shards. Shard is marked as unavailable when: 1) The shard cannot be reached due to a connection failure. 2) Shard is unresolvable through DNS. 3) Table does not exist on the shard.", 0) \ \ M(UInt64, parallel_distributed_insert_select, 0, "Process distributed INSERT SELECT query in the same cluster on local tables on every shard; if set to 1 - SELECT is executed on each shard; if set to 2 - SELECT and INSERT are executed on each shard", 0) \ M(UInt64, distributed_group_by_no_merge, 0, "If 1, Do not merge aggregation states from different servers for distributed queries (shards will process query up to the Complete stage, initiator just proxies the data from the shards). If 2 the initiator will apply ORDER BY and LIMIT stages (it is not in case when shard process query up to the Complete stage)", 0) \ @@ -219,6 +219,7 @@ class IColumn; M(Bool, mysql_map_fixed_string_to_text_in_show_columns, false, "If enabled, FixedString type will be mapped to TEXT in SHOW [FULL] COLUMNS, BLOB otherwise.", 0) \ \ M(UInt64, optimize_min_equality_disjunction_chain_length, 3, "The minimum length of the expression `expr = x1 OR ... expr = xN` for optimization ", 0) \ + M(UInt64, optimize_min_inequality_conjunction_chain_length, 3, "The minimum length of the expression `expr <> x1 AND ... expr <> xN` for optimization ", 0) \ \ M(UInt64, min_bytes_to_use_direct_io, 0, "The minimum number of bytes for reading the data with O_DIRECT option during SELECT queries execution. 
0 - disabled.", 0) \ M(UInt64, min_bytes_to_use_mmap_io, 0, "The minimum number of bytes for reading the data with mmap option during SELECT queries execution. 0 - disabled.", 0) \ @@ -338,6 +339,7 @@ class IColumn; M(UInt64, http_max_field_value_size, 128 * 1024, "Maximum length of field value in HTTP header", 0) \ M(UInt64, http_max_chunk_size, 100_GiB, "Maximum value of a chunk size in HTTP chunked transfer encoding", 0) \ M(Bool, http_skip_not_found_url_for_globs, true, "Skip url's for globs with HTTP_NOT_FOUND error", 0) \ + M(Bool, http_make_head_request, true, "Allows the execution of a `HEAD` request while reading data from HTTP to retrieve information about the file to be read, such as its size", 0) \ M(Bool, optimize_throw_if_noop, false, "If setting is enabled and OPTIMIZE query didn't actually assign a merge then an explanatory exception is thrown", 0) \ M(Bool, use_index_for_in_with_subqueries, true, "Try using an index if there is a subquery or a table expression on the right side of the IN operator.", 0) \ M(UInt64, use_index_for_in_with_subqueries_max_values, 0, "The maximum size of set in the right hand side of the IN operator to use table index for filtering. It allows to avoid performance degradation and higher memory usage due to preparation of additional data structures for large queries. Zero means no limit.", 0) \ @@ -527,6 +529,8 @@ class IColumn; M(Int64, max_partitions_to_read, -1, "Limit the max number of partitions that can be accessed in one query. <= 0 means unlimited.", 0) \ M(Bool, check_query_single_value_result, true, "Return check query result as single 1/0 value", 0) \ M(Bool, allow_drop_detached, false, "Allow ALTER TABLE ... DROP DETACHED PART[ITION] ... queries", 0) \ + M(UInt64, max_table_size_to_drop, 50000000000lu, "If size of a table is greater than this value (in bytes) than table could not be dropped with any DROP query.", 0) \ + M(UInt64, max_partition_size_to_drop, 50000000000lu, "Same as max_table_size_to_drop, but for the partitions.", 0) \ \ M(UInt64, postgresql_connection_pool_size, 16, "Connection pool size for PostgreSQL table engine and database engine.", 0) \ M(UInt64, postgresql_connection_pool_wait_timeout, 5000, "Connection pool push/pop timeout on empty pool for PostgreSQL table engine and database engine. By default it will block on empty pool.", 0) \ @@ -580,6 +584,8 @@ class IColumn; M(Bool, enable_early_constant_folding, true, "Enable query optimization where we analyze function and subqueries results and rewrite query if there're constants there", 0) \ M(Bool, deduplicate_blocks_in_dependent_materialized_views, false, "Should deduplicate blocks for materialized views if the block is not a duplicate for the table. Use true to always deduplicate in dependent tables.", 0) \ M(Bool, materialized_views_ignore_errors, false, "Allows to ignore errors for MATERIALIZED VIEW, and deliver original block to the table regardless of MVs", 0) \ + M(Bool, allow_experimental_refreshable_materialized_view, false, "Allow refreshable materialized views (CREATE MATERIALIZED VIEW REFRESH ...).", 0) \ + M(Bool, stop_refreshable_materialized_views_on_startup, false, "On server startup, prevent scheduling of refreshable materialized views, as if with SYSTEM STOP VIEWS. You can manually start them with SYSTEM START VIEWS or SYSTEM START VIEW afterwards. Also applies to newly created views. 
Has no effect on non-refreshable materialized views.", 0) \ M(Bool, use_compact_format_in_distributed_parts_names, true, "Changes format of directories names for distributed table insert parts.", 0) \ M(Bool, validate_polygons, true, "Throw exception if polygon is invalid in function pointInPolygon (e.g. self-tangent, self-intersecting). If the setting is false, the function will accept invalid polygons but may silently return wrong result.", 0) \ M(UInt64, max_parser_depth, DBMS_DEFAULT_MAX_PARSER_DEPTH, "Maximum parser depth (recursion depth of recursive descend parser).", 0) \ @@ -620,6 +626,7 @@ class IColumn; M(Bool, describe_include_subcolumns, false, "If true, subcolumns of all table columns will be included into result of DESCRIBE query", 0) \ M(Bool, describe_include_virtual_columns, false, "If true, virtual columns of table will be included into result of DESCRIBE query", 0) \ M(Bool, describe_compact_output, false, "If true, include only column names and types into result of DESCRIBE query", 0) \ + M(Bool, apply_mutations_on_fly, false, "Only available in ClickHouse Cloud", 0) \ M(Bool, mutations_execute_nondeterministic_on_initiator, false, "If true nondeterministic function are executed on initiator and replaced to literals in UPDATE and DELETE queries", 0) \ M(Bool, mutations_execute_subqueries_on_initiator, false, "If true scalar subqueries are executed on initiator and replaced to literals in UPDATE and DELETE queries", 0) \ M(UInt64, mutations_max_literal_size_to_replace, 16384, "The maximum size of serialized literal in bytes to replace in UPDATE and DELETE queries", 0) \ @@ -654,6 +661,7 @@ class IColumn; M(Bool, allow_aggregate_partitions_independently, false, "Enable independent aggregation of partitions on separate threads when partition key suits group by key. Beneficial when number of partitions close to number of cores and partitions have roughly the same size", 0) \ M(Bool, force_aggregate_partitions_independently, false, "Force the use of optimization when it is applicable, but heuristics decided not to use it", 0) \ M(UInt64, max_number_of_partitions_for_independent_aggregation, 128, "Maximal number of partitions in table to apply optimization", 0) \ + M(Float, min_hit_rate_to_use_consecutive_keys_optimization, 0.5, "Minimal hit rate of a cache which is used for consecutive keys optimization in aggregation to keep it enabled", 0) \ /** Experimental feature for moving data between shards. 
*/ \ \ M(Bool, allow_experimental_query_deduplication, false, "Experimental data deduplication for SELECT queries based on part UUIDs", 0) \ @@ -671,6 +679,8 @@ class IColumn; M(Bool, database_replicated_always_detach_permanently, false, "Execute DETACH TABLE as DETACH TABLE PERMANENTLY if database engine is Replicated", 0) \ M(Bool, database_replicated_allow_only_replicated_engine, false, "Allow to create only Replicated tables in database with engine Replicated", 0) \ M(Bool, database_replicated_allow_replicated_engine_arguments, true, "Allow to create only Replicated tables in database with engine Replicated with explicit arguments", 0) \ + M(Bool, cloud_mode, false, "Only available in ClickHouse Cloud", 0) \ + M(UInt64, cloud_mode_engine, 1, "Only available in ClickHouse Cloud", 0) \ M(DistributedDDLOutputMode, distributed_ddl_output_mode, DistributedDDLOutputMode::THROW, "Format of distributed DDL query result, one of: 'none', 'throw', 'null_status_on_timeout', 'never_throw'", 0) \ M(UInt64, distributed_ddl_entry_format_version, 5, "Compatibility version of distributed DDL (ON CLUSTER) queries", 0) \ \ @@ -724,6 +734,7 @@ class IColumn; M(UInt64, merge_tree_min_bytes_per_task_for_remote_reading, 4 * DBMS_DEFAULT_BUFFER_SIZE, "Min bytes to read per task.", 0) \ M(Bool, merge_tree_use_const_size_tasks_for_remote_reading, true, "Whether to use constant size tasks for reading from a remote table.", 0) \ M(Bool, merge_tree_determine_task_size_by_prewhere_columns, true, "Whether to use only prewhere columns size to determine reading task size.", 0) \ + M(UInt64, merge_tree_compact_parts_min_granules_to_multibuffer_read, 16, "Only available in ClickHouse Cloud", 0) \ \ M(Bool, async_insert, false, "If true, data from INSERT query is stored in queue and later flushed to table in background. If wait_for_async_insert is false, INSERT query is processed almost instantly, otherwise client will wait until data will be flushed to table", 0) \ M(Bool, wait_for_async_insert, true, "If true wait for processing of asynchronous insertion", 0) \ @@ -835,6 +846,11 @@ class IColumn; M(Bool, print_pretty_type_names, true, "Print pretty type names in DESCRIBE query and toTypeName() function", 0) \ M(Bool, create_table_empty_primary_key_by_default, false, "Allow to create *MergeTree tables with empty primary key when ORDER BY and PRIMARY KEY not specified", 0) \ M(Bool, allow_named_collection_override_by_default, true, "Allow named collections' fields override by default.", 0)\ + M(Bool, allow_experimental_shared_merge_tree, false, "Only available in ClickHouse Cloud", 0) \ + M(UInt64, cache_warmer_threads, 4, "Only available in ClickHouse Cloud", 0) \ + M(Int64, ignore_cold_parts_seconds, 0, "Only available in ClickHouse Cloud", 0) \ + M(Int64, prefer_warmed_unmerged_parts_seconds, 0, "Only available in ClickHouse Cloud", 0) \ + M(Bool, enable_order_by_all, true, "Enable sorting expression ORDER BY ALL.", 0)\ // End of COMMON_SETTINGS // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS, move obsolete settings to OBSOLETE_SETTINGS and obsolete format settings to OBSOLETE_FORMAT_SETTINGS. @@ -928,12 +944,12 @@ class IColumn; M(Bool, input_format_parquet_preserve_order, false, "Avoid reordering rows when reading from Parquet files. 
Usually makes it much slower.", 0) \ M(Bool, input_format_parquet_filter_push_down, true, "When reading Parquet files, skip whole row groups based on the WHERE/PREWHERE expressions and min/max statistics in the Parquet metadata.", 0) \ M(Bool, input_format_allow_seeks, true, "Allow seeks while reading in ORC/Parquet/Arrow input formats", 0) \ - M(Bool, input_format_orc_allow_missing_columns, false, "Allow missing columns while reading ORC input formats", 0) \ + M(Bool, input_format_orc_allow_missing_columns, true, "Allow missing columns while reading ORC input formats", 0) \ M(Bool, input_format_orc_use_fast_decoder, true, "Use a faster ORC decoder implementation.", 0) \ M(Bool, input_format_orc_filter_push_down, true, "When reading ORC files, skip whole stripes or row groups based on the WHERE/PREWHERE expressions, min/max statistics or bloom filter in the ORC metadata.", 0) \ - M(Bool, input_format_parquet_allow_missing_columns, false, "Allow missing columns while reading Parquet input formats", 0) \ + M(Bool, input_format_parquet_allow_missing_columns, true, "Allow missing columns while reading Parquet input formats", 0) \ M(UInt64, input_format_parquet_local_file_min_bytes_for_seek, 8192, "Min bytes required for local read (file) to do seek, instead of read with ignore in Parquet input format", 0) \ - M(Bool, input_format_arrow_allow_missing_columns, false, "Allow missing columns while reading Arrow input formats", 0) \ + M(Bool, input_format_arrow_allow_missing_columns, true, "Allow missing columns while reading Arrow input formats", 0) \ M(Char, input_format_hive_text_fields_delimiter, '\x01', "Delimiter between fields in Hive Text File", 0) \ M(Char, input_format_hive_text_collection_items_delimiter, '\x02', "Delimiter between collection(array or map) items in Hive Text File", 0) \ M(Char, input_format_hive_text_map_keys_delimiter, '\x03', "Delimiter between a pair of map key/values in Hive Text File", 0) \ @@ -942,6 +958,7 @@ class IColumn; M(UInt64, input_format_max_rows_to_read_for_schema_inference, 25000, "The maximum rows of data to read for automatic schema inference", 0) \ M(UInt64, input_format_max_bytes_to_read_for_schema_inference, 32 * 1024 * 1024, "The maximum bytes of data to read for automatic schema inference", 0) \ M(Bool, input_format_csv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in CSV format", 0) \ + M(Bool, input_format_csv_try_infer_numbers_from_strings, false, "Try to infer numbers from string fields while schema inference in CSV format", 0) \ M(Bool, input_format_tsv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in TSV format", 0) \ M(Bool, input_format_csv_detect_header, true, "Automatically detect header with names and types in CSV format", 0) \ M(Bool, input_format_csv_allow_whitespace_or_tab_as_delimiter, false, "Allow to use spaces and tabs(\\t) as field delimiter in the CSV strings", 0) \ @@ -961,6 +978,7 @@ class IColumn; M(Bool, input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference, false, "Skip columns with unsupported types while schema inference for format Arrow", 0) \ M(String, column_names_for_schema_inference, "", "The list of column names to use in schema inference for formats without column names. The format: 'column1,column2,column3,...'", 0) \ M(String, schema_inference_hints, "", "The list of column names and types to use in schema inference for formats without column names. 
The format: 'column_name1 column_type1, column_name2 column_type2, ...'", 0) \
+ M(SchemaInferenceMode, schema_inference_mode, "default", "Mode of schema inference. 'default' - assume that all files have the same schema and schema can be inferred from any file, 'union' - files can have different schemas and the resulting schema should be a union of schemas of all files", 0) \
 M(Bool, schema_inference_make_columns_nullable, true, "If set to true, all inferred types will be Nullable in schema inference for formats without information about nullability.", 0) \
 M(Bool, input_format_json_read_bools_as_numbers, true, "Allow to parse bools as numbers in JSON input formats", 0) \
 M(Bool, input_format_json_try_infer_numbers_from_strings, false, "Try to infer numbers from string fields while schema inference", 0) \
diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h
index 8102ca818b2..649935acd3f 100644
--- a/src/Core/SettingsChangesHistory.h
+++ b/src/Core/SettingsChangesHistory.h
@@ -81,8 +81,11 @@ namespace SettingsChangesHistory
 /// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
 static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
 {
+    {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."}}},
     {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."},
-               {"print_pretty_type_names", false, true, "Better user experience."}}},
+               {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"},
+               {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"},
+               {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}},
     {"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"},
               {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"},
               {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"},
diff --git a/src/Core/SettingsEnums.cpp b/src/Core/SettingsEnums.cpp
index b853b0d0a0b..ee113a6776f 100644
--- a/src/Core/SettingsEnums.cpp
+++ b/src/Core/SettingsEnums.cpp
@@ -196,9 +196,12 @@ IMPLEMENT_SETTING_ENUM(ExternalCommandStderrReaction, ErrorCodes::BAD_ARGUMENTS,
                        {"log_last", ExternalCommandStderrReaction::LOG_LAST},
                        {"throw", ExternalCommandStderrReaction::THROW}})
-IMPLEMENT_SETTING_ENUM(DateTimeOverflowBehavior, ErrorCodes::BAD_ARGUMENTS,
-                       {{"throw", FormatSettings::DateTimeOverflowBehavior::Throw},
-                        {"ignore", FormatSettings::DateTimeOverflowBehavior::Ignore},
-                        {"saturate", FormatSettings::DateTimeOverflowBehavior::Saturate}})
+IMPLEMENT_SETTING_ENUM(SchemaInferenceMode, ErrorCodes::BAD_ARGUMENTS,
+                       {{"default", SchemaInferenceMode::DEFAULT},
+                        {"union", SchemaInferenceMode::UNION}})
+IMPLEMENT_SETTING_ENUM(DateTimeOverflowBehavior, ErrorCodes::BAD_ARGUMENTS,
+                       {{"throw", FormatSettings::DateTimeOverflowBehavior::Throw},
+                        {"ignore", FormatSettings::DateTimeOverflowBehavior::Ignore},
+                        {"saturate", FormatSettings::DateTimeOverflowBehavior::Saturate}})
 }
diff --git a/src/Core/SettingsEnums.h b/src/Core/SettingsEnums.h
index 0d6e87f25c2..7977a0b3ab6 100644
--- a/src/Core/SettingsEnums.h
+++ b/src/Core/SettingsEnums.h
@@ -133,6 +133,8 @@ enum class DefaultTableEngine
ReplacingMergeTree, ReplicatedMergeTree, ReplicatedReplacingMergeTree, + SharedMergeTree, + SharedReplacingMergeTree, Memory, }; @@ -252,6 +254,14 @@ DECLARE_SETTING_ENUM(S3QueueAction) DECLARE_SETTING_ENUM(ExternalCommandStderrReaction) +enum class SchemaInferenceMode +{ + DEFAULT, + UNION, +}; + +DECLARE_SETTING_ENUM(SchemaInferenceMode) + DECLARE_SETTING_ENUM_WITH_RENAME(DateTimeOverflowBehavior, FormatSettings::DateTimeOverflowBehavior) } diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index d66bdf3583f..44a47de6918 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -92,10 +92,10 @@ PipeFDs signal_pipe; static void call_default_signal_handler(int sig) { if (SIG_ERR == signal(sig, SIG_DFL)) - throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler"); if (0 != raise(sig)) - throwFromErrno("Cannot send signal.", ErrorCodes::CANNOT_SEND_SIGNAL); + throw ErrnoException(ErrorCodes::CANNOT_SEND_SIGNAL, "Cannot send signal"); } static const size_t signal_pipe_buf_size = @@ -659,7 +659,17 @@ BaseDaemon::~BaseDaemon() /// Reset signals to SIG_DFL to avoid trying to write to the signal_pipe that will be closed after. for (int sig : handled_signals) if (SIG_ERR == signal(sig, SIG_DFL)) - throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER); + { + try + { + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler"); + } + catch (ErrnoException &) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + } + signal_pipe.close(); } @@ -1129,7 +1139,7 @@ void BaseDaemon::setupWatchdog() pid = fork(); if (-1 == pid) - throwFromErrno("Cannot fork", ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot fork"); if (0 == pid) { @@ -1225,7 +1235,7 @@ void BaseDaemon::setupWatchdog() if (SIG_ERR == signal(sig, SIG_IGN)) { char * signal_description = strsignal(sig); // NOLINT(concurrency-mt-unsafe) - throwFromErrno(fmt::format("Cannot ignore {}", signal_description), ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Cannot ignore {}", signal_description); } } } @@ -1315,7 +1325,7 @@ void systemdNotify(const std::string_view & command) int s = socket(AF_UNIX, SOCK_DGRAM | SOCK_CLOEXEC, 0); if (s == -1) - throwFromErrno("Can't create UNIX socket for systemd notify.", ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Can't create UNIX socket for systemd notify"); SCOPE_EXIT({ close(s); }); @@ -1351,7 +1361,7 @@ void systemdNotify(const std::string_view & command) if (errno == EINTR) continue; else - throwFromErrno("Failed to notify systemd, sendto returned error.", ErrorCodes::SYSTEM_ERROR); + throw ErrnoException(ErrorCodes::SYSTEM_ERROR, "Failed to notify systemd, sendto returned error"); } else sent_bytes_total += sent_bytes; diff --git a/src/DataTypes/DataTypeEnum.cpp b/src/DataTypes/DataTypeEnum.cpp index e5efb73cfca..a1d5e4b39b7 100644 --- a/src/DataTypes/DataTypeEnum.cpp +++ b/src/DataTypes/DataTypeEnum.cpp @@ -170,7 +170,7 @@ bool DataTypeEnum::contains(const IDataType & rhs) const template SerializationPtr DataTypeEnum::doGetDefaultSerialization() const { - return std::make_shared>(this->getValues()); + return std::make_shared>(std::static_pointer_cast>(shared_from_this())); } diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index a1de6ea18a9..eabf066bc3d 100644 --- 
a/src/DataTypes/IDataType.h
+++ b/src/DataTypes/IDataType.h
@@ -440,6 +440,8 @@ template <typename T> inline bool isFloat(const T & data_type) { return WhichDat
 template <typename T> inline bool isNativeNumber(const T & data_type) { return WhichDataType(data_type).isNativeNumber(); }
 template <typename T> inline bool isNumber(const T & data_type) { return WhichDataType(data_type).isNumber(); }
+template <typename T> inline bool isEnum8(const T & data_type) { return WhichDataType(data_type).isEnum8(); }
+template <typename T> inline bool isEnum16(const T & data_type) { return WhichDataType(data_type).isEnum16(); }
 template <typename T> inline bool isEnum(const T & data_type) { return WhichDataType(data_type).isEnum(); }
 template <typename T> inline bool isDate(const T & data_type) { return WhichDataType(data_type).isDate(); }
diff --git a/src/DataTypes/NestedUtils.cpp b/src/DataTypes/NestedUtils.cpp
index efac2454a03..a7cc1b21389 100644
--- a/src/DataTypes/NestedUtils.cpp
+++ b/src/DataTypes/NestedUtils.cpp
@@ -77,10 +77,11 @@ static Block flattenImpl(const Block & block, bool flatten_named_tuple)
     for (const auto & elem : block)
     {
-        if (const DataTypeArray * type_arr = typeid_cast<const DataTypeArray *>(elem.type.get()))
+        if (isNested(elem.type))
         {
-            const DataTypeTuple * type_tuple = typeid_cast<const DataTypeTuple *>(type_arr->getNestedType().get());
-            if (type_tuple && type_tuple->haveExplicitNames())
+            const DataTypeArray * type_arr = assert_cast<const DataTypeArray *>(elem.type.get());
+            const DataTypeTuple * type_tuple = assert_cast<const DataTypeTuple *>(type_arr->getNestedType().get());
+            if (type_tuple->haveExplicitNames())
             {
                 const DataTypes & element_types = type_tuple->getElements();
                 const Strings & names = type_tuple->getElementNames();
@@ -149,7 +150,7 @@ Block flatten(const Block & block)
 }
-Block flattenArrayOfTuples(const Block & block)
+Block flattenNested(const Block & block)
 {
     return flattenImpl(block, false);
 }
diff --git a/src/DataTypes/NestedUtils.h b/src/DataTypes/NestedUtils.h
index e009ceb18fe..85c29d2c08f 100644
--- a/src/DataTypes/NestedUtils.h
+++ b/src/DataTypes/NestedUtils.h
@@ -20,13 +20,13 @@ namespace Nested
     /// Flat a column of nested type into columns
     /// 1) For named tuples,t Tuple(x .., y ..., ...), replace it with t.x ..., t.y ... , ...
-    /// 2) For an Array with named Tuple element column, a Array(Tuple(x ..., y ..., ...)), replace it with multiple Array Columns, a.x ..., a.y ..., ...
+    /// 2) For a Nested column, an Array(Tuple(x ..., y ..., ...)), replace it with multiple Array columns, a.x ..., a.y ..., ...
     Block flatten(const Block & block);
-    /// Same as flatten but only for Array with named Tuple element column.
-    Block flattenArrayOfTuples(const Block & block);
+    /// Same as flatten but only for Nested columns.
+    Block flattenNested(const Block & block);
-    /// Collect Array columns in a form of `column_name.element_name` to single Array(Tuple(...)) column.
+    /// Collect Array columns in the form of `column_name.element_name` into a single Nested column.
     NamesAndTypesList collect(const NamesAndTypesList & names_and_types);
     /// Convert old-style nested (single arrays with same prefix, `n.a`, `n.b`...) to subcolumns of data type Nested.
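
For context on the NestedUtils change above: the rename flattenArrayOfTuples -> flattenNested keeps the Block-in/Block-out signature and only narrows the flattening to columns whose type is actually Nested. A minimal caller-side sketch follows, not part of the patch; Nested::flattenNested, Block and the header paths come from the hunks above, while the DataTypeFactory usage, the helper name flattenNestedExample and the column name "n" are illustrative assumptions.

#include <Core/Block.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/NestedUtils.h>

using namespace DB;

/// Hypothetical helper, for illustration only.
Block flattenNestedExample()
{
    /// `n Nested(x UInt64, y String)` is stored as Array(Tuple(x UInt64, y String)).
    auto nested_type = DataTypeFactory::instance().get("Nested(x UInt64, y String)");

    Block block;
    block.insert({nested_type->createColumn(), nested_type, "n"});

    /// Expected to produce per-element Array columns such as `n.x` and `n.y`,
    /// matching what flattenArrayOfTuples returned before the rename.
    return Nested::flattenNested(block);
}

Note that the hunk also tightens the guard from a typeid_cast on Array(Tuple(...)) to isNested(elem.type), so only columns recognised as Nested take this flattening path.
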
diff --git a/src/DataTypes/Serializations/SerializationArray.cpp b/src/DataTypes/Serializations/SerializationArray.cpp index c804f58c567..1a21a45d7b8 100644 --- a/src/DataTypes/Serializations/SerializationArray.cpp +++ b/src/DataTypes/Serializations/SerializationArray.cpp @@ -348,6 +348,8 @@ void SerializationArray::deserializeBinaryBulkWithMultipleStreams( { auto mutable_column = column->assumeMutable(); ColumnArray & column_array = typeid_cast(*mutable_column); + size_t prev_last_offset = column_array.getOffsets().back(); + settings.path.push_back(Substream::ArraySizes); if (auto cached_column = getFromSubstreamsCache(cache, settings.path)) @@ -371,9 +373,9 @@ void SerializationArray::deserializeBinaryBulkWithMultipleStreams( /// Number of values corresponding with `offset_values` must be read. size_t last_offset = offset_values.back(); - if (last_offset < nested_column->size()) + if (last_offset < prev_last_offset) throw Exception(ErrorCodes::LOGICAL_ERROR, "Nested column is longer than last offset"); - size_t nested_limit = last_offset - nested_column->size(); + size_t nested_limit = last_offset - prev_last_offset; if (unlikely(nested_limit > MAX_ARRAYS_SIZE)) throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Array sizes are too large: {}", nested_limit); diff --git a/src/DataTypes/Serializations/SerializationEnum.cpp b/src/DataTypes/Serializations/SerializationEnum.cpp index 09b0b02714c..9b3a437e9cf 100644 --- a/src/DataTypes/Serializations/SerializationEnum.cpp +++ b/src/DataTypes/Serializations/SerializationEnum.cpp @@ -11,13 +11,13 @@ namespace DB template void SerializationEnum::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const { - writeString(this->getNameForValue(assert_cast(column).getData()[row_num]), ostr); + writeString(ref_enum_values.getNameForValue(assert_cast(column).getData()[row_num]), ostr); } template void SerializationEnum::serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const { - writeEscapedString(this->getNameForValue(assert_cast(column).getData()[row_num]).toView(), ostr); + writeEscapedString(ref_enum_values.getNameForValue(assert_cast(column).getData()[row_num]).toView(), ostr); } template @@ -30,14 +30,14 @@ void SerializationEnum::deserializeTextEscaped(IColumn & column, ReadBuffe /// NOTE It would be nice to do without creating a temporary object - at least extract std::string out. 
std::string field_name; readEscapedString(field_name, istr); - assert_cast(column).getData().push_back(this->getValue(StringRef(field_name), true)); + assert_cast(column).getData().push_back(ref_enum_values.getValue(StringRef(field_name), true)); } } template void SerializationEnum::serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const { - writeQuotedString(this->getNameForValue(assert_cast(column).getData()[row_num]), ostr); + writeQuotedString(ref_enum_values.getNameForValue(assert_cast(column).getData()[row_num]), ostr); } template @@ -45,7 +45,7 @@ void SerializationEnum::deserializeTextQuoted(IColumn & column, ReadBuffer { std::string field_name; readQuotedStringWithSQLStyle(field_name, istr); - assert_cast(column).getData().push_back(this->getValue(StringRef(field_name))); + assert_cast(column).getData().push_back(ref_enum_values.getValue(StringRef(field_name))); } template @@ -61,20 +61,20 @@ void SerializationEnum::deserializeWholeText(IColumn & column, ReadBuffer { std::string field_name; readStringUntilEOF(field_name, istr); - assert_cast(column).getData().push_back(this->getValue(StringRef(field_name), true)); + assert_cast(column).getData().push_back(ref_enum_values.getValue(StringRef(field_name), true)); } } template void SerializationEnum::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - writeJSONString(this->getNameForValue(assert_cast(column).getData()[row_num]).toView(), ostr, settings); + writeJSONString(ref_enum_values.getNameForValue(assert_cast(column).getData()[row_num]).toView(), ostr, settings); } template void SerializationEnum::serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const { - writeXMLStringForTextElement(this->getNameForValue(assert_cast(column).getData()[row_num]).toView(), ostr); + writeXMLStringForTextElement(ref_enum_values.getNameForValue(assert_cast(column).getData()[row_num]).toView(), ostr); } template @@ -86,14 +86,14 @@ void SerializationEnum::deserializeTextJSON(IColumn & column, ReadBuffer & { std::string field_name; readJSONString(field_name, istr); - assert_cast(column).getData().push_back(this->getValue(StringRef(field_name))); + assert_cast(column).getData().push_back(ref_enum_values.getValue(StringRef(field_name))); } } template void SerializationEnum::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const { - writeCSVString(this->getNameForValue(assert_cast(column).getData()[row_num]), ostr); + writeCSVString(ref_enum_values.getNameForValue(assert_cast(column).getData()[row_num]), ostr); } template @@ -105,7 +105,7 @@ void SerializationEnum::deserializeTextCSV(IColumn & column, ReadBuffer & { std::string field_name; readCSVString(field_name, istr, settings.csv); - assert_cast(column).getData().push_back(this->getValue(StringRef(field_name), true)); + assert_cast(column).getData().push_back(ref_enum_values.getValue(StringRef(field_name), true)); } } @@ -114,7 +114,7 @@ void SerializationEnum::serializeTextMarkdown( const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { if (settings.markdown.escape_special_characters) - writeMarkdownEscapedString(this->getNameForValue(assert_cast(column).getData()[row_num]).toView(), ostr); + writeMarkdownEscapedString(ref_enum_values.getNameForValue(assert_cast(column).getData()[row_num]).toView(), ostr); else 
serializeTextEscaped(column, row_num, ostr, settings); } diff --git a/src/DataTypes/Serializations/SerializationEnum.h b/src/DataTypes/Serializations/SerializationEnum.h index 49a0e4943e0..03b134e59a6 100644 --- a/src/DataTypes/Serializations/SerializationEnum.h +++ b/src/DataTypes/Serializations/SerializationEnum.h @@ -1,20 +1,35 @@ #pragma once +#include #include #include +#include namespace DB { template -class SerializationEnum : public SerializationNumber, public EnumValues +class SerializationEnum : public SerializationNumber { public: using typename SerializationNumber::FieldType; using typename SerializationNumber::ColumnType; - using typename EnumValues::Values; + using Values = EnumValues::Values; - explicit SerializationEnum(const Values & values_) : EnumValues(values_) {} + // SerializationEnum can be constructed in two ways: + /// - Make a copy of the Enum name-to-type mapping. + /// - Only store a reference to an existing mapping. This is faster if the Enum has a lot of different values or if SerializationEnum is + /// constructed very frequently. Make sure that the pointed-to mapping has a longer lifespan than SerializationEnum! + + explicit SerializationEnum(const Values & values_) + : own_enum_values(values_), ref_enum_values(own_enum_values.value()) + { + } + + explicit SerializationEnum(const std::shared_ptr> & enum_type) + : own_enum_type(enum_type), ref_enum_values(*enum_type) + { + } void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override; void serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override; @@ -35,8 +50,12 @@ public: { FieldType x; readText(x, istr); - return this->findByValue(x)->first; + return ref_enum_values.findByValue(x)->first; } + + std::optional> own_enum_values; + std::shared_ptr> own_enum_type; + const EnumValues & ref_enum_values; }; } diff --git a/src/DataTypes/Serializations/SerializationString.cpp b/src/DataTypes/Serializations/SerializationString.cpp index 89fa09781ef..788ff429088 100644 --- a/src/DataTypes/Serializations/SerializationString.cpp +++ b/src/DataTypes/Serializations/SerializationString.cpp @@ -152,6 +152,9 @@ template static NO_INLINE void deserializeBinarySSE2(ColumnString::Chars & data, ColumnString::Offsets & offsets, ReadBuffer & istr, size_t limit) { size_t offset = data.size(); + /// Avoiding calling resize in a loop improves the performance. 
+ data.resize(std::max(data.capacity(), static_cast(4096))); + for (size_t i = 0; i < limit; ++i) { if (istr.eof()) @@ -171,7 +174,8 @@ static NO_INLINE void deserializeBinarySSE2(ColumnString::Chars & data, ColumnSt offset += size + 1; offsets.push_back(offset); - data.resize(offset); + if (unlikely(offset > data.size())) + data.resize_exact(roundUpToPowerOfTwoOrZero(std::max(offset, data.size() * 2))); if (size) { @@ -203,6 +207,8 @@ static NO_INLINE void deserializeBinarySSE2(ColumnString::Chars & data, ColumnSt data[offset - 1] = 0; } + + data.resize(offset); } diff --git a/src/Databases/DDLLoadingDependencyVisitor.cpp b/src/Databases/DDLLoadingDependencyVisitor.cpp index 77a40f674fd..b8690125aaa 100644 --- a/src/Databases/DDLLoadingDependencyVisitor.cpp +++ b/src/Databases/DDLLoadingDependencyVisitor.cpp @@ -1,6 +1,10 @@ #include #include #include +#include "config.h" +#if USE_LIBPQXX +#include +#endif #include #include #include @@ -131,6 +135,14 @@ void DDLLoadingDependencyVisitor::visit(const ASTStorage & storage, Data & data) extractTableNameFromArgument(*storage.engine, data, 3); else if (storage.engine->name == "Dictionary") extractTableNameFromArgument(*storage.engine, data, 0); +#if USE_LIBPQXX + else if (storage.engine->name == "MaterializedPostgreSQL") + { + const auto * create_query = data.create_query->as(); + auto nested_table = toString(create_query->uuid) + StorageMaterializedPostgreSQL::NESTED_TABLE_SUFFIX; + data.dependencies.emplace(QualifiedTableName{ .database = create_query->getDatabase(), .table = nested_table }); + } +#endif } diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index 1daa6351c23..8a5ba5f033f 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -622,4 +623,16 @@ void DatabaseAtomic::checkDetachedTableNotInUse(const UUID & uuid) assertDetachedTableNotInUse(uuid); } +void registerDatabaseAtomic(DatabaseFactory & factory) +{ + auto create_fn = [](const DatabaseFactory::Arguments & args) + { + return make_shared( + args.database_name, + args.metadata_path, + args.uuid, + args.context); + }; + factory.registerDatabase("Atomic", create_fn); +} } diff --git a/src/Databases/DatabaseDictionary.cpp b/src/Databases/DatabaseDictionary.cpp index 3a3dea1d38e..e2e0d52cd88 100644 --- a/src/Databases/DatabaseDictionary.cpp +++ b/src/Databases/DatabaseDictionary.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -140,4 +141,14 @@ void DatabaseDictionary::shutdown() { } +void registerDatabaseDictionary(DatabaseFactory & factory) +{ + auto create_fn = [](const DatabaseFactory::Arguments & args) + { + return make_shared( + args.database_name, + args.context); + }; + factory.registerDatabase("Dictionary", create_fn); +} } diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 53d5245770e..2c2e4030821 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -1,60 +1,15 @@ -#include - #include -#include -#include -#include -#include -#include -#include + +#include #include #include -#include #include #include #include #include -#include -#include #include #include - -#include "config.h" - -#if USE_MYSQL -# include -# include -# include -# include -# include -# include -# include -# include -#endif - -#if USE_MYSQL || USE_LIBPQXX -#include -#include -#endif - -#if USE_LIBPQXX -#include -#include -#include -#include -#endif - -#if USE_SQLITE 
-#include -#endif - -#if USE_AWS_S3 -#include -#endif - -#if USE_HDFS -#include -#endif +#include namespace fs = std::filesystem; @@ -67,7 +22,7 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; extern const int UNKNOWN_DATABASE_ENGINE; extern const int CANNOT_CREATE_DATABASE; - extern const int NOT_IMPLEMENTED; + extern const int LOGICAL_ERROR; } void cckMetadataPathForOrdinary(const ASTCreateQuery & create, const String & metadata_path) @@ -103,8 +58,47 @@ void cckMetadataPathForOrdinary(const ASTCreateQuery & create, const String & me } +/// validate validates the database engine that's specified in the create query for +/// engine arguments, settings and table overrides. +void validate(const ASTCreateQuery & create_query) + +{ + auto * storage = create_query.storage; + + /// Check engine may have arguments + static const std::unordered_set engines_with_arguments{"MySQL", "MaterializeMySQL", "MaterializedMySQL", + "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"}; + + const String & engine_name = storage->engine->name; + bool engine_may_have_arguments = engines_with_arguments.contains(engine_name); + + if (storage->engine->arguments && !engine_may_have_arguments) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine `{}` cannot have arguments", engine_name); + + /// Check engine may have settings + bool may_have_settings = endsWith(engine_name, "MySQL") || engine_name == "Replicated" || engine_name == "MaterializedPostgreSQL"; + bool has_unexpected_element = storage->engine->parameters || storage->partition_by || + storage->primary_key || storage->order_by || + storage->sample_by; + if (has_unexpected_element || (!may_have_settings && storage->settings)) + throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_AST, + "Database engine `{}` cannot have parameters, primary_key, order_by, sample_by, settings", engine_name); + + /// Check engine with table overrides + static const std::unordered_set engines_with_table_overrides{"MaterializeMySQL", "MaterializedMySQL", "MaterializedPostgreSQL"}; + if (create_query.table_overrides && !engines_with_table_overrides.contains(engine_name)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine `{}` cannot have table overrides", engine_name); +} + DatabasePtr DatabaseFactory::get(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context) { + /// check if the database engine is a valid one before proceeding + if (!database_engines.contains(create.storage->engine->name)) + throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", create.storage->engine->name); + + /// if the engine is found (i.e. registered with the factory instance), then validate if the + /// supplied engine arguments, settings and table overrides are valid for the engine. 
+ validate(create); cckMetadataPathForOrdinary(create, metadata_path); DatabasePtr impl = getImpl(create, metadata_path, context); @@ -119,383 +113,42 @@ DatabasePtr DatabaseFactory::get(const ASTCreateQuery & create, const String & m return impl; } -template -static inline ValueType safeGetLiteralValue(const ASTPtr &ast, const String &engine_name) +void DatabaseFactory::registerDatabase(const std::string & name, CreatorFn creator_fn) { - if (!ast || !ast->as()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine {} requested literal argument.", engine_name); + if (!database_engines.emplace(name, std::move(creator_fn)).second) + throw Exception(ErrorCodes::LOGICAL_ERROR, "DatabaseFactory: the database engine name '{}' is not unique", name); +} - return ast->as()->value.safeGet(); +DatabaseFactory & DatabaseFactory::instance() +{ + static DatabaseFactory db_fact; + return db_fact; } DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context) { - auto * engine_define = create.storage; + auto * storage = create.storage; const String & database_name = create.getDatabase(); - const String & engine_name = engine_define->engine->name; - const UUID & uuid = create.uuid; - - static const std::unordered_set database_engines{"Ordinary", "Atomic", "Memory", - "Dictionary", "Lazy", "Replicated", "MySQL", "MaterializeMySQL", "MaterializedMySQL", - "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"}; - - if (!database_engines.contains(engine_name)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine name `{}` does not exist", engine_name); - - static const std::unordered_set engines_with_arguments{"MySQL", "MaterializeMySQL", "MaterializedMySQL", - "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"}; - - static const std::unordered_set engines_with_table_overrides{"MaterializeMySQL", "MaterializedMySQL", "MaterializedPostgreSQL"}; - bool engine_may_have_arguments = engines_with_arguments.contains(engine_name); - - if (engine_define->engine->arguments && !engine_may_have_arguments) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine `{}` cannot have arguments", engine_name); - - bool has_unexpected_element = engine_define->engine->parameters || engine_define->partition_by || - engine_define->primary_key || engine_define->order_by || - engine_define->sample_by; - bool may_have_settings = endsWith(engine_name, "MySQL") || engine_name == "Replicated" || engine_name == "MaterializedPostgreSQL"; - - if (has_unexpected_element || (!may_have_settings && engine_define->settings)) - throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_AST, - "Database engine `{}` cannot have parameters, primary_key, order_by, sample_by, settings", engine_name); - - if (create.table_overrides && !engines_with_table_overrides.contains(engine_name)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine `{}` cannot have table overrides", engine_name); - - if (engine_name == "Ordinary") - { - if (!create.attach && !context->getSettingsRef().allow_deprecated_database_ordinary) - throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, - "Ordinary database engine is deprecated (see also allow_deprecated_database_ordinary setting)"); - - return std::make_shared(database_name, metadata_path, context); - } - - if (engine_name == "Atomic") - return std::make_shared(database_name, metadata_path, uuid, context); - else if (engine_name == "Memory") - return 
std::make_shared(database_name, context); - else if (engine_name == "Dictionary") - return std::make_shared(database_name, context); - -#if USE_MYSQL - - else if (engine_name == "MySQL" || engine_name == "MaterializeMySQL" || engine_name == "MaterializedMySQL") - { - const ASTFunction * engine = engine_define->engine; - if (!engine->arguments) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Engine `{}` must have arguments", engine_name); - - StorageMySQL::Configuration configuration; - ASTs & arguments = engine->arguments->children; - auto mysql_settings = std::make_unique(); - - if (auto named_collection = tryGetNamedCollectionWithOverrides(arguments, context)) - { - configuration = StorageMySQL::processNamedCollectionResult(*named_collection, *mysql_settings, context, false); - } - else - { - if (arguments.size() != 4) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "MySQL database require mysql_hostname, mysql_database_name, mysql_username, mysql_password arguments."); - - - arguments[1] = evaluateConstantExpressionOrIdentifierAsLiteral(arguments[1], context); - const auto & host_port = safeGetLiteralValue(arguments[0], engine_name); - - if (engine_name == "MySQL") - { - size_t max_addresses = context->getSettingsRef().glob_expansion_max_elements; - configuration.addresses = parseRemoteDescriptionForExternalDatabase(host_port, max_addresses, 3306); - } - else - { - const auto & [remote_host, remote_port] = parseAddress(host_port, 3306); - configuration.host = remote_host; - configuration.port = remote_port; - } - - configuration.database = safeGetLiteralValue(arguments[1], engine_name); - configuration.username = safeGetLiteralValue(arguments[2], engine_name); - configuration.password = safeGetLiteralValue(arguments[3], engine_name); - } - - try - { - if (engine_name == "MySQL") - { - mysql_settings->loadFromQueryContext(context, *engine_define); - if (engine_define->settings) - mysql_settings->loadFromQuery(*engine_define); - - auto mysql_pool = createMySQLPoolWithFailover(configuration, *mysql_settings); - - return std::make_shared( - context, database_name, metadata_path, engine_define, configuration.database, - std::move(mysql_settings), std::move(mysql_pool), create.attach); - } - - MySQLClient client(configuration.host, configuration.port, configuration.username, configuration.password); - auto mysql_pool = mysqlxx::Pool(configuration.database, configuration.host, configuration.username, configuration.password, configuration.port); - - auto materialize_mode_settings = std::make_unique(); - - if (engine_define->settings) - materialize_mode_settings->loadFromQuery(*engine_define); - - if (uuid == UUIDHelpers::Nil) - { - auto print_create_ast = create.clone(); - print_create_ast->as()->attach = false; - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "The MaterializedMySQL database engine no longer supports Ordinary databases. To re-create the database, delete " - "the old one by executing \"rm -rf {}{{,.sql}}\", then re-create the database with the following query: {}", - metadata_path, - queryToString(print_create_ast)); - } - - return std::make_shared( - context, database_name, metadata_path, uuid, configuration.database, std::move(mysql_pool), - std::move(client), std::move(materialize_mode_settings)); - } - catch (...) 
- { - const auto & exception_message = getCurrentExceptionMessage(true); - throw Exception(ErrorCodes::CANNOT_CREATE_DATABASE, "Cannot create MySQL database, because {}", exception_message); - } - } -#endif - - else if (engine_name == "Lazy") - { - const ASTFunction * engine = engine_define->engine; - - if (!engine->arguments || engine->arguments->children.size() != 1) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Lazy database require cache_expiration_time_seconds argument"); - - const auto & arguments = engine->arguments->children; - - const auto cache_expiration_time_seconds = safeGetLiteralValue(arguments[0], "Lazy"); - return std::make_shared(database_name, metadata_path, cache_expiration_time_seconds, context); - } - - else if (engine_name == "Replicated") - { - const ASTFunction * engine = engine_define->engine; - - if (!engine->arguments || engine->arguments->children.size() != 3) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replicated database requires 3 arguments: zookeeper path, shard name and replica name"); - - auto & arguments = engine->arguments->children; - for (auto & engine_arg : arguments) - engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, context); - - String zookeeper_path = safeGetLiteralValue(arguments[0], "Replicated"); - String shard_name = safeGetLiteralValue(arguments[1], "Replicated"); - String replica_name = safeGetLiteralValue(arguments[2], "Replicated"); - - zookeeper_path = context->getMacros()->expand(zookeeper_path); - shard_name = context->getMacros()->expand(shard_name); - replica_name = context->getMacros()->expand(replica_name); - - DatabaseReplicatedSettings database_replicated_settings{}; - if (engine_define->settings) - database_replicated_settings.loadFromQuery(*engine_define); - - return std::make_shared(database_name, metadata_path, uuid, - zookeeper_path, shard_name, replica_name, - std::move(database_replicated_settings), context); - } - -#if USE_LIBPQXX - - else if (engine_name == "PostgreSQL") - { - const ASTFunction * engine = engine_define->engine; - if (!engine->arguments) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Engine `{}` must have arguments", engine_name); - - ASTs & engine_args = engine->arguments->children; - auto use_table_cache = false; - StoragePostgreSQL::Configuration configuration; - - if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context)) - { - configuration = StoragePostgreSQL::processNamedCollectionResult(*named_collection, false); - use_table_cache = named_collection->getOrDefault("use_table_cache", 0); - } - else - { - if (engine_args.size() < 4 || engine_args.size() > 6) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "PostgreSQL Database require `host:port`, `database_name`, `username`, `password`" - "[, `schema` = "", `use_table_cache` = 0"); - - for (auto & engine_arg : engine_args) - engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, context); - - const auto & host_port = safeGetLiteralValue(engine_args[0], engine_name); - size_t max_addresses = context->getSettingsRef().glob_expansion_max_elements; - - configuration.addresses = parseRemoteDescriptionForExternalDatabase(host_port, max_addresses, 5432); - configuration.database = safeGetLiteralValue(engine_args[1], engine_name); - configuration.username = safeGetLiteralValue(engine_args[2], engine_name); - configuration.password = safeGetLiteralValue(engine_args[3], engine_name); - - bool is_deprecated_syntax = false; - if (engine_args.size() >= 5) - { - auto arg_value = 
engine_args[4]->as()->value; - if (arg_value.getType() == Field::Types::Which::String) - { - configuration.schema = safeGetLiteralValue(engine_args[4], engine_name); - } - else - { - use_table_cache = safeGetLiteralValue(engine_args[4], engine_name); - LOG_WARNING(&Poco::Logger::get("DatabaseFactory"), "A deprecated syntax of PostgreSQL database engine is used"); - is_deprecated_syntax = true; - } - } - - if (!is_deprecated_syntax && engine_args.size() >= 6) - use_table_cache = safeGetLiteralValue(engine_args[5], engine_name); - } - - const auto & settings = context->getSettingsRef(); - auto pool = std::make_shared( - configuration, - settings.postgresql_connection_pool_size, - settings.postgresql_connection_pool_wait_timeout, - POSTGRESQL_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES, - settings.postgresql_connection_pool_auto_close_connection); - - return std::make_shared( - context, metadata_path, engine_define, database_name, configuration, pool, use_table_cache); - } - else if (engine_name == "MaterializedPostgreSQL") - { - const ASTFunction * engine = engine_define->engine; - if (!engine->arguments) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Engine `{}` must have arguments", engine_name); - - ASTs & engine_args = engine->arguments->children; - StoragePostgreSQL::Configuration configuration; - - if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context)) - { - configuration = StoragePostgreSQL::processNamedCollectionResult(*named_collection, false); - } - else - { - if (engine_args.size() != 4) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "MaterializedPostgreSQL Database require `host:port`, `database_name`, `username`, `password`."); - - for (auto & engine_arg : engine_args) - engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, context); - - auto parsed_host_port = parseAddress(safeGetLiteralValue(engine_args[0], engine_name), 5432); - - configuration.host = parsed_host_port.first; - configuration.port = parsed_host_port.second; - configuration.database = safeGetLiteralValue(engine_args[1], engine_name); - configuration.username = safeGetLiteralValue(engine_args[2], engine_name); - configuration.password = safeGetLiteralValue(engine_args[3], engine_name); - } - - auto connection_info = postgres::formatConnectionString( - configuration.database, configuration.host, configuration.port, configuration.username, configuration.password); - - auto postgresql_replica_settings = std::make_unique(); - if (engine_define->settings) - postgresql_replica_settings->loadFromQuery(*engine_define); - - return std::make_shared( - context, metadata_path, uuid, create.attach, - database_name, configuration.database, connection_info, - std::move(postgresql_replica_settings)); - } - - -#endif - -#if USE_SQLITE - else if (engine_name == "SQLite") - { - const ASTFunction * engine = engine_define->engine; - - if (!engine->arguments || engine->arguments->children.size() != 1) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "SQLite database requires 1 argument: database path"); - - const auto & arguments = engine->arguments->children; - - String database_path = safeGetLiteralValue(arguments[0], "SQLite"); - - return std::make_shared(context, engine_define, create.attach, database_path); - } -#endif - - else if (engine_name == "Filesystem") - { - const ASTFunction * engine = engine_define->engine; - - /// If init_path is empty, then the current path will be used - std::string init_path; - - if (engine->arguments && !engine->arguments->children.empty()) - { - if 
(engine->arguments->children.size() != 1) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filesystem database requires at most 1 argument: filesystem_path"); - - const auto & arguments = engine->arguments->children; - init_path = safeGetLiteralValue(arguments[0], engine_name); - } - - return std::make_shared(database_name, init_path, context); - } - -#if USE_AWS_S3 - else if (engine_name == "S3") - { - const ASTFunction * engine = engine_define->engine; - - DatabaseS3::Configuration config; - - if (engine->arguments && !engine->arguments->children.empty()) - { - ASTs & engine_args = engine->arguments->children; - config = DatabaseS3::parseArguments(engine_args, context); - } - - return std::make_shared(database_name, config, context); - } -#endif - -#if USE_HDFS - else if (engine_name == "HDFS") - { - const ASTFunction * engine = engine_define->engine; - - /// If source_url is empty, then table name must contain full url - std::string source_url; - - if (engine->arguments && !engine->arguments->children.empty()) - { - if (engine->arguments->children.size() != 1) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "HDFS database requires at most 1 argument: source_url"); - - const auto & arguments = engine->arguments->children; - source_url = safeGetLiteralValue(arguments[0], engine_name); - } - - return std::make_shared(database_name, source_url, context); - } -#endif - - throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", engine_name); + const String & engine_name = storage->engine->name; + + bool has_engine_args = false; + if (storage->engine->arguments) + has_engine_args = true; + + ASTs empty_engine_args; + Arguments arguments{ + .engine_name = engine_name, + .engine_args = has_engine_args ? storage->engine->arguments->children : empty_engine_args, + .create_query = create, + .database_name = database_name, + .metadata_path = metadata_path, + .uuid = create.uuid, + .context = context}; + + // creator_fn creates and returns a DatabasePtr with the supplied arguments + auto creator_fn = database_engines.at(engine_name); + + return creator_fn(arguments); } } diff --git a/src/Databases/DatabaseFactory.h b/src/Databases/DatabaseFactory.h index cb631cd76d0..c86eaddb29d 100644 --- a/src/Databases/DatabaseFactory.h +++ b/src/Databases/DatabaseFactory.h @@ -2,18 +2,60 @@ #include #include +#include +#include namespace DB { +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + class ASTCreateQuery; -class DatabaseFactory +template +static inline ValueType safeGetLiteralValue(const ASTPtr &ast, const String &engine_name) +{ + if (!ast || !ast->as()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine {} requested literal argument.", engine_name); + + return ast->as()->value.safeGet(); +} + +class DatabaseFactory : private boost::noncopyable { public: - static DatabasePtr get(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context); - static DatabasePtr getImpl(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context); + static DatabaseFactory & instance(); + + struct Arguments + { + const String & engine_name; + ASTs & engine_args; + ASTStorage * storage; + const ASTCreateQuery & create_query; + const String & database_name; + const String & metadata_path; + const UUID & uuid; + ContextPtr & context; + }; + + DatabasePtr get(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context); + + using CreatorFn = std::function; + + using DatabaseEngines = std::unordered_map; + + void 
registerDatabase(const std::string & name, CreatorFn creator_fn); + + const DatabaseEngines & getDatabaseEngines() const { return database_engines; } + +private: + DatabaseEngines database_engines; + + DatabasePtr getImpl(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context); }; } diff --git a/src/Databases/DatabaseFilesystem.cpp b/src/Databases/DatabaseFilesystem.cpp index ca1b5b27a59..5564f1d07cf 100644 --- a/src/Databases/DatabaseFilesystem.cpp +++ b/src/Databases/DatabaseFilesystem.cpp @@ -1,3 +1,4 @@ +#include #include #include @@ -237,4 +238,28 @@ DatabaseTablesIteratorPtr DatabaseFilesystem::getTablesIterator(ContextPtr, cons return std::make_unique(Tables{}, getDatabaseName()); } +void registerDatabaseFilesystem(DatabaseFactory & factory) +{ + auto create_fn = [](const DatabaseFactory::Arguments & args) + { + auto * engine_define = args.create_query.storage; + const ASTFunction * engine = engine_define->engine; + const String & engine_name = engine_define->engine->name; + + /// If init_path is empty, then the current path will be used + std::string init_path; + + if (engine->arguments && !engine->arguments->children.empty()) + { + if (engine->arguments->children.size() != 1) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filesystem database requires at most 1 argument: filesystem_path"); + + const auto & arguments = engine->arguments->children; + init_path = safeGetLiteralValue(arguments[0], engine_name); + } + + return std::make_shared(args.database_name, init_path, args.context); + }; + factory.registerDatabase("Filesystem", create_fn); +} } diff --git a/src/Databases/DatabaseHDFS.cpp b/src/Databases/DatabaseHDFS.cpp index 750d79c8493..6810f655116 100644 --- a/src/Databases/DatabaseHDFS.cpp +++ b/src/Databases/DatabaseHDFS.cpp @@ -2,6 +2,7 @@ #if USE_HDFS +#include #include #include @@ -237,6 +238,30 @@ DatabaseTablesIteratorPtr DatabaseHDFS::getTablesIterator(ContextPtr, const Filt return std::make_unique(Tables{}, getDatabaseName()); } +void registerDatabaseHDFS(DatabaseFactory & factory) +{ + auto create_fn = [](const DatabaseFactory::Arguments & args) + { + auto * engine_define = args.create_query.storage; + const ASTFunction * engine = engine_define->engine; + const String & engine_name = engine_define->engine->name; + + /// If source_url is empty, then table name must contain full url + std::string source_url; + + if (engine->arguments && !engine->arguments->children.empty()) + { + if (engine->arguments->children.size() != 1) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "HDFS database requires at most 1 argument: source_url"); + + const auto & arguments = engine->arguments->children; + source_url = safeGetLiteralValue(arguments[0], engine_name); + } + + return std::make_shared(args.database_name, source_url, args.context); + }; + factory.registerDatabase("HDFS", create_fn); +} } // DB #endif diff --git a/src/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp index 896ae99656f..fcd832e7cc2 100644 --- a/src/Databases/DatabaseLazy.cpp +++ b/src/Databases/DatabaseLazy.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -7,6 +8,7 @@ #include #include #include +#include #include #include @@ -18,6 +20,13 @@ namespace fs = std::filesystem; + +namespace CurrentMetrics +{ + extern const Metric AttachedTable; +} + + namespace DB { @@ -27,6 +36,7 @@ namespace ErrorCodes extern const int UNKNOWN_TABLE; extern const int UNSUPPORTED_METHOD; extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; } @@ -174,6 +184,7 @@ void 
DatabaseLazy::attachTable(ContextPtr /* context_ */, const String & table_n throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Table {}.{} already exists.", backQuote(database_name), backQuote(table_name)); it->second.expiration_iterator = cache_expiration_queue.emplace(cache_expiration_queue.end(), current_time, table_name); + CurrentMetrics::add(CurrentMetrics::AttachedTable, 1); } StoragePtr DatabaseLazy::detachTable(ContextPtr /* context */, const String & table_name) @@ -189,6 +200,7 @@ StoragePtr DatabaseLazy::detachTable(ContextPtr /* context */, const String & ta if (it->second.expiration_iterator != cache_expiration_queue.end()) cache_expiration_queue.erase(it->second.expiration_iterator); tables_cache.erase(it); + CurrentMetrics::sub(CurrentMetrics::AttachedTable, 1); } return res; } @@ -345,4 +357,26 @@ const StoragePtr & DatabaseLazyIterator::table() const return current_storage; } +void registerDatabaseLazy(DatabaseFactory & factory) +{ + auto create_fn = [](const DatabaseFactory::Arguments & args) + { + auto * engine_define = args.create_query.storage; + const ASTFunction * engine = engine_define->engine; + + if (!engine->arguments || engine->arguments->children.size() != 1) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Lazy database require cache_expiration_time_seconds argument"); + + const auto & arguments = engine->arguments->children; + + const auto cache_expiration_time_seconds = safeGetLiteralValue(arguments[0], "Lazy"); + + return make_shared( + args.database_name, + args.metadata_path, + cache_expiration_time_seconds, + args.context); + }; + factory.registerDatabase("Lazy", create_fn); +} } diff --git a/src/Databases/DatabaseMemory.cpp b/src/Databases/DatabaseMemory.cpp index 2a7a2ad8ccc..794eebbc399 100644 --- a/src/Databases/DatabaseMemory.cpp +++ b/src/Databases/DatabaseMemory.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -209,4 +210,15 @@ std::vector> DatabaseMemory::getTablesForBackup(co return res; } +void registerDatabaseMemory(DatabaseFactory & factory) +{ + auto create_fn = [](const DatabaseFactory::Arguments & args) + { + return make_shared( + args.database_name, + args.context); + }; + factory.registerDatabase("Memory", create_fn); +} + } diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index f5f30d0e977..12b0dc07799 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -675,8 +675,11 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata( if (errno == ENOENT && !throw_on_error) return nullptr; - throwFromErrnoWithPath("Cannot open file " + metadata_file_path, metadata_file_path, - errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); + ErrnoException::throwFromPath( + errno == ENOENT ? 
ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, + metadata_file_path, + "Cannot open file {}", + metadata_file_path); } ReadBufferFromFile in(metadata_file_fd, metadata_file_path, METADATA_FILE_BUFFER_SIZE); diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index 1f344551c5e..8973b533720 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include @@ -37,6 +38,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int UNKNOWN_DATABASE_ENGINE; } static constexpr size_t METADATA_FILE_BUFFER_SIZE = 32768; @@ -139,6 +141,8 @@ void DatabaseOrdinary::loadTableFromMetadata( assert(name.database == TSA_SUPPRESS_WARNING_FOR_READ(database_name)); const auto & query = ast->as(); + LOG_TRACE(log, "Loading table {}", name.getFullName()); + try { auto [table_name, table] = createTableFromAST( @@ -319,4 +323,19 @@ void DatabaseOrdinary::commitAlterTable(const StorageID &, const String & table_ } } +void registerDatabaseOrdinary(DatabaseFactory & factory) +{ + auto create_fn = [](const DatabaseFactory::Arguments & args) + { + if (!args.create_query.attach && !args.context->getSettingsRef().allow_deprecated_database_ordinary) + throw Exception( + ErrorCodes::UNKNOWN_DATABASE_ENGINE, + "Ordinary database engine is deprecated (see also allow_deprecated_database_ordinary setting)"); + return make_shared( + args.database_name, + args.metadata_path, + args.context); + }; + factory.registerDatabase("Ordinary", create_fn); +} } diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 36dd858dcf7..d484b223706 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -1054,7 +1055,7 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep for (auto & [_, intermediate, to] : replicated_tables_to_rename) rename_table(intermediate, to); - LOG_DEBUG(log, "Renames completed succesessfully"); + LOG_DEBUG(log, "Renames completed successfully"); for (const auto & id : dropped_tables) DatabaseCatalog::instance().waitTableFinallyDropped(id); @@ -1204,7 +1205,7 @@ ASTPtr DatabaseReplicated::parseQueryFromMetadataInZooKeeper(const String & node } void DatabaseReplicated::dropReplica( - DatabaseReplicated * database, const String & database_zookeeper_path, const String & shard, const String & replica) + DatabaseReplicated * database, const String & database_zookeeper_path, const String & shard, const String & replica, bool throw_if_noop) { assert(!database || database_zookeeper_path == database->zookeeper_path); @@ -1215,14 +1216,21 @@ void DatabaseReplicated::dropReplica( auto zookeeper = Context::getGlobalContextInstance()->getZooKeeper(); - String database_mark = zookeeper->get(database_zookeeper_path); + String database_mark; + bool db_path_exists = zookeeper->tryGet(database_zookeeper_path, database_mark); + if (!db_path_exists && !throw_if_noop) + return; if (database_mark != REPLICATED_DATABASE_MARK) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path {} does not look like a path of Replicated database", database_zookeeper_path); String database_replica_path = fs::path(database_zookeeper_path) / "replicas" / full_replica_name; if (!zookeeper->exists(database_replica_path)) + { + if (!throw_if_noop) + return; throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replica {} 
does not exist (database path: {})", full_replica_name, database_zookeeper_path); + } if (zookeeper->exists(database_replica_path + "/active")) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replica {} is active, cannot drop it (database path: {})", @@ -1645,4 +1653,41 @@ bool DatabaseReplicated::shouldReplicateQuery(const ContextPtr & query_context, return true; } +void registerDatabaseReplicated(DatabaseFactory & factory) +{ + auto create_fn = [](const DatabaseFactory::Arguments & args) + { + auto * engine_define = args.create_query.storage; + const ASTFunction * engine = engine_define->engine; + + if (!engine->arguments || engine->arguments->children.size() != 3) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replicated database requires 3 arguments: zookeeper path, shard name and replica name"); + + auto & arguments = engine->arguments->children; + for (auto & engine_arg : arguments) + engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, args.context); + + String zookeeper_path = safeGetLiteralValue(arguments[0], "Replicated"); + String shard_name = safeGetLiteralValue(arguments[1], "Replicated"); + String replica_name = safeGetLiteralValue(arguments[2], "Replicated"); + + zookeeper_path = args.context->getMacros()->expand(zookeeper_path); + shard_name = args.context->getMacros()->expand(shard_name); + replica_name = args.context->getMacros()->expand(replica_name); + + DatabaseReplicatedSettings database_replicated_settings{}; + if (engine_define->settings) + database_replicated_settings.loadFromQuery(*engine_define); + + return std::make_shared( + args.database_name, + args.metadata_path, + args.uuid, + zookeeper_path, + shard_name, + replica_name, + std::move(database_replicated_settings), args.context); + }; + factory.registerDatabase("Replicated", create_fn); +} } diff --git a/src/Databases/DatabaseReplicated.h b/src/Databases/DatabaseReplicated.h index 202f5cc5c14..8a3999e70e9 100644 --- a/src/Databases/DatabaseReplicated.h +++ b/src/Databases/DatabaseReplicated.h @@ -79,7 +79,7 @@ public: bool shouldReplicateQuery(const ContextPtr & query_context, const ASTPtr & query_ptr) const override; - static void dropReplica(DatabaseReplicated * database, const String & database_zookeeper_path, const String & shard, const String & replica); + static void dropReplica(DatabaseReplicated * database, const String & database_zookeeper_path, const String & shard, const String & replica, bool throw_if_noop); std::vector tryGetAreReplicasActive(const ClusterPtr & cluster_) const; diff --git a/src/Databases/DatabaseReplicatedWorker.cpp b/src/Databases/DatabaseReplicatedWorker.cpp index 5f103a52a61..2056b403ff6 100644 --- a/src/Databases/DatabaseReplicatedWorker.cpp +++ b/src/Databases/DatabaseReplicatedWorker.cpp @@ -74,7 +74,7 @@ void DatabaseReplicatedDDLWorker::initializeReplication() /// Create "active" node (remove previous one if necessary) String active_path = fs::path(database->replica_path) / "active"; String active_id = toString(ServerUUID::get()); - zookeeper->handleEphemeralNodeExistence(active_path, active_id); + zookeeper->deleteEphemeralNodeIfContentMatches(active_path, active_id); zookeeper->create(active_path, active_id, zkutil::CreateMode::Ephemeral); active_node_holder.reset(); active_node_holder_zookeeper = zookeeper; diff --git a/src/Databases/DatabaseS3.cpp b/src/Databases/DatabaseS3.cpp index 11655f5f100..1721b0e9e97 100644 --- a/src/Databases/DatabaseS3.cpp +++ b/src/Databases/DatabaseS3.cpp @@ -2,6 +2,7 @@ #if USE_AWS_S3 +#include #include #include @@ -255,7 
+256,7 @@ DatabaseS3::Configuration DatabaseS3::parseArguments(ASTs engine_args, ContextPt arg = evaluateConstantExpressionOrIdentifierAsLiteral(arg, context_); if (engine_args.size() > 3) - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, error_message.c_str()); + throw Exception::createRuntime(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, error_message.c_str()); if (engine_args.empty()) return result; @@ -269,7 +270,7 @@ DatabaseS3::Configuration DatabaseS3::parseArguments(ASTs engine_args, ContextPt if (boost::iequals(second_arg, "NOSIGN")) result.no_sign_request = true; else - throw Exception(ErrorCodes::BAD_ARGUMENTS, error_message.c_str()); + throw Exception::createRuntime(ErrorCodes::BAD_ARGUMENTS, error_message.c_str()); } // url, access_key_id, secret_access_key @@ -279,7 +280,7 @@ DatabaseS3::Configuration DatabaseS3::parseArguments(ASTs engine_args, ContextPt auto secret_key = checkAndGetLiteralArgument(engine_args[2], "secret_access_key"); if (key_id.empty() || secret_key.empty() || boost::iequals(key_id, "NOSIGN")) - throw Exception(ErrorCodes::BAD_ARGUMENTS, error_message.c_str()); + throw Exception::createRuntime(ErrorCodes::BAD_ARGUMENTS, error_message.c_str()); result.access_key_id = key_id; result.secret_access_key = secret_key; @@ -307,6 +308,24 @@ DatabaseTablesIteratorPtr DatabaseS3::getTablesIterator(ContextPtr, const Filter return std::make_unique(Tables{}, getDatabaseName()); } -} +void registerDatabaseS3(DatabaseFactory & factory) +{ + auto create_fn = [](const DatabaseFactory::Arguments & args) + { + auto * engine_define = args.create_query.storage; + const ASTFunction * engine = engine_define->engine; + DatabaseS3::Configuration config; + + if (engine->arguments && !engine->arguments->children.empty()) + { + ASTs & engine_args = engine->arguments->children; + config = DatabaseS3::parseArguments(engine_args, args.context); + } + + return std::make_shared(args.database_name, config, args.context); + }; + factory.registerDatabase("S3", create_fn); +} +} #endif diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index 7bc445c5b5d..bda48737621 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -8,11 +8,17 @@ #include #include #include +#include #include #include #include #include +namespace CurrentMetrics +{ + extern const Metric AttachedTable; +} + namespace DB { @@ -59,6 +65,11 @@ void applyMetadataChangesToCreateQuery(const ASTPtr & query, const StorageInMemo query->replace(ast_create_query.select, metadata.select.select_query); } + if (metadata.refresh) + { + query->replace(ast_create_query.refresh_strategy, metadata.refresh); + } + /// MaterializedView, Dictionary are types of CREATE query without storage. if (ast_create_query.storage) { @@ -239,6 +250,7 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n res = it->second; tables.erase(it); res->is_detached = true; + CurrentMetrics::sub(CurrentMetrics::AttachedTable, 1); auto table_id = res->getStorageID(); if (table_id.hasUUID()) @@ -279,6 +291,7 @@ void DatabaseWithOwnTablesBase::attachTableUnlocked(const String & table_name, c /// It is important to reset is_detached here since in case of RENAME in /// non-Atomic database the is_detached is set to true before RENAME. 
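/// --- Illustrative sketch (not part of the patch) ------------------------------
/// The hunks above pair CurrentMetrics::add in attachTableUnlocked with
/// CurrentMetrics::sub in detachTableUnlocked so that the AttachedTable gauge
/// always equals the number of currently attached tables. A minimal sketch of
/// that bookkeeping pattern follows; TableRegistry and the header path are
/// assumptions, only the CurrentMetrics calls and the AttachedTable metric name
/// come from the diff itself.
#include <Common/CurrentMetrics.h>   /// header path assumed
#include <map>
#include <memory>
#include <string>

namespace CurrentMetrics
{
    extern const Metric AttachedTable;   /// declared the same way as in the diff
}

class TableRegistry   /// hypothetical container
{
public:
    void attach(const std::string & name, std::shared_ptr<void> table)
    {
        bool inserted = tables.emplace(name, std::move(table)).second;
        if (inserted)
            CurrentMetrics::add(CurrentMetrics::AttachedTable, 1);   /// gauge goes up only on a real attach
    }

    std::shared_ptr<void> detach(const std::string & name)
    {
        auto it = tables.find(name);
        if (it == tables.end())
            return nullptr;
        auto res = std::move(it->second);
        tables.erase(it);
        CurrentMetrics::sub(CurrentMetrics::AttachedTable, 1);   /// and down only after the erase succeeds
        return res;
    }

private:
    std::map<std::string, std::shared_ptr<void>> tables;
};
/// ------------------------------------------------------------------------------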
table->is_detached = false; + CurrentMetrics::add(CurrentMetrics::AttachedTable, 1); } void DatabaseWithOwnTablesBase::shutdown() diff --git a/src/Databases/IDatabase.cpp b/src/Databases/IDatabase.cpp index 09640d2f86e..ae8fc58bf89 100644 --- a/src/Databases/IDatabase.cpp +++ b/src/Databases/IDatabase.cpp @@ -5,8 +5,14 @@ #include #include #include +#include +namespace CurrentMetrics +{ + extern const Metric AttachedDatabase; +} + namespace DB { @@ -21,12 +27,31 @@ StoragePtr IDatabase::getTable(const String & name, ContextPtr context) const { if (auto storage = tryGetTable(name, context)) return storage; + TableNameHints hints(this->shared_from_this(), context); - std::vector names = hints.getHints(name); - if (names.empty()) + /// hint is a pair which holds a single database_name and table_name suggestion for the given table name. + auto hint = hints.getHintForTable(name); + + if (hint.first.empty()) throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} does not exist", backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name)); else - throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} does not exist. Maybe you meant {}?", backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name), backQuoteIfNeed(names[0])); + throw Exception( + ErrorCodes::UNKNOWN_TABLE, + "Table {}.{} does not exist. Maybe you meant {}.{}?", + backQuoteIfNeed(getDatabaseName()), + backQuoteIfNeed(name), + backQuoteIfNeed(hint.first), + backQuoteIfNeed(hint.second)); +} + +IDatabase::IDatabase(String database_name_) : database_name(std::move(database_name_)) +{ + CurrentMetrics::add(CurrentMetrics::AttachedDatabase, 1); +} + +IDatabase::~IDatabase() +{ + CurrentMetrics::sub(CurrentMetrics::AttachedDatabase, 1); } std::vector> IDatabase::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h index e5afa0eb944..15e453371b7 100644 --- a/src/Databases/IDatabase.h +++ b/src/Databases/IDatabase.h @@ -139,7 +139,7 @@ class IDatabase : public std::enable_shared_from_this { public: IDatabase() = delete; - explicit IDatabase(String database_name_) : database_name(std::move(database_name_)) {} + explicit IDatabase(String database_name_); /// Get name of database engine. virtual String getEngineName() const = 0; @@ -420,7 +420,7 @@ public: /// Creates a table restored from backup. 
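/// Sketch of the lifetime-counting pattern the IDatabase changes above rely on: the
/// AttachedDatabase gauge is touched only in the now out-of-line constructor and
/// destructor, so every engine derived from IDatabase is counted exactly once.
/// Everything except the metric name and the add/sub calls is an assumption here.
#include <Common/CurrentMetrics.h>   /// header path assumed

namespace CurrentMetrics
{
    extern const Metric AttachedDatabase;
}

class CountedDatabaseBase   /// hypothetical stand-in for IDatabase
{
public:
    CountedDatabaseBase() { CurrentMetrics::add(CurrentMetrics::AttachedDatabase, 1); }
    virtual ~CountedDatabaseBase() { CurrentMetrics::sub(CurrentMetrics::AttachedDatabase, 1); }
};
/// Because the destructor now has a body, the header can no longer declare
/// `virtual ~IDatabase() = default;` — hence the declaration-only change in IDatabase.h above.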
virtual void createTableRestoredFromBackup(const ASTPtr & create_table_query, ContextMutablePtr context, std::shared_ptr restore_coordination, UInt64 timeout_ms); - virtual ~IDatabase() = default; + virtual ~IDatabase(); protected: virtual ASTPtr getCreateTableQueryImpl(const String & /*name*/, ContextPtr /*context*/, bool throw_on_error) const diff --git a/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp b/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp index a31e74cc7ae..cbb080a0baa 100644 --- a/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp +++ b/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp @@ -2,13 +2,20 @@ #if USE_MYSQL +# include +# include # include -# include +# include +# include # include # include # include +# include +# include +# include # include +# include # include # include # include @@ -21,6 +28,7 @@ namespace DB namespace ErrorCodes { extern const int NOT_IMPLEMENTED; + extern const int BAD_ARGUMENTS; } DatabaseMaterializedMySQL::DatabaseMaterializedMySQL( @@ -179,6 +187,86 @@ void DatabaseMaterializedMySQL::stopReplication() started_up = false; } +void registerDatabaseMaterializedMySQL(DatabaseFactory & factory) +{ + auto create_fn = [](const DatabaseFactory::Arguments & args) + { + auto * engine_define = args.create_query.storage; + const ASTFunction * engine = engine_define->engine; + const String & engine_name = engine_define->engine->name; + + if (!engine->arguments) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Engine `{}` must have arguments", engine_name); + StorageMySQL::Configuration configuration; + ASTs & arguments = engine->arguments->children; + auto mysql_settings = std::make_unique(); + + if (auto named_collection = tryGetNamedCollectionWithOverrides(arguments, args.context)) + { + configuration = StorageMySQL::processNamedCollectionResult(*named_collection, *mysql_settings, args.context, false); + } + else + { + if (arguments.size() != 4) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "MySQL database require mysql_hostname, mysql_database_name, mysql_username, mysql_password arguments."); + + + arguments[1] = evaluateConstantExpressionOrIdentifierAsLiteral(arguments[1], args.context); + const auto & host_port = safeGetLiteralValue(arguments[0], engine_name); + + if (engine_name == "MySQL") + { + size_t max_addresses = args.context->getSettingsRef().glob_expansion_max_elements; + configuration.addresses = parseRemoteDescriptionForExternalDatabase(host_port, max_addresses, 3306); + } + else + { + const auto & [remote_host, remote_port] = parseAddress(host_port, 3306); + configuration.host = remote_host; + configuration.port = remote_port; + } + + configuration.database = safeGetLiteralValue(arguments[1], engine_name); + configuration.username = safeGetLiteralValue(arguments[2], engine_name); + configuration.password = safeGetLiteralValue(arguments[3], engine_name); + } + MySQLClient client(configuration.host, configuration.port, configuration.username, configuration.password); + auto mysql_pool + = mysqlxx::Pool(configuration.database, configuration.host, configuration.username, configuration.password, configuration.port); + + auto materialize_mode_settings = std::make_unique(); + + if (engine_define->settings) + materialize_mode_settings->loadFromQuery(*engine_define); + + if (args.uuid == UUIDHelpers::Nil) + { + auto print_create_ast = args.create_query.clone(); + print_create_ast->as()->attach = false; + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, + "The MaterializedMySQL database engine no longer supports Ordinary databases. 
To re-create the database, delete " + "the old one by executing \"rm -rf {}{{,.sql}}\", then re-create the database with the following query: {}", + args.metadata_path, + queryToString(print_create_ast)); + } + + return make_shared( + args.context, + args.database_name, + args.metadata_path, + args.uuid, + configuration.database, + std::move(mysql_pool), + std::move(client), + std::move(materialize_mode_settings)); + }; + factory.registerDatabase("MaterializeMySQL", create_fn); + factory.registerDatabase("MaterializedMySQL", create_fn); +} + } #endif diff --git a/src/Databases/MySQL/DatabaseMySQL.cpp b/src/Databases/MySQL/DatabaseMySQL.cpp index 7d2ed7a9662..96a5c3a18ce 100644 --- a/src/Databases/MySQL/DatabaseMySQL.cpp +++ b/src/Databases/MySQL/DatabaseMySQL.cpp @@ -2,6 +2,7 @@ #if USE_MYSQL # include +# include # include # include # include @@ -14,6 +15,7 @@ # include # include # include +# include # include # include # include @@ -21,8 +23,11 @@ # include # include # include +# include +# include # include # include +# include # include # include # include @@ -41,6 +46,8 @@ namespace ErrorCodes extern const int TABLE_IS_DROPPED; extern const int TABLE_ALREADY_EXISTS; extern const int UNEXPECTED_AST_STRUCTURE; + extern const int CANNOT_CREATE_DATABASE; + extern const int BAD_ARGUMENTS; } constexpr static const auto suffix = ".remove_flag"; @@ -504,6 +511,77 @@ void DatabaseMySQL::createTable(ContextPtr local_context, const String & table_n attachTable(local_context, table_name, storage, {}); } +void registerDatabaseMySQL(DatabaseFactory & factory) +{ + auto create_fn = [](const DatabaseFactory::Arguments & args) + { + auto * engine_define = args.create_query.storage; + const ASTFunction * engine = engine_define->engine; + const String & engine_name = engine_define->engine->name; + if (!engine->arguments) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Engine `{}` must have arguments", engine_name); + + StorageMySQL::Configuration configuration; + ASTs & arguments = engine->arguments->children; + auto mysql_settings = std::make_unique(); + + if (auto named_collection = tryGetNamedCollectionWithOverrides(arguments, args.context)) + { + configuration = StorageMySQL::processNamedCollectionResult(*named_collection, *mysql_settings, args.context, false); + } + else + { + if (arguments.size() != 4) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "MySQL database require mysql_hostname, mysql_database_name, mysql_username, mysql_password arguments."); + + + arguments[1] = evaluateConstantExpressionOrIdentifierAsLiteral(arguments[1], args.context); + const auto & host_port = safeGetLiteralValue(arguments[0], engine_name); + + if (engine_name == "MySQL") + { + size_t max_addresses = args.context->getSettingsRef().glob_expansion_max_elements; + configuration.addresses = parseRemoteDescriptionForExternalDatabase(host_port, max_addresses, 3306); + } + else + { + const auto & [remote_host, remote_port] = parseAddress(host_port, 3306); + configuration.host = remote_host; + configuration.port = remote_port; + } + + configuration.database = safeGetLiteralValue(arguments[1], engine_name); + configuration.username = safeGetLiteralValue(arguments[2], engine_name); + configuration.password = safeGetLiteralValue(arguments[3], engine_name); + } + mysql_settings->loadFromQueryContext(args.context, *engine_define); + if (engine_define->settings) + mysql_settings->loadFromQuery(*engine_define); + + auto mysql_pool = createMySQLPoolWithFailover(configuration, *mysql_settings); + + try + { + return make_shared( + 
args.context, + args.database_name, + args.metadata_path, + engine_define, + configuration.database, + std::move(mysql_settings), + std::move(mysql_pool), + args.create_query.attach); + } + catch (...) + { + const auto & exception_message = getCurrentExceptionMessage(true); + throw Exception(ErrorCodes::CANNOT_CREATE_DATABASE, "Cannot create MySQL database, because {}", exception_message); + } + }; + factory.registerDatabase("MySQL", create_fn); +} } #endif diff --git a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp index 78be0611631..a659821e179 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp @@ -8,23 +8,25 @@ #include #include #include +#include +#include #include #include #include #include #include +#include +#include #include #include #include #include #include -#include #include #include +#include #include #include -#include -#include namespace DB { @@ -471,6 +473,59 @@ DatabaseTablesIteratorPtr DatabaseMaterializedPostgreSQL::getTablesIterator( return DatabaseAtomic::getTablesIterator(StorageMaterializedPostgreSQL::makeNestedTableContext(local_context), filter_by_table_name); } +void registerDatabaseMaterializedPostgreSQL(DatabaseFactory & factory) +{ + auto create_fn = [](const DatabaseFactory::Arguments & args) + { + auto * engine_define = args.create_query.storage; + const ASTFunction * engine = engine_define->engine; + ASTs & engine_args = engine->arguments->children; + const String & engine_name = engine_define->engine->name; + + if (!engine->arguments) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Engine `{}` must have arguments", engine_name); + + StoragePostgreSQL::Configuration configuration; + + if (!engine->arguments) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Engine `{}` must have arguments", engine_name); + + if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, args.context)) + { + configuration = StoragePostgreSQL::processNamedCollectionResult(*named_collection, args.context, false); + } + else + { + if (engine_args.size() != 4) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "MaterializedPostgreSQL Database require `host:port`, `database_name`, `username`, `password`."); + + for (auto & engine_arg : engine_args) + engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, args.context); + + auto parsed_host_port = parseAddress(safeGetLiteralValue(engine_args[0], engine_name), 5432); + + configuration.host = parsed_host_port.first; + configuration.port = parsed_host_port.second; + configuration.database = safeGetLiteralValue(engine_args[1], engine_name); + configuration.username = safeGetLiteralValue(engine_args[2], engine_name); + configuration.password = safeGetLiteralValue(engine_args[3], engine_name); + } + + auto connection_info = postgres::formatConnectionString( + configuration.database, configuration.host, configuration.port, configuration.username, configuration.password); + + auto postgresql_replica_settings = std::make_unique(); + if (engine_define->settings) + postgresql_replica_settings->loadFromQuery(*engine_define); + + return std::make_shared( + args.context, args.metadata_path, args.uuid, args.create_query.attach, + args.database_name, configuration.database, connection_info, + std::move(postgresql_replica_settings)); + }; + factory.registerDatabase("MaterializedPostgreSQL", create_fn); +} } #endif diff --git 
a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index 24f04c16029..1fe5c078581 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -6,14 +6,18 @@ #include #include +#include #include #include +#include #include #include #include #include #include #include +#include +#include #include #include #include @@ -478,6 +482,83 @@ ASTPtr DatabasePostgreSQL::getColumnDeclaration(const DataTypePtr & data_type) c return std::make_shared(data_type->getName()); } +void registerDatabasePostgreSQL(DatabaseFactory & factory) +{ + auto create_fn = [](const DatabaseFactory::Arguments & args) + { + auto * engine_define = args.create_query.storage; + const ASTFunction * engine = engine_define->engine; + ASTs & engine_args = engine->arguments->children; + const String & engine_name = engine_define->engine->name; + + if (!engine->arguments) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Engine `{}` must have arguments", engine_name); + + auto use_table_cache = false; + StoragePostgreSQL::Configuration configuration; + + if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, args.context)) + { + configuration = StoragePostgreSQL::processNamedCollectionResult(*named_collection, args.context, false); + use_table_cache = named_collection->getOrDefault("use_table_cache", 0); + } + else + { + if (engine_args.size() < 4 || engine_args.size() > 6) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "PostgreSQL Database require `host:port`, `database_name`, `username`, `password`" + "[, `schema` = "", `use_table_cache` = 0"); + + for (auto & engine_arg : engine_args) + engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, args.context); + + const auto & host_port = safeGetLiteralValue(engine_args[0], engine_name); + size_t max_addresses = args.context->getSettingsRef().glob_expansion_max_elements; + + configuration.addresses = parseRemoteDescriptionForExternalDatabase(host_port, max_addresses, 5432); + configuration.database = safeGetLiteralValue(engine_args[1], engine_name); + configuration.username = safeGetLiteralValue(engine_args[2], engine_name); + configuration.password = safeGetLiteralValue(engine_args[3], engine_name); + + bool is_deprecated_syntax = false; + if (engine_args.size() >= 5) + { + auto arg_value = engine_args[4]->as()->value; + if (arg_value.getType() == Field::Types::Which::String) + { + configuration.schema = safeGetLiteralValue(engine_args[4], engine_name); + } + else + { + use_table_cache = safeGetLiteralValue(engine_args[4], engine_name); + LOG_WARNING(&Poco::Logger::get("DatabaseFactory"), "A deprecated syntax of PostgreSQL database engine is used"); + is_deprecated_syntax = true; + } + } + + if (!is_deprecated_syntax && engine_args.size() >= 6) + use_table_cache = safeGetLiteralValue(engine_args[5], engine_name); + } + + const auto & settings = args.context->getSettingsRef(); + auto pool = std::make_shared( + configuration, + settings.postgresql_connection_pool_size, + settings.postgresql_connection_pool_wait_timeout, + POSTGRESQL_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES, + settings.postgresql_connection_pool_auto_close_connection); + + return std::make_shared( + args.context, + args.metadata_path, + engine_define, + args.database_name, + configuration, + pool, + use_table_cache); + }; + factory.registerDatabase("PostgreSQL", create_fn); +} } #endif diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp 
b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 65d53049dac..469ca52890a 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -25,6 +25,7 @@ namespace ErrorCodes { extern const int UNKNOWN_TABLE; extern const int BAD_ARGUMENTS; + extern const int LOGICAL_ERROR; } @@ -158,6 +159,15 @@ static DataTypePtr convertPostgreSQLDataType(String & type, Fn auto && r return res; } +/// Check if PostgreSQL relation is empty. +/// postgres_table must be already quoted + schema-qualified. +template +bool isTableEmpty(T & tx, const String & postgres_table) +{ + auto query = fmt::format("SELECT NOT EXISTS (SELECT * FROM {} LIMIT 1);", postgres_table); + pqxx::result result{tx.exec(query)}; + return result[0][0].as(); +} template PostgreSQLTableStructure::ColumnsInfoPtr readNamesAndTypesList( @@ -186,20 +196,24 @@ PostgreSQLTableStructure::ColumnsInfoPtr readNamesAndTypesList( } else { - std::tuple row; + std::tuple row; while (stream >> row) { - auto data_type = convertPostgreSQLDataType( + const auto column_name = std::get<0>(row); + const auto data_type = convertPostgreSQLDataType( std::get<1>(row), recheck_array, use_nulls && (std::get<2>(row) == /* not nullable */"f"), std::get<3>(row)); - columns.push_back(NameAndTypePair(std::get<0>(row), data_type)); + columns.push_back(NameAndTypePair(column_name, data_type)); + auto attgenerated = std::get<6>(row); - attributes.emplace_back( - PostgreSQLTableStructure::PGAttribute{ - .atttypid = parse(std::get<4>(row)), - .atttypmod = parse(std::get<5>(row)), + attributes.emplace( + column_name, + PostgreSQLTableStructure::PGAttribute{ + .atttypid = parse(std::get<4>(row)), + .atttypmod = parse(std::get<5>(row)), + .attgenerated = attgenerated.empty() ? char{} : char(attgenerated[0]) }); ++i; @@ -213,12 +227,37 @@ PostgreSQLTableStructure::ColumnsInfoPtr readNamesAndTypesList( { const auto & name_and_type = columns[i]; - /// All rows must contain the same number of dimensions, so limit 1 is ok. If number of dimensions in all rows is not the same - + /// If the relation is empty, then array_ndims returns NULL. + /// ClickHouse cannot support this use case. + if (isTableEmpty(tx, postgres_table)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "PostgreSQL relation containing arrays cannot be empty: {}", postgres_table); + + /// All rows must contain the same number of dimensions. + /// 1 is ok. If number of dimensions in all rows is not the same - /// such arrays are not able to be used as ClickHouse Array at all. - pqxx::result result{tx.exec(fmt::format("SELECT array_ndims({}) FROM {} LIMIT 1", name_and_type.name, postgres_table))}; - // array_ndims() may return null for empty array, but we expect 0: - // https://github.com/postgres/postgres/blob/d16a0c1e2e3874cd5adfa9ee968008b6c4b1ae01/src/backend/utils/adt/arrayfuncs.c#L1658 - auto dimensions = result[0][0].as>().value_or(0); + /// + /// For empty arrays, array_ndims([]) will return NULL. + auto postgres_column = doubleQuoteString(name_and_type.name); + pqxx::result result{tx.exec( + fmt::format("SELECT {} IS NULL, array_ndims({}) FROM {} LIMIT 1;", postgres_column, postgres_column, postgres_table))}; + + /// Nullable(Array) is not supported. + auto is_null_array = result[0][0].as(); + if (is_null_array) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "PostgreSQL array cannot be NULL: {}.{}", postgres_table, postgres_column); + + /// Cannot infer dimension of empty arrays. 
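/// Condensed sketch of the two probes the array handling above performs per relation,
/// assuming a pqxx transaction and already-quoted identifiers; the SQL strings are the
/// ones used in the diff, while the helper name and the std::runtime_error reporting
/// are illustrative only.
#include <pqxx/pqxx>
#include <fmt/format.h>
#include <stdexcept>
#include <string>

template <typename Transaction>
int probeArrayDimensions(Transaction & tx, const std::string & quoted_table, const std::string & quoted_column)
{
    /// An empty relation gives array_ndims nothing to inspect.
    pqxx::result empty_check{tx.exec(fmt::format("SELECT NOT EXISTS (SELECT * FROM {} LIMIT 1);", quoted_table))};
    if (empty_check[0][0].as<bool>())
        throw std::runtime_error("relation with array columns must not be empty: " + quoted_table);

    /// One row suffices because all rows must share the same number of dimensions.
    pqxx::result probe{tx.exec(fmt::format(
        "SELECT {} IS NULL, array_ndims({}) FROM {} LIMIT 1;", quoted_column, quoted_column, quoted_table))};

    if (probe[0][0].as<bool>())   /// NULL array: Nullable(Array) is not supported
        throw std::runtime_error("array column must not be NULL: " + quoted_column);
    if (probe[0][1].is_null())    /// empty array: array_ndims returns NULL
        throw std::runtime_error("cannot infer dimensions of an empty array: " + quoted_column);

    return probe[0][1].as<int>();
}
/// The diff reports these same conditions through ErrorCodes::BAD_ARGUMENTS;
/// std::runtime_error above only keeps the sketch self-contained.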
+ auto is_empty_array = result[0][1].is_null(); + if (is_empty_array) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "PostgreSQL cannot infer dimensions of an empty array: {}.{}", + postgres_table, + postgres_column); + } + + int dimensions = result[0][1].as(); /// It is always 1d array if it is in recheck. DataTypePtr type = assert_cast(name_and_type.type.get())->getNestedType(); @@ -255,14 +294,19 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( PostgreSQLTableStructure table; auto where = fmt::format("relname = {}", quoteString(postgres_table)); - if (postgres_schema.empty()) - where += " AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'public')"; - else - where += fmt::format(" AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = {})", quoteString(postgres_schema)); + + where += postgres_schema.empty() + ? " AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'public')" + : fmt::format(" AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = {})", quoteString(postgres_schema)); std::string query = fmt::format( - "SELECT attname AS name, format_type(atttypid, atttypmod) AS type, " - "attnotnull AS not_null, attndims AS dims, atttypid as type_id, atttypmod as type_modifier " + "SELECT attname AS name, " /// column name + "format_type(atttypid, atttypmod) AS type, " /// data type + "attnotnull AS not_null, " /// is nullable + "attndims AS dims, " /// array dimensions + "atttypid as type_id, " + "atttypmod as type_modifier, " + "attgenerated as generated " /// if column has GENERATED "FROM pg_attribute " "WHERE attrelid = (SELECT oid FROM pg_class WHERE {}) " "AND NOT attisdropped AND attnum > 0 " @@ -274,11 +318,44 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( if (!table.physical_columns) throw Exception(ErrorCodes::UNKNOWN_TABLE, "PostgreSQL table {} does not exist", postgres_table_with_schema); + for (const auto & column : table.physical_columns->columns) + { + table.physical_columns->names.push_back(column.name); + } + + bool check_generated = table.physical_columns->attributes.end() != std::find_if( + table.physical_columns->attributes.begin(), + table.physical_columns->attributes.end(), + [](const auto & attr){ return attr.second.attgenerated == 's'; }); + + if (check_generated) + { + std::string attrdef_query = fmt::format( + "SELECT adnum, pg_get_expr(adbin, adrelid) as generated_expression " + "FROM pg_attrdef " + "WHERE adrelid = (SELECT oid FROM pg_class WHERE {});", where); + + pqxx::result result{tx.exec(attrdef_query)}; + for (const auto row : result) + { + size_t adnum = row[0].as(); + if (!adnum || adnum > table.physical_columns->names.size()) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Received adnum {}, but currently fetched columns list has {} columns", + adnum, table.physical_columns->attributes.size()); + } + const auto column_name = table.physical_columns->names[adnum - 1]; + table.physical_columns->attributes.at(column_name).attr_def = row[1].as(); + } + } + if (with_primary_key) { /// wiki.postgresql.org/wiki/Retrieve_primary_key_columns query = fmt::format( - "SELECT a.attname, format_type(a.atttypid, a.atttypmod) AS data_type " + "SELECT a.attname, " /// column name + "format_type(a.atttypid, a.atttypmod) AS data_type " /// data type "FROM pg_index i " "JOIN pg_attribute a ON a.attrelid = i.indrelid " "AND a.attnum = ANY(i.indkey) " diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h index 
7cd21d353a2..81bf7b278fc 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h @@ -16,13 +16,17 @@ struct PostgreSQLTableStructure { Int32 atttypid; Int32 atttypmod; + bool atthasdef; + char attgenerated; + std::string attr_def; }; - using Attributes = std::vector; + using Attributes = std::unordered_map; struct ColumnsInfo { NamesAndTypesList columns; Attributes attributes; + std::vector names; ColumnsInfo(NamesAndTypesList && columns_, Attributes && attributes_) : columns(columns_), attributes(attributes_) {} }; using ColumnsInfoPtr = std::shared_ptr; diff --git a/src/Databases/SQLite/DatabaseSQLite.cpp b/src/Databases/SQLite/DatabaseSQLite.cpp index d031fd8e420..605a354bd7e 100644 --- a/src/Databases/SQLite/DatabaseSQLite.cpp +++ b/src/Databases/SQLite/DatabaseSQLite.cpp @@ -5,11 +5,11 @@ #include #include #include +#include #include #include #include #include -#include #include #include @@ -21,6 +21,7 @@ namespace ErrorCodes { extern const int SQLITE_ENGINE_ERROR; extern const int UNKNOWN_TABLE; + extern const int BAD_ARGUMENTS; } DatabaseSQLite::DatabaseSQLite( @@ -201,6 +202,24 @@ ASTPtr DatabaseSQLite::getCreateTableQueryImpl(const String & table_name, Contex return create_table_query; } +void registerDatabaseSQLite(DatabaseFactory & factory) +{ + auto create_fn = [](const DatabaseFactory::Arguments & args) + { + auto * engine_define = args.create_query.storage; + const ASTFunction * engine = engine_define->engine; + + if (!engine->arguments || engine->arguments->children.size() != 1) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "SQLite database requires 1 argument: database path"); + + const auto & arguments = engine->arguments->children; + + String database_path = safeGetLiteralValue(arguments[0], "SQLite"); + + return std::make_shared(args.context, engine_define, args.create_query.attach, database_path); + }; + factory.registerDatabase("SQLite", create_fn); +} } #endif diff --git a/src/Databases/TablesDependencyGraph.h b/src/Databases/TablesDependencyGraph.h index e71d5ecc5fc..50be3bbf969 100644 --- a/src/Databases/TablesDependencyGraph.h +++ b/src/Databases/TablesDependencyGraph.h @@ -60,7 +60,7 @@ public: /// Removes all dependencies of "table_id", returns those dependencies. std::vector removeDependencies(const StorageID & table_id, bool remove_isolated_tables = false); - /// Removes a table from the graph and removes all references to in from the graph (both from its dependencies and dependents). + /// Removes a table from the graph and removes all references to it from the graph (both from its dependencies and dependents). bool removeTable(const StorageID & table_id); /// Removes tables from the graph by a specified filter. 
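registerDatabaseSQLite above follows the same shape as every other registration in this patch: validate the engine arguments from the CREATE query, then hand DatabaseFactory a lambda that constructs the engine. A generic sketch of that shape follows; DatabaseFoo, its single path argument, and the template argument of safeGetLiteralValue are assumptions, while the DatabaseFactory::Arguments fields, registerDatabase and the argument checks mirror the hunks above (other includes are omitted for brevity).

#include <Databases/DatabaseFactory.h>   /// assumed header path

namespace DB
{

namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
}

void registerDatabaseFoo(DatabaseFactory & factory)
{
    auto create_fn = [](const DatabaseFactory::Arguments & args)
    {
        auto * engine_define = args.create_query.storage;
        const ASTFunction * engine = engine_define->engine;

        /// Exactly one argument: the path the hypothetical engine operates on.
        if (!engine->arguments || engine->arguments->children.size() != 1)
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Foo database requires 1 argument: path");

        String path = safeGetLiteralValue<String>(engine->arguments->children[0], "Foo");

        return std::make_shared<DatabaseFoo>(args.database_name, path, args.context);
    };
    factory.registerDatabase("Foo", create_fn);   /// name used in CREATE DATABASE ... ENGINE = Foo('...')
}

}

A new engine registered this way also needs a matching call in registerDatabases(), which this patch introduces in src/Databases/registerDatabases.cpp further down.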
diff --git a/src/Databases/TablesLoader.h b/src/Databases/TablesLoader.h index 3a7f8721432..038aa35895f 100644 --- a/src/Databases/TablesLoader.h +++ b/src/Databases/TablesLoader.h @@ -82,8 +82,6 @@ private: void buildDependencyGraph(); void removeUnresolvableDependencies(); - void loadTablesInTopologicalOrder(); - void startLoadingTables(ContextMutablePtr load_context, const std::vector & tables_to_load, size_t level); }; } diff --git a/src/Databases/registerDatabases.cpp b/src/Databases/registerDatabases.cpp new file mode 100644 index 00000000000..4f7c229bdf4 --- /dev/null +++ b/src/Databases/registerDatabases.cpp @@ -0,0 +1,72 @@ +#include +#include + + +namespace DB +{ + +void registerDatabaseAtomic(DatabaseFactory & factory); +void registerDatabaseOrdinary(DatabaseFactory & factory); +void registerDatabaseDictionary(DatabaseFactory & factory); +void registerDatabaseMemory(DatabaseFactory & factory); +void registerDatabaseLazy(DatabaseFactory & factory); +void registerDatabaseFilesystem(DatabaseFactory & factory); +void registerDatabaseReplicated(DatabaseFactory & factory); + +#if USE_MYSQL +void registerDatabaseMySQL(DatabaseFactory & factory); +void registerDatabaseMaterializedMySQL(DatabaseFactory & factory); +#endif + +#if USE_LIBPQXX +void registerDatabasePostgreSQL(DatabaseFactory & factory); + +void registerDatabaseMaterializedPostgreSQL(DatabaseFactory & factory); +#endif + +#if USE_SQLITE +void registerDatabaseSQLite(DatabaseFactory & factory); +#endif + +#if USE_AWS_S3 +void registerDatabaseS3(DatabaseFactory & factory); +#endif + +#if USE_HDFS +void registerDatabaseHDFS(DatabaseFactory & factory); +#endif + +void registerDatabases() +{ + auto & factory = DatabaseFactory::instance(); + registerDatabaseAtomic(factory); + registerDatabaseOrdinary(factory); + registerDatabaseDictionary(factory); + registerDatabaseMemory(factory); + registerDatabaseLazy(factory); + registerDatabaseFilesystem(factory); + registerDatabaseReplicated(factory); + +#if USE_MYSQL + registerDatabaseMySQL(factory); + registerDatabaseMaterializedMySQL(factory); +#endif + +#if USE_LIBPQXX + registerDatabasePostgreSQL(factory); + registerDatabaseMaterializedPostgreSQL(factory); +#endif + +#if USE_SQLITE + registerDatabaseSQLite(factory); +#endif + +#if USE_AWS_S3 + registerDatabaseS3(factory); +#endif + +#if USE_HDFS + registerDatabaseHDFS(factory); +#endif +} +} diff --git a/src/Databases/registerDatabases.h b/src/Databases/registerDatabases.h new file mode 100644 index 00000000000..dbf1bbb6e64 --- /dev/null +++ b/src/Databases/registerDatabases.h @@ -0,0 +1,6 @@ +#pragma once + +namespace DB +{ +void registerDatabases(); +} diff --git a/src/Dictionaries/DictionaryFactory.cpp b/src/Dictionaries/DictionaryFactory.cpp index c3102632167..f6102d7c657 100644 --- a/src/Dictionaries/DictionaryFactory.cpp +++ b/src/Dictionaries/DictionaryFactory.cpp @@ -69,12 +69,6 @@ DictionaryPtr DictionaryFactory::create( layout_type); } -DictionaryPtr DictionaryFactory::create(const std::string & name, const ASTCreateQuery & ast, ContextPtr global_context) const -{ - auto configuration = getDictionaryConfigurationFromAST(ast, global_context); - return DictionaryFactory::create(name, *configuration, "dictionary", global_context, true); -} - bool DictionaryFactory::isComplex(const std::string & layout_type) const { auto it = registered_layouts.find(layout_type); diff --git a/src/Dictionaries/DictionaryFactory.h b/src/Dictionaries/DictionaryFactory.h index 35097a5ed24..2834451df81 100644 --- a/src/Dictionaries/DictionaryFactory.h 
+++ b/src/Dictionaries/DictionaryFactory.h @@ -39,11 +39,6 @@ public: ContextPtr global_context, bool created_from_ddl) const; - /// Create dictionary from DDL-query - DictionaryPtr create(const std::string & name, - const ASTCreateQuery & ast, - ContextPtr global_context) const; - using LayoutCreateFunction = std::function GeoDictionariesLoader::reloadRegionsHierarchies(const Poco::Util::AbstractConfiguration & config) { static constexpr auto config_key = "path_to_regions_hierarchy_file"; @@ -27,3 +30,5 @@ std::unique_ptr GeoDictionariesLoader::reloadRegionsNames(const Po auto data_provider = std::make_unique(directory); return std::make_unique(std::move(data_provider)); } + +} diff --git a/src/Dictionaries/Embedded/GeoDictionariesLoader.h b/src/Dictionaries/Embedded/GeoDictionariesLoader.h index d09e69cf561..f795456985e 100644 --- a/src/Dictionaries/Embedded/GeoDictionariesLoader.h +++ b/src/Dictionaries/Embedded/GeoDictionariesLoader.h @@ -6,6 +6,9 @@ #include +namespace DB +{ + // Default implementation of geo dictionaries loader used by native server application class GeoDictionariesLoader { @@ -13,3 +16,5 @@ public: static std::unique_ptr reloadRegionsHierarchies(const Poco::Util::AbstractConfiguration & config); static std::unique_ptr reloadRegionsNames(const Poco::Util::AbstractConfiguration & config); }; + +} diff --git a/src/Dictionaries/Embedded/GeodataProviders/Entries.h b/src/Dictionaries/Embedded/GeodataProviders/Entries.h index 942c2f5adbc..6b27c5ae19e 100644 --- a/src/Dictionaries/Embedded/GeodataProviders/Entries.h +++ b/src/Dictionaries/Embedded/GeodataProviders/Entries.h @@ -3,6 +3,9 @@ #include #include "Types.h" +namespace DB +{ + struct RegionEntry { RegionID id; @@ -17,3 +20,5 @@ struct RegionNameEntry RegionID id; std::string name; }; + +} diff --git a/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp b/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp index 210459da0be..5d8781d6f23 100644 --- a/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp +++ b/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp @@ -9,6 +9,9 @@ namespace fs = std::filesystem; +namespace DB +{ + bool RegionsHierarchyDataSource::isModified() const { return updates_tracker.isModified(); @@ -17,7 +20,7 @@ bool RegionsHierarchyDataSource::isModified() const IRegionsHierarchyReaderPtr RegionsHierarchyDataSource::createReader() { updates_tracker.fixCurrentVersion(); - auto file_reader = std::make_shared(path); + auto file_reader = std::make_shared(path); return std::make_unique(std::move(file_reader)); } @@ -73,3 +76,5 @@ IRegionsHierarchyDataSourcePtr RegionsHierarchiesDataProvider::getHierarchySourc throw Poco::Exception("Regions hierarchy '" + name + "' not found"); } + +} diff --git a/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h b/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h index c2e36f59e1e..6ded62dbf83 100644 --- a/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h +++ b/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h @@ -5,6 +5,8 @@ #include #include +namespace DB +{ // Represents local file with regions hierarchy dump class RegionsHierarchyDataSource : public IRegionsHierarchyDataSource @@ -50,3 +52,5 @@ public: private: void discoverFilesWithCustomHierarchies(); }; + +} diff --git a/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp b/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp index 68bd6142416..d9ac19f4d67 
100644 --- a/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp +++ b/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp @@ -3,6 +3,8 @@ #include #include +namespace DB +{ bool RegionsHierarchyFormatReader::readNext(RegionEntry & entry) { @@ -15,11 +17,11 @@ bool RegionsHierarchyFormatReader::readNext(RegionEntry & entry) Int32 read_parent_id = 0; Int8 read_type = 0; - DB::readIntText(read_region_id, *input); - DB::assertChar('\t', *input); - DB::readIntText(read_parent_id, *input); - DB::assertChar('\t', *input); - DB::readIntText(read_type, *input); + readIntText(read_region_id, *input); + assertChar('\t', *input); + readIntText(read_parent_id, *input); + assertChar('\t', *input); + readIntText(read_type, *input); /** Then there can be a newline (old version) * or tab, the region's population, line feed (new version). @@ -29,11 +31,11 @@ bool RegionsHierarchyFormatReader::readNext(RegionEntry & entry) { ++input->position(); UInt64 population_big = 0; - DB::readIntText(population_big, *input); + readIntText(population_big, *input); population = population_big > std::numeric_limits::max() ? std::numeric_limits::max() : static_cast(population_big); } - DB::assertChar('\n', *input); + assertChar('\n', *input); if (read_region_id <= 0 || read_type < 0) continue; @@ -55,3 +57,5 @@ bool RegionsHierarchyFormatReader::readNext(RegionEntry & entry) return false; } + +} diff --git a/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h b/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h index 64f393ada62..ebd8fca4ff9 100644 --- a/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h +++ b/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.h @@ -3,15 +3,19 @@ #include #include "IHierarchiesProvider.h" +namespace DB +{ // Reads regions hierarchy in geoexport format class RegionsHierarchyFormatReader : public IRegionsHierarchyReader { private: - DB::ReadBufferPtr input; + ReadBufferPtr input; public: - explicit RegionsHierarchyFormatReader(DB::ReadBufferPtr input_) : input(std::move(input_)) {} + explicit RegionsHierarchyFormatReader(ReadBufferPtr input_) : input(std::move(input_)) {} bool readNext(RegionEntry & entry) override; }; + +} diff --git a/src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h b/src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h index f7d51135440..68ab0fdca2d 100644 --- a/src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h +++ b/src/Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h @@ -5,6 +5,8 @@ #include #include "Entries.h" +namespace DB +{ // Iterates over all regions in data source class IRegionsHierarchyReader @@ -46,3 +48,5 @@ public: }; using IRegionsHierarchiesDataProviderPtr = std::shared_ptr; + +} diff --git a/src/Dictionaries/Embedded/GeodataProviders/INamesProvider.h b/src/Dictionaries/Embedded/GeodataProviders/INamesProvider.h index 679c14d546b..6cd7d78f6d5 100644 --- a/src/Dictionaries/Embedded/GeodataProviders/INamesProvider.h +++ b/src/Dictionaries/Embedded/GeodataProviders/INamesProvider.h @@ -3,6 +3,8 @@ #include #include "Entries.h" +namespace DB +{ // Iterates over all name entries in data source class ILanguageRegionsNamesReader @@ -49,3 +51,5 @@ public: }; using IRegionsNamesDataProviderPtr = std::unique_ptr; + +} diff --git a/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp b/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp index 9d0c57f18eb..99216507c10 100644 --- 
a/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp +++ b/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.cpp @@ -2,6 +2,8 @@ #include +namespace DB +{ bool LanguageRegionsNamesFormatReader::readNext(RegionNameEntry & entry) { @@ -10,10 +12,10 @@ bool LanguageRegionsNamesFormatReader::readNext(RegionNameEntry & entry) Int32 read_region_id; std::string region_name; - DB::readIntText(read_region_id, *input); - DB::assertChar('\t', *input); - DB::readString(region_name, *input); - DB::assertChar('\n', *input); + readIntText(read_region_id, *input); + assertChar('\t', *input); + readString(region_name, *input); + assertChar('\n', *input); if (read_region_id <= 0) continue; @@ -25,3 +27,5 @@ bool LanguageRegionsNamesFormatReader::readNext(RegionNameEntry & entry) return false; } + +} diff --git a/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h b/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h index 49d324d434e..50b2abd47c1 100644 --- a/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h +++ b/src/Dictionaries/Embedded/GeodataProviders/NamesFormatReader.h @@ -3,15 +3,19 @@ #include #include "INamesProvider.h" +namespace DB +{ // Reads regions names list in geoexport format class LanguageRegionsNamesFormatReader : public ILanguageRegionsNamesReader { private: - DB::ReadBufferPtr input; + ReadBufferPtr input; public: - explicit LanguageRegionsNamesFormatReader(DB::ReadBufferPtr input_) : input(std::move(input_)) {} + explicit LanguageRegionsNamesFormatReader(ReadBufferPtr input_) : input(std::move(input_)) {} bool readNext(RegionNameEntry & entry) override; }; + +} diff --git a/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp b/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp index 5f79fda070f..e6a8d308e87 100644 --- a/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp +++ b/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.cpp @@ -6,6 +6,9 @@ namespace fs = std::filesystem; +namespace DB +{ + bool LanguageRegionsNamesDataSource::isModified() const { return updates_tracker.isModified(); @@ -19,7 +22,7 @@ size_t LanguageRegionsNamesDataSource::estimateTotalSize() const ILanguageRegionsNamesReaderPtr LanguageRegionsNamesDataSource::createReader() { updates_tracker.fixCurrentVersion(); - auto file_reader = std::make_shared(path); + auto file_reader = std::make_shared(path); return std::make_unique(std::move(file_reader)); } @@ -51,3 +54,5 @@ std::string RegionsNamesDataProvider::getDataFilePath(const std::string & langua { return directory + "/regions_names_" + language + ".txt"; } + +} diff --git a/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.h b/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.h index 2d49cceab86..8ba1f33d2c4 100644 --- a/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.h +++ b/src/Dictionaries/Embedded/GeodataProviders/NamesProvider.h @@ -3,6 +3,8 @@ #include #include "INamesProvider.h" +namespace DB +{ // Represents local file with list of regions ids / names class LanguageRegionsNamesDataSource : public ILanguageRegionsNamesDataSource @@ -46,3 +48,5 @@ public: private: std::string getDataFilePath(const std::string & language) const; }; + +} diff --git a/src/Dictionaries/Embedded/GeodataProviders/Types.h b/src/Dictionaries/Embedded/GeodataProviders/Types.h index e63f6bae716..0fd6a01051a 100644 --- a/src/Dictionaries/Embedded/GeodataProviders/Types.h +++ b/src/Dictionaries/Embedded/GeodataProviders/Types.h @@ -2,6 +2,8 @@ #include +namespace DB 
+{ using RegionID = UInt32; using RegionDepth = UInt8; @@ -16,3 +18,5 @@ enum class RegionType : Int8 Area = 5, City = 6, }; + +} diff --git a/src/Dictionaries/Embedded/RegionsHierarchies.cpp b/src/Dictionaries/Embedded/RegionsHierarchies.cpp index be828b8b281..c3c62bcc83c 100644 --- a/src/Dictionaries/Embedded/RegionsHierarchies.cpp +++ b/src/Dictionaries/Embedded/RegionsHierarchies.cpp @@ -3,6 +3,8 @@ #include #include +namespace DB +{ RegionsHierarchies::RegionsHierarchies(IRegionsHierarchiesDataProviderPtr data_provider) { @@ -19,3 +21,5 @@ RegionsHierarchies::RegionsHierarchies(IRegionsHierarchiesDataProviderPtr data_p reload(); } + +} diff --git a/src/Dictionaries/Embedded/RegionsHierarchies.h b/src/Dictionaries/Embedded/RegionsHierarchies.h index 925b7b490ff..996c1177b6e 100644 --- a/src/Dictionaries/Embedded/RegionsHierarchies.h +++ b/src/Dictionaries/Embedded/RegionsHierarchies.h @@ -5,6 +5,8 @@ #include "GeodataProviders/IHierarchiesProvider.h" #include "RegionsHierarchy.h" +namespace DB +{ /** Contains several hierarchies of regions. * Used to support several different perspectives on the ownership of regions by countries. @@ -37,3 +39,5 @@ public: return it->second; } }; + +} diff --git a/src/Dictionaries/Embedded/RegionsHierarchy.cpp b/src/Dictionaries/Embedded/RegionsHierarchy.cpp index c266bf7efb8..23f4c250a23 100644 --- a/src/Dictionaries/Embedded/RegionsHierarchy.cpp +++ b/src/Dictionaries/Embedded/RegionsHierarchy.cpp @@ -12,7 +12,7 @@ namespace DB namespace ErrorCodes { extern const int INCORRECT_DATA; -} + extern const int LOGICAL_ERROR; } @@ -54,9 +54,8 @@ void RegionsHierarchy::reload() if (region_entry.id > max_region_id) { if (region_entry.id > max_size) - throw DB::Exception(DB::ErrorCodes::INCORRECT_DATA, - "Region id is too large: {}, should be not more than {}", - DB::toString(region_entry.id), DB::toString(max_size)); + throw Exception( + ErrorCodes::INCORRECT_DATA, "Region id is too large: {}, should be not more than {}", region_entry.id, max_size); max_region_id = region_entry.id; @@ -112,16 +111,18 @@ void RegionsHierarchy::reload() ++depth; if (depth == std::numeric_limits::max()) - throw Poco::Exception( - "Logical error in regions hierarchy: region " + DB::toString(current) + " possible is inside infinite loop"); + throw Exception( + ErrorCodes::LOGICAL_ERROR, "Logical error in regions hierarchy: region {} possible is inside infinite loop", current); current = new_parents[current]; if (current == 0) break; if (current > max_region_id) - throw Poco::Exception( - "Logical error in regions hierarchy: region " + DB::toString(current) + " (specified as parent) doesn't exist"); + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Logical error in regions hierarchy: region {} (specified as parent) doesn't exist", + current); if (types[current] == RegionType::City) new_city[i] = current; @@ -156,3 +157,5 @@ void RegionsHierarchy::reload() populations.swap(new_populations); depths.swap(new_depths); } + +} diff --git a/src/Dictionaries/Embedded/RegionsHierarchy.h b/src/Dictionaries/Embedded/RegionsHierarchy.h index 508bca0d1e1..5d9aacb9512 100644 --- a/src/Dictionaries/Embedded/RegionsHierarchy.h +++ b/src/Dictionaries/Embedded/RegionsHierarchy.h @@ -6,6 +6,8 @@ #include "GeodataProviders/IHierarchiesProvider.h" #include +namespace DB +{ class IRegionsHierarchyDataProvider; @@ -129,3 +131,5 @@ public: return populations[region]; } }; + +} diff --git a/src/Dictionaries/Embedded/RegionsNames.cpp b/src/Dictionaries/Embedded/RegionsNames.cpp index 
93ca9e6dbc9..847dfe99b10 100644 --- a/src/Dictionaries/Embedded/RegionsNames.cpp +++ b/src/Dictionaries/Embedded/RegionsNames.cpp @@ -10,12 +10,12 @@ namespace DB { + namespace ErrorCodes { extern const int INCORRECT_DATA; + extern const int LOGICAL_ERROR; } -} - RegionsNames::RegionsNames(IRegionsNamesDataProviderPtr data_provider) { @@ -30,7 +30,7 @@ RegionsNames::RegionsNames(IRegionsNamesDataProviderPtr data_provider) std::string RegionsNames::dumpSupportedLanguagesNames() { - DB::WriteBufferFromOwnString out; + WriteBufferFromOwnString out; for (size_t i = 0; i < total_languages; ++i) { if (i > 0) @@ -74,7 +74,8 @@ void RegionsNames::reload() size_t old_size = new_chars.size(); if (new_chars.capacity() < old_size + name_entry.name.length() + 1) - throw Poco::Exception("Logical error. Maybe size estimate of " + names_source->getSourceName() + " is wrong."); + throw Exception( + ErrorCodes::LOGICAL_ERROR, "Logical error. Maybe size estimate of {} is wrong", names_source->getSourceName()); new_chars.resize(old_size + name_entry.name.length() + 1); memcpy(new_chars.data() + old_size, name_entry.name.c_str(), name_entry.name.length() + 1); @@ -84,9 +85,8 @@ void RegionsNames::reload() max_region_id = name_entry.id; if (name_entry.id > max_size) - throw DB::Exception(DB::ErrorCodes::INCORRECT_DATA, - "Region id is too large: {}, should be not more than {}", - DB::toString(name_entry.id), DB::toString(max_size)); + throw Exception( + ErrorCodes::INCORRECT_DATA, "Region id is too large: {}, should be not more than {}", name_entry.id, max_size); } while (name_entry.id >= new_names_refs.size()) @@ -102,3 +102,5 @@ void RegionsNames::reload() for (size_t language_id = 0; language_id < total_languages; ++language_id) names_refs[language_id].resize(max_region_id + 1, StringRef("", 0)); } + +} diff --git a/src/Dictionaries/Embedded/RegionsNames.h b/src/Dictionaries/Embedded/RegionsNames.h index 1e0ea3f0923..0053c74745a 100644 --- a/src/Dictionaries/Embedded/RegionsNames.h +++ b/src/Dictionaries/Embedded/RegionsNames.h @@ -7,6 +7,8 @@ #include #include "GeodataProviders/INamesProvider.h" +namespace DB +{ /** A class that allows you to recognize by region id its text name in one of the supported languages. 
* @@ -111,3 +113,5 @@ public: void reload(); }; + +} diff --git a/src/Dictionaries/HTTPDictionarySource.cpp b/src/Dictionaries/HTTPDictionarySource.cpp index c12f4fedf3f..689593a969e 100644 --- a/src/Dictionaries/HTTPDictionarySource.cpp +++ b/src/Dictionaries/HTTPDictionarySource.cpp @@ -38,7 +38,7 @@ HTTPDictionarySource::HTTPDictionarySource( , configuration(configuration_) , sample_block(sample_block_) , context(context_) - , timeouts(ConnectionTimeouts::getHTTPTimeouts(context->getSettingsRef(), {context->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0})) + , timeouts(ConnectionTimeouts::getHTTPTimeouts(context->getSettingsRef(), context->getServerSettings().keep_alive_timeout)) { credentials.setUsername(credentials_.getUsername()); credentials.setPassword(credentials_.getPassword()); @@ -51,7 +51,7 @@ HTTPDictionarySource::HTTPDictionarySource(const HTTPDictionarySource & other) , configuration(other.configuration) , sample_block(other.sample_block) , context(Context::createCopy(other.context)) - , timeouts(ConnectionTimeouts::getHTTPTimeouts(context->getSettingsRef(), {context->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0})) + , timeouts(ConnectionTimeouts::getHTTPTimeouts(context->getSettingsRef(), context->getServerSettings().keep_alive_timeout)) { credentials.setUsername(other.credentials.getUsername()); credentials.setPassword(other.credentials.getPassword()); diff --git a/src/Dictionaries/HashedArrayDictionary.cpp b/src/Dictionaries/HashedArrayDictionary.cpp index 21016025d96..4c9ff8abe80 100644 --- a/src/Dictionaries/HashedArrayDictionary.cpp +++ b/src/Dictionaries/HashedArrayDictionary.cpp @@ -20,17 +20,19 @@ namespace ErrorCodes { extern const int BAD_ARGUMENTS; extern const int DICTIONARY_IS_EMPTY; + extern const int LOGICAL_ERROR; extern const int UNSUPPORTED_METHOD; } -template -HashedArrayDictionary::HashedArrayDictionary( +template +HashedArrayDictionary::HashedArrayDictionary( const StorageID & dict_id_, const DictionaryStructure & dict_struct_, DictionarySourcePtr source_ptr_, const HashedArrayDictionaryStorageConfiguration & configuration_, BlockPtr update_field_loaded_block_) : IDictionary(dict_id_) + , log(&Poco::Logger::get("HashedArrayDictionary")) , dict_struct(dict_struct_) , source_ptr(std::move(source_ptr_)) , configuration(configuration_) @@ -42,8 +44,8 @@ HashedArrayDictionary::HashedArrayDictionary( calculateBytesAllocated(); } -template -ColumnPtr HashedArrayDictionary::getColumn( +template +ColumnPtr HashedArrayDictionary::getColumn( const std::string & attribute_name, const DataTypePtr & result_type, const Columns & key_columns, @@ -67,8 +69,8 @@ ColumnPtr HashedArrayDictionary::getColumn( return getAttributeColumn(attribute, dictionary_attribute, keys_size, default_values_column, extractor); } -template -Columns HashedArrayDictionary::getColumns( +template +Columns HashedArrayDictionary::getColumns( const Strings & attribute_names, const DataTypes & result_types, const Columns & key_columns, @@ -83,7 +85,7 @@ Columns HashedArrayDictionary::getColumns( const size_t keys_size = extractor.getKeysSize(); - PaddedPODArray key_index_to_element_index; + KeyIndexToElementIndex key_index_to_element_index; /** Optimization for multiple attributes. * For each key save element index in key_index_to_element_index array. 
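The HashedArrayDictionary hunks that follow thread a `sharded` template parameter through the class: the single key container becomes a per-shard array of containers, and every lookup first maps the key to a shard via getShard before touching key_attribute.containers[shard]. The patch does not show getShard itself, so the routing below (a plain hash-mod scheme) is only an assumed illustration of the key -> shard -> container access pattern.

#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

class ShardedKeyIndex   /// hypothetical, not the dictionary's real storage
{
public:
    explicit ShardedKeyIndex(size_t shards) : containers(shards == 0 ? 1 : shards) {}

    size_t getShard(uint64_t key) const
    {
        /// Assumed routing: any stable hash of the key, reduced modulo the shard count.
        return (key * 0x9E3779B97F4A7C15ULL) % containers.size();
    }

    void insert(uint64_t key, size_t element_index)
    {
        containers[getShard(key)][key] = element_index;
    }

    /// Returns the element index for the key, or -1 when it is absent — mirroring how
    /// getColumns() records misses in key_index_to_element_index.
    int64_t find(uint64_t key) const
    {
        const auto & container = containers[getShard(key)];
        auto it = container.find(key);
        return it == container.end() ? -1 : static_cast<int64_t>(it->second);
    }

private:
    std::vector<std::unordered_map<uint64_t, size_t>> containers;
};

When `sharded` is false the dictionary presumably keeps a single shard, which is why key_index_to_element_index stores either a plain index or an (index, shard) pair depending on the template parameter.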
@@ -92,7 +94,6 @@ Columns HashedArrayDictionary::getColumns( */ if (attribute_names.size() > 1) { - const auto & key_attribute_container = key_attribute.container; size_t keys_found = 0; key_index_to_element_index.resize(keys_size); @@ -100,15 +101,23 @@ Columns HashedArrayDictionary::getColumns( for (size_t key_index = 0; key_index < keys_size; ++key_index) { auto key = extractor.extractCurrentKey(); + auto shard = getShard(key); + const auto & key_attribute_container = key_attribute.containers[shard]; auto it = key_attribute_container.find(key); if (it == key_attribute_container.end()) { - key_index_to_element_index[key_index] = -1; + if constexpr (sharded) + key_index_to_element_index[key_index] = std::make_pair(-1, shard); + else + key_index_to_element_index[key_index] = -1; } else { - key_index_to_element_index[key_index] = it->getMapped(); + if constexpr (sharded) + key_index_to_element_index[key_index] = std::make_pair(it->getMapped(), shard); + else + key_index_to_element_index[key_index] = it->getMapped(); ++keys_found; } @@ -147,8 +156,8 @@ Columns HashedArrayDictionary::getColumns( return result_columns; } -template -ColumnUInt8::Ptr HashedArrayDictionary::hasKeys(const Columns & key_columns, const DataTypes & key_types) const +template +ColumnUInt8::Ptr HashedArrayDictionary::hasKeys(const Columns & key_columns, const DataTypes & key_types) const { if (dictionary_key_type == DictionaryKeyType::Complex) dict_struct.validateKeyTypes(key_types); @@ -166,8 +175,10 @@ ColumnUInt8::Ptr HashedArrayDictionary::hasKeys(const Colum for (size_t requested_key_index = 0; requested_key_index < keys_size; ++requested_key_index) { auto requested_key = extractor.extractCurrentKey(); + auto shard = getShard(requested_key); + const auto & key_attribute_container = key_attribute.containers[shard]; - out[requested_key_index] = key_attribute.container.find(requested_key) != key_attribute.container.end(); + out[requested_key_index] = key_attribute_container.find(requested_key) != key_attribute_container.end(); keys_found += out[requested_key_index]; extractor.rollbackCurrentKey(); @@ -179,8 +190,8 @@ ColumnUInt8::Ptr HashedArrayDictionary::hasKeys(const Colum return result; } -template -ColumnPtr HashedArrayDictionary::getHierarchy(ColumnPtr key_column [[maybe_unused]], const DataTypePtr &) const +template +ColumnPtr HashedArrayDictionary::getHierarchy(ColumnPtr key_column [[maybe_unused]], const DataTypePtr &) const { if constexpr (dictionary_key_type == DictionaryKeyType::Simple) { @@ -197,16 +208,20 @@ ColumnPtr HashedArrayDictionary::getHierarchy(ColumnPtr key if (!dictionary_attribute.null_value.isNull()) null_value = dictionary_attribute.null_value.get(); - const auto & key_attribute_container = key_attribute.container; - const AttributeContainerType & parent_keys_container = std::get>(hierarchical_attribute.container); - auto is_key_valid_func = [&](auto & key) { return key_attribute_container.find(key) != key_attribute_container.end(); }; + auto is_key_valid_func = [&, this](auto & key) + { + const auto & key_attribute_container = key_attribute.containers[getShard(key)]; + return key_attribute_container.find(key) != key_attribute_container.end(); + }; size_t keys_found = 0; - auto get_parent_func = [&](auto & hierarchy_key) + auto get_parent_func = [&, this](auto & hierarchy_key) { std::optional result; + auto shard = getShard(hierarchy_key); + const auto & key_attribute_container = key_attribute.containers[shard]; auto it = key_attribute_container.find(hierarchy_key); @@ -215,8 +230,9 @@ 
ColumnPtr HashedArrayDictionary::getHierarchy(ColumnPtr key size_t key_index = it->getMapped(); - if (unlikely(hierarchical_attribute.is_index_null) && (*hierarchical_attribute.is_index_null)[key_index]) + if (unlikely(hierarchical_attribute.is_index_null) && (*hierarchical_attribute.is_index_null)[shard][key_index]) return result; + const auto & parent_keys_container = std::get>(hierarchical_attribute.containers)[shard]; UInt64 parent_key = parent_keys_container[key_index]; if (null_value && *null_value == parent_key) @@ -241,8 +257,8 @@ ColumnPtr HashedArrayDictionary::getHierarchy(ColumnPtr key } } -template -ColumnUInt8::Ptr HashedArrayDictionary::isInHierarchy( +template +ColumnUInt8::Ptr HashedArrayDictionary::isInHierarchy( ColumnPtr key_column [[maybe_unused]], ColumnPtr in_key_column [[maybe_unused]], const DataTypePtr &) const @@ -265,16 +281,20 @@ ColumnUInt8::Ptr HashedArrayDictionary::isInHierarchy( if (!dictionary_attribute.null_value.isNull()) null_value = dictionary_attribute.null_value.get(); - const auto & key_attribute_container = key_attribute.container; - const AttributeContainerType & parent_keys_container = std::get>(hierarchical_attribute.container); - auto is_key_valid_func = [&](auto & key) { return key_attribute_container.find(key) != key_attribute_container.end(); }; + auto is_key_valid_func = [&](auto & key) + { + const auto & key_attribute_container = key_attribute.containers[getShard(key)]; + return key_attribute_container.find(key) != key_attribute_container.end(); + }; size_t keys_found = 0; auto get_parent_func = [&](auto & hierarchy_key) { std::optional result; + auto shard = getShard(hierarchy_key); + const auto & key_attribute_container = key_attribute.containers[shard]; auto it = key_attribute_container.find(hierarchy_key); @@ -283,9 +303,10 @@ ColumnUInt8::Ptr HashedArrayDictionary::isInHierarchy( size_t key_index = it->getMapped(); - if (unlikely(hierarchical_attribute.is_index_null) && (*hierarchical_attribute.is_index_null)[key_index]) + if (unlikely(hierarchical_attribute.is_index_null) && (*hierarchical_attribute.is_index_null)[shard][key_index]) return result; + const auto & parent_keys_container = std::get>(hierarchical_attribute.containers)[shard]; UInt64 parent_key = parent_keys_container[key_index]; if (null_value && *null_value == parent_key) return result; @@ -309,8 +330,8 @@ ColumnUInt8::Ptr HashedArrayDictionary::isInHierarchy( } } -template -DictionaryHierarchicalParentToChildIndexPtr HashedArrayDictionary::getHierarchicalIndex() const +template +DictionaryHierarchicalParentToChildIndexPtr HashedArrayDictionary::getHierarchicalIndex() const { if constexpr (dictionary_key_type == DictionaryKeyType::Simple) { @@ -318,33 +339,35 @@ DictionaryHierarchicalParentToChildIndexPtr HashedArrayDictionary & parent_keys_container = std::get>(hierarchical_attribute.container); - - const auto & key_attribute_container = key_attribute.container; - - HashMap index_to_key; - index_to_key.reserve(key_attribute.container.size()); - - for (auto & [key, value] : key_attribute_container) - index_to_key[value] = key; DictionaryHierarchicalParentToChildIndex::ParentToChildIndex parent_to_child; - parent_to_child.reserve(index_to_key.size()); - - size_t parent_keys_container_size = parent_keys_container.size(); - for (size_t i = 0; i < parent_keys_container_size; ++i) + for (size_t shard = 0; shard < configuration.shards; ++shard) { - if (unlikely(hierarchical_attribute.is_index_null) && (*hierarchical_attribute.is_index_null)[i]) - continue; + HashMap 
index_to_key; + index_to_key.reserve(element_counts[shard]); - const auto * it = index_to_key.find(i); - if (it == index_to_key.end()) - continue; + for (auto & [key, value] : key_attribute.containers[shard]) + index_to_key[value] = key; - auto child_key = it->getMapped(); - auto parent_key = parent_keys_container[i]; - parent_to_child[parent_key].emplace_back(child_key); + parent_to_child.reserve(parent_to_child.size() + index_to_key.size()); + + const auto & hierarchical_attribute = attributes[hierarchical_attribute_index]; + const auto & parent_keys_container = std::get>(hierarchical_attribute.containers)[shard]; + + size_t parent_keys_container_size = parent_keys_container.size(); + for (size_t i = 0; i < parent_keys_container_size; ++i) + { + if (unlikely(hierarchical_attribute.is_index_null) && (*hierarchical_attribute.is_index_null)[shard][i]) + continue; + + const auto * it = index_to_key.find(i); + if (it == index_to_key.end()) + continue; + + auto child_key = it->getMapped(); + auto parent_key = parent_keys_container[i]; + parent_to_child[parent_key].emplace_back(child_key); + } } return std::make_shared(parent_to_child); @@ -355,8 +378,8 @@ DictionaryHierarchicalParentToChildIndexPtr HashedArrayDictionary -ColumnPtr HashedArrayDictionary::getDescendants( +template +ColumnPtr HashedArrayDictionary::getDescendants( ColumnPtr key_column [[maybe_unused]], const DataTypePtr &, size_t level [[maybe_unused]], @@ -381,8 +404,8 @@ ColumnPtr HashedArrayDictionary::getDescendants( } } -template -void HashedArrayDictionary::createAttributes() +template +void HashedArrayDictionary::createAttributes() { const auto size = dict_struct.attributes.size(); attributes.reserve(size); @@ -395,17 +418,24 @@ void HashedArrayDictionary::createAttributes() using AttributeType = typename Type::AttributeType; using ValueType = DictionaryValueType; - auto is_index_null = dictionary_attribute.is_nullable ? std::make_optional>() : std::optional>{}; - Attribute attribute{dictionary_attribute.underlying_type, AttributeContainerType(), std::move(is_index_null)}; + auto is_index_null = dictionary_attribute.is_nullable ? 
std::make_optional>(configuration.shards) : std::nullopt; + Attribute attribute{dictionary_attribute.underlying_type, AttributeContainerShardsType(configuration.shards), std::move(is_index_null)}; attributes.emplace_back(std::move(attribute)); }; callOnDictionaryAttributeType(dictionary_attribute.underlying_type, type_call); } + + key_attribute.containers.resize(configuration.shards); + element_counts.resize(configuration.shards); + + string_arenas.resize(configuration.shards); + for (auto & arena : string_arenas) + arena = std::make_unique(); } -template -void HashedArrayDictionary::updateData() +template +void HashedArrayDictionary::updateData() { if (!update_field_loaded_block || update_field_loaded_block->rows() == 0) { @@ -445,13 +475,17 @@ void HashedArrayDictionary::updateData() if (update_field_loaded_block) { resize(update_field_loaded_block->rows()); - blockToAttributes(*update_field_loaded_block.get()); + DictionaryKeysArenaHolder arena_holder; + blockToAttributes(*update_field_loaded_block.get(), arena_holder, /* shard = */ 0); } } -template -void HashedArrayDictionary::blockToAttributes(const Block & block [[maybe_unused]]) +template +void HashedArrayDictionary::blockToAttributes(const Block & block, DictionaryKeysArenaHolder & arena_holder, size_t shard) { + if (unlikely(shard >= configuration.shards)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Shard number {} is out of range: 0..{}", shard, configuration.shards - 1); + size_t skip_keys_size_offset = dict_struct.getKeysSize(); Columns key_columns; @@ -461,7 +495,6 @@ void HashedArrayDictionary::blockToAttributes(const Block & for (size_t i = 0; i < skip_keys_size_offset; ++i) key_columns.emplace_back(block.safeGetByPosition(i).column); - DictionaryKeysArenaHolder arena_holder; DictionaryKeysExtractor keys_extractor(key_columns, arena_holder.getComplexKeyArena()); const size_t keys_size = keys_extractor.getKeysSize(); @@ -471,18 +504,18 @@ void HashedArrayDictionary::blockToAttributes(const Block & { auto key = keys_extractor.extractCurrentKey(); - auto it = key_attribute.container.find(key); + auto it = key_attribute.containers[shard].find(key); - if (it != key_attribute.container.end()) + if (it != key_attribute.containers[shard].end()) { keys_extractor.rollbackCurrentKey(); continue; } if constexpr (std::is_same_v) - key = copyStringInArena(string_arena, key); + key = copyStringInArena(*string_arenas[shard], key); - key_attribute.container.insert({key, element_count}); + key_attribute.containers[shard].insert({key, element_counts[shard]}); for (size_t attribute_index = 0; attribute_index < attributes.size(); ++attribute_index) { @@ -498,16 +531,16 @@ void HashedArrayDictionary::blockToAttributes(const Block & using AttributeType = typename Type::AttributeType; using AttributeValueType = DictionaryValueType; - auto & attribute_container = std::get>(attribute.container); + auto & attribute_container = std::get>(attribute.containers)[shard]; attribute_container.emplace_back(); if (attribute_is_nullable) { - attribute.is_index_null->emplace_back(); + (*attribute.is_index_null)[shard].emplace_back(); if (column_value_to_insert.isNull()) { - (*attribute.is_index_null).back() = true; + (*attribute.is_index_null)[shard].back() = true; return; } } @@ -515,7 +548,7 @@ void HashedArrayDictionary::blockToAttributes(const Block & if constexpr (std::is_same_v) { String & value_to_insert = column_value_to_insert.get(); - StringRef string_in_arena_reference = copyStringInArena(string_arena, value_to_insert); + StringRef 
string_in_arena_reference = copyStringInArena(*string_arenas[shard], value_to_insert); attribute_container.back() = string_in_arena_reference; } else @@ -528,23 +561,29 @@ void HashedArrayDictionary::blockToAttributes(const Block & callOnDictionaryAttributeType(attribute.type, type_call); } - ++element_count; + ++element_counts[shard]; + ++total_element_count; keys_extractor.rollbackCurrentKey(); } } -template -void HashedArrayDictionary::resize(size_t total_rows) +template +void HashedArrayDictionary::resize(size_t total_rows) { if (unlikely(!total_rows)) return; - key_attribute.container.reserve(total_rows); + /// In multi shards configuration it is pointless. + if constexpr (sharded) + return; + + for (auto & container : key_attribute.containers) + container.reserve(total_rows); } -template +template template -ColumnPtr HashedArrayDictionary::getAttributeColumn( +ColumnPtr HashedArrayDictionary::getAttributeColumn( const Attribute & attribute, const DictionaryAttribute & dictionary_attribute, size_t keys_size, @@ -638,16 +677,14 @@ ColumnPtr HashedArrayDictionary::getAttributeColumn( return result; } -template +template template -void HashedArrayDictionary::getItemsImpl( +void HashedArrayDictionary::getItemsImpl( const Attribute & attribute, DictionaryKeysExtractor & keys_extractor, ValueSetter && set_value [[maybe_unused]], DefaultValueExtractor & default_value_extractor) const { - const auto & key_attribute_container = key_attribute.container; - const auto & attribute_container = std::get>(attribute.container); const size_t keys_size = keys_extractor.getKeysSize(); size_t keys_found = 0; @@ -655,6 +692,9 @@ void HashedArrayDictionary::getItemsImpl( for (size_t key_index = 0; key_index < keys_size; ++key_index) { auto key = keys_extractor.extractCurrentKey(); + auto shard = getShard(key); + const auto & key_attribute_container = key_attribute.containers[shard]; + const auto & attribute_container = std::get>(attribute.containers)[shard]; const auto it = key_attribute_container.find(key); @@ -665,7 +705,7 @@ void HashedArrayDictionary::getItemsImpl( const auto & element = attribute_container[element_index]; if constexpr (is_nullable) - set_value(key_index, element, (*attribute.is_index_null)[element_index]); + set_value(key_index, element, (*attribute.is_index_null)[shard][element_index]); else set_value(key_index, element, false); @@ -686,28 +726,39 @@ void HashedArrayDictionary::getItemsImpl( found_count.fetch_add(keys_found, std::memory_order_relaxed); } -template +template template -void HashedArrayDictionary::getItemsImpl( +void HashedArrayDictionary::getItemsImpl( const Attribute & attribute, - const PaddedPODArray & key_index_to_element_index, + const KeyIndexToElementIndex & key_index_to_element_index, ValueSetter && set_value, DefaultValueExtractor & default_value_extractor) const { - const auto & attribute_container = std::get>(attribute.container); const size_t keys_size = key_index_to_element_index.size(); + size_t shard = 0; for (size_t key_index = 0; key_index < keys_size; ++key_index) { - bool key_exists = key_index_to_element_index[key_index] != -1; - - if (key_exists) + ssize_t element_index; + if constexpr (sharded) { - size_t element_index = static_cast(key_index_to_element_index[key_index]); - const auto & element = attribute_container[element_index]; + element_index = key_index_to_element_index[key_index].first; + shard = key_index_to_element_index[key_index].second; + } + else + { + element_index = key_index_to_element_index[key_index]; + } + + if (element_index != 
-1) + { + const auto & attribute_container = std::get>(attribute.containers)[shard]; + + size_t found_element_index = static_cast(element_index); + const auto & element = attribute_container[found_element_index]; if constexpr (is_nullable) - set_value(key_index, element, (*attribute.is_index_null)[element_index]); + set_value(key_index, element, (*attribute.is_index_null)[shard][found_element_index]); else set_value(key_index, element, false); } @@ -721,13 +772,17 @@ void HashedArrayDictionary::getItemsImpl( } } -template -void HashedArrayDictionary::loadData() +template +void HashedArrayDictionary::loadData() { if (!source_ptr->hasUpdateField()) { - QueryPipeline pipeline; - pipeline = QueryPipeline(source_ptr->loadAll()); + + std::optional parallel_loader; + if constexpr (sharded) + parallel_loader.emplace(*this); + + QueryPipeline pipeline(source_ptr->loadAll()); DictionaryPipelineExecutor executor(pipeline, configuration.use_async_executor); UInt64 pull_time_microseconds = 0; @@ -751,10 +806,22 @@ void HashedArrayDictionary::loadData() Stopwatch watch_process; resize(total_rows); - blockToAttributes(block); + + if (parallel_loader) + { + parallel_loader->addBlock(block); + } + else + { + DictionaryKeysArenaHolder arena_holder; + blockToAttributes(block, arena_holder, /* shard = */ 0); + } process_time_microseconds += watch_process.elapsedMicroseconds(); } + if (parallel_loader) + parallel_loader->finish(); + LOG_DEBUG(&Poco::Logger::get("HashedArrayDictionary"), "Finished {}reading {} blocks with {} rows from pipeline in {:.2f} sec and inserted into hashtable in {:.2f} sec", configuration.use_async_executor ? "asynchronous " : "", @@ -765,14 +832,14 @@ void HashedArrayDictionary::loadData() updateData(); } - if (configuration.require_nonempty && 0 == element_count) + if (configuration.require_nonempty && 0 == total_element_count) throw Exception(ErrorCodes::DICTIONARY_IS_EMPTY, "{}: dictionary source is empty and 'require_nonempty' property is set.", getFullName()); } -template -void HashedArrayDictionary::buildHierarchyParentToChildIndexIfNeeded() +template +void HashedArrayDictionary::buildHierarchyParentToChildIndexIfNeeded() { if (!dict_struct.hierarchical_attribute_index) return; @@ -781,12 +848,13 @@ void HashedArrayDictionary::buildHierarchyParentToChildInde hierarchical_index = getHierarchicalIndex(); } -template -void HashedArrayDictionary::calculateBytesAllocated() +template +void HashedArrayDictionary::calculateBytesAllocated() { bytes_allocated += attributes.size() * sizeof(attributes.front()); - bytes_allocated += key_attribute.container.size(); + for (const auto & container : key_attribute.containers) + bytes_allocated += container.size(); for (auto & attribute : attributes) { @@ -796,26 +864,29 @@ void HashedArrayDictionary::calculateBytesAllocated() using AttributeType = typename Type::AttributeType; using ValueType = DictionaryValueType; - const auto & container = std::get>(attribute.container); - bytes_allocated += sizeof(AttributeContainerType); - - if constexpr (std::is_same_v) + for (const auto & container : std::get>(attribute.containers)) { - /// It is not accurate calculations - bytes_allocated += sizeof(Array) * container.size(); - } - else - { - bytes_allocated += container.allocated_bytes(); - } + bytes_allocated += sizeof(AttributeContainerType); - bucket_count = container.capacity(); + if constexpr (std::is_same_v) + { + /// It is not accurate calculations + bytes_allocated += sizeof(Array) * container.size(); + } + else + { + bytes_allocated += 
container.allocated_bytes(); + } + + bucket_count = container.capacity(); + } }; callOnDictionaryAttributeType(attribute.type, type_call); if (attribute.is_index_null.has_value()) - bytes_allocated += (*attribute.is_index_null).size(); + for (const auto & container : attribute.is_index_null.value()) + bytes_allocated += container.size(); } if (update_field_loaded_block) @@ -826,18 +897,19 @@ void HashedArrayDictionary::calculateBytesAllocated() hierarchical_index_bytes_allocated = hierarchical_index->getSizeInBytes(); bytes_allocated += hierarchical_index_bytes_allocated; } - - bytes_allocated += string_arena.allocatedBytes(); + for (const auto & string_arena : string_arenas) + bytes_allocated += string_arena->allocatedBytes(); } -template -Pipe HashedArrayDictionary::read(const Names & column_names, size_t max_block_size, size_t num_streams) const +template +Pipe HashedArrayDictionary::read(const Names & column_names, size_t max_block_size, size_t num_streams) const { PaddedPODArray keys; - keys.reserve(key_attribute.container.size()); + keys.reserve(total_element_count); - for (auto & [key, _] : key_attribute.container) - keys.emplace_back(key); + for (const auto & container : key_attribute.containers) + for (auto & [key, _] : container) + keys.emplace_back(key); ColumnsWithTypeAndName key_columns; @@ -858,8 +930,10 @@ Pipe HashedArrayDictionary::read(const Names & column_names return result; } -template class HashedArrayDictionary; -template class HashedArrayDictionary; +template class HashedArrayDictionary; +template class HashedArrayDictionary; +template class HashedArrayDictionary; +template class HashedArrayDictionary; void registerDictionaryArrayHashed(DictionaryFactory & factory) { @@ -886,7 +960,14 @@ void registerDictionaryArrayHashed(DictionaryFactory & factory) const DictionaryLifetime dict_lifetime{config, config_prefix + ".lifetime"}; const bool require_nonempty = config.getBool(config_prefix + ".require_nonempty", false); - HashedArrayDictionaryStorageConfiguration configuration{require_nonempty, dict_lifetime}; + std::string dictionary_layout_name = dictionary_key_type == DictionaryKeyType::Simple ? "hashed_array" : "complex_key_hashed_array"; + std::string dictionary_layout_prefix = ".layout." 
+ dictionary_layout_name; + + Int64 shards = config.getInt(config_prefix + dictionary_layout_prefix + ".shards", 1); + if (shards <= 0 || 128 < shards) + throw Exception(ErrorCodes::BAD_ARGUMENTS,"{}: SHARDS parameter should be within [1, 128]", full_name); + + HashedArrayDictionaryStorageConfiguration configuration{require_nonempty, dict_lifetime, static_cast(shards)}; ContextMutablePtr context = copyContextAndApplySettingsFromDictionaryConfig(global_context, config, config_prefix); const auto & settings = context->getSettingsRef(); @@ -895,9 +976,17 @@ void registerDictionaryArrayHashed(DictionaryFactory & factory) configuration.use_async_executor = clickhouse_source && clickhouse_source->isLocal() && settings.dictionary_use_async_executor; if (dictionary_key_type == DictionaryKeyType::Simple) - return std::make_unique>(dict_id, dict_struct, std::move(source_ptr), configuration); + { + if (shards > 1) + return std::make_unique>(dict_id, dict_struct, std::move(source_ptr), configuration); + return std::make_unique>(dict_id, dict_struct, std::move(source_ptr), configuration); + } else - return std::make_unique>(dict_id, dict_struct, std::move(source_ptr), configuration); + { + if (shards > 1) + return std::make_unique>(dict_id, dict_struct, std::move(source_ptr), configuration); + return std::make_unique>(dict_id, dict_struct, std::move(source_ptr), configuration); + } }; factory.registerLayout("hashed_array", diff --git a/src/Dictionaries/HashedArrayDictionary.h b/src/Dictionaries/HashedArrayDictionary.h index 3b9446e4e8f..606008ce921 100644 --- a/src/Dictionaries/HashedArrayDictionary.h +++ b/src/Dictionaries/HashedArrayDictionary.h @@ -13,6 +13,7 @@ #include #include #include +#include /** This dictionary stores all attributes in arrays. * Key is stored in hash table and value is index into attribute array. 
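// Illustrative sketch (not part of the diff) of the dispatch done in
// registerDictionaryArrayHashed above: a runtime `shards` value, validated to [1, 128],
// selects between the sharded and non-sharded template instantiations. The interface and
// class names below are simplified placeholders, not the real ClickHouse types.
#include <cstdint>
#include <memory>
#include <stdexcept>

struct IExampleDictionary { virtual ~IExampleDictionary() = default; };

template <bool sharded>
struct ExampleHashedArrayDictionary : IExampleDictionary
{
    explicit ExampleHashedArrayDictionary(size_t shards_) : shards(shards_) {}
    size_t shards;
};

std::unique_ptr<IExampleDictionary> createExampleDictionary(int64_t shards)
{
    if (shards <= 0 || 128 < shards)
        throw std::invalid_argument("SHARDS parameter should be within [1, 128]");

    /// shards == 1 keeps the cheaper non-sharded layout; anything larger enables
    /// per-shard containers and the parallel loader.
    if (shards > 1)
        return std::make_unique<ExampleHashedArrayDictionary</* sharded */ true>>(static_cast<size_t>(shards));
    return std::make_unique<ExampleHashedArrayDictionary</* sharded */ false>>(1);
}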
@@ -25,12 +26,17 @@ struct HashedArrayDictionaryStorageConfiguration { const bool require_nonempty; const DictionaryLifetime lifetime; + size_t shards = 1; + size_t shard_load_queue_backlog = 10000; bool use_async_executor = false; }; -template +template class HashedArrayDictionary final : public IDictionary { + using DictionaryParallelLoaderType = HashedDictionaryImpl::HashedDictionaryParallelLoader>; + friend class HashedDictionaryImpl::HashedDictionaryParallelLoader>; + public: using KeyType = std::conditional_t; @@ -63,13 +69,13 @@ public: double getHitRate() const override { return 1.0; } - size_t getElementCount() const override { return element_count; } + size_t getElementCount() const override { return total_element_count; } - double getLoadFactor() const override { return static_cast(element_count) / bucket_count; } + double getLoadFactor() const override { return static_cast(total_element_count) / bucket_count; } std::shared_ptr clone() const override { - return std::make_shared>(getDictionaryID(), dict_struct, source_ptr->clone(), configuration, update_field_loaded_block); + return std::make_shared>(getDictionaryID(), dict_struct, source_ptr->clone(), configuration, update_field_loaded_block); } DictionarySourcePtr getSource() const override { return source_ptr; } @@ -132,50 +138,54 @@ private: template using AttributeContainerType = std::conditional_t, std::vector, PaddedPODArray>; + template + using AttributeContainerShardsType = std::vector>; + struct Attribute final { AttributeUnderlyingType type; std::variant< - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType, - AttributeContainerType> - container; + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType, + AttributeContainerShardsType> + containers; - std::optional> is_index_null; + /// One container per shard + using RowsMask = std::vector; + std::optional> is_index_null; }; struct KeyAttribute final { - - KeyContainerType container; - + /// One container per shard + std::vector containers; }; void createAttributes(); - void blockToAttributes(const Block & block); + void blockToAttributes(const Block & block, DictionaryKeysArenaHolder & arena_holder, size_t shard); void updateData(); @@ -185,6 +195,22 @@ private: void calculateBytesAllocated(); + UInt64 getShard(UInt64 key) const + { + if constexpr (!sharded) + return 0; 
+ /// NOTE: function here should not match with the DefaultHash<> since + /// it used for the HashMap/sparse_hash_map. + return intHashCRC32(key) % configuration.shards; + } + + UInt64 getShard(StringRef key) const + { + if constexpr (!sharded) + return 0; + return StringRefHash()(key) % configuration.shards; + } + template ColumnPtr getAttributeColumn( const Attribute & attribute, @@ -200,10 +226,13 @@ private: ValueSetter && set_value, DefaultValueExtractor & default_value_extractor) const; + + using KeyIndexToElementIndex = std::conditional_t>, PaddedPODArray>; + template void getItemsImpl( const Attribute & attribute, - const PaddedPODArray & key_index_to_element_index, + const KeyIndexToElementIndex & key_index_to_element_index, ValueSetter && set_value, DefaultValueExtractor & default_value_extractor) const; @@ -215,6 +244,8 @@ private: void resize(size_t total_rows); + Poco::Logger * log; + const DictionaryStructure dict_struct; const DictionarySourcePtr source_ptr; const HashedArrayDictionaryStorageConfiguration configuration; @@ -225,17 +256,20 @@ private: size_t bytes_allocated = 0; size_t hierarchical_index_bytes_allocated = 0; - size_t element_count = 0; + std::atomic total_element_count = 0; + std::vector element_counts; size_t bucket_count = 0; mutable std::atomic query_count{0}; mutable std::atomic found_count{0}; BlockPtr update_field_loaded_block; - Arena string_arena; + std::vector> string_arenas; DictionaryHierarchicalParentToChildIndexPtr hierarchical_index; }; -extern template class HashedArrayDictionary; -extern template class HashedArrayDictionary; +extern template class HashedArrayDictionary; +extern template class HashedArrayDictionary; +extern template class HashedArrayDictionary; +extern template class HashedArrayDictionary; } diff --git a/src/Dictionaries/HashedDictionary.h b/src/Dictionaries/HashedDictionary.h index 376637189dd..8009ffab80a 100644 --- a/src/Dictionaries/HashedDictionary.h +++ b/src/Dictionaries/HashedDictionary.h @@ -71,7 +71,8 @@ struct HashedDictionaryConfiguration template class HashedDictionary final : public IDictionary { - friend class HashedDictionaryParallelLoader; + using DictionaryParallelLoaderType = HashedDictionaryParallelLoader>; + friend class HashedDictionaryParallelLoader>; public: using KeyType = std::conditional_t; @@ -987,7 +988,7 @@ void HashedDictionary::getItemsImpl( auto key = keys_extractor.extractCurrentKey(); auto shard = getShard(key); - const auto & container = attribute_containers[getShard(key)]; + const auto & container = attribute_containers[shard]; const auto it = container.find(key); if (it != container.end()) @@ -1020,11 +1021,11 @@ void HashedDictionary::loadData() { if (!source_ptr->hasUpdateField()) { - std::optional> parallel_loader; + std::optional parallel_loader; if constexpr (sharded) parallel_loader.emplace(*this); - QueryPipeline pipeline = QueryPipeline(source_ptr->loadAll()); + QueryPipeline pipeline(source_ptr->loadAll()); DictionaryPipelineExecutor executor(pipeline, configuration.use_async_executor); Block block; diff --git a/src/Dictionaries/HashedDictionaryParallelLoader.h b/src/Dictionaries/HashedDictionaryParallelLoader.h index b52158c7fcb..907a987555e 100644 --- a/src/Dictionaries/HashedDictionaryParallelLoader.h +++ b/src/Dictionaries/HashedDictionaryParallelLoader.h @@ -38,13 +38,12 @@ namespace DB::HashedDictionaryImpl { /// Implementation parallel dictionary load for SHARDS -template +template class HashedDictionaryParallelLoader : public boost::noncopyable { - using HashedDictionary = 
HashedDictionary; public: - explicit HashedDictionaryParallelLoader(HashedDictionary & dictionary_) + explicit HashedDictionaryParallelLoader(DictionaryType & dictionary_) : dictionary(dictionary_) , shards(dictionary.configuration.shards) , pool(CurrentMetrics::HashedDictionaryThreads, CurrentMetrics::HashedDictionaryThreadsActive, CurrentMetrics::HashedDictionaryThreadsScheduled, shards) @@ -118,7 +117,7 @@ public: } private: - HashedDictionary & dictionary; + DictionaryType & dictionary; const size_t shards; ThreadPool pool; std::vector>> shards_queues; diff --git a/src/Dictionaries/RangeHashedDictionary.h b/src/Dictionaries/RangeHashedDictionary.h index 9be9fa1d0d4..c44bffe42e1 100644 --- a/src/Dictionaries/RangeHashedDictionary.h +++ b/src/Dictionaries/RangeHashedDictionary.h @@ -683,7 +683,7 @@ void RangeHashedDictionary::loadData() if (configuration.require_nonempty && 0 == element_count) throw Exception(ErrorCodes::DICTIONARY_IS_EMPTY, - "{}: dictionary source is empty and 'require_nonempty' property is set."); + "{}: dictionary source is empty and 'require_nonempty' property is set.", getFullName()); } template diff --git a/src/Dictionaries/SSDCacheDictionaryStorage.h b/src/Dictionaries/SSDCacheDictionaryStorage.h index 6c98ce9c180..68f727c019c 100644 --- a/src/Dictionaries/SSDCacheDictionaryStorage.h +++ b/src/Dictionaries/SSDCacheDictionaryStorage.h @@ -481,7 +481,7 @@ public: if (file.fd == -1) { auto error_code = (errno == ENOENT) ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE; - throwFromErrnoWithPath("Cannot open file " + file_path, file_path, error_code); + ErrnoException::throwFromPath(error_code, file_path, "Cannot open file {}", file_path); } allocateSizeForNextPartition(); @@ -490,7 +490,8 @@ public: void allocateSizeForNextPartition() { if (preallocateDiskSpace(file.fd, current_blocks_size * block_size, block_size * file_blocks_size) < 0) - throwFromErrnoWithPath("Cannot preallocate space for the file " + file_path, file_path, ErrorCodes::CANNOT_ALLOCATE_MEMORY); + ErrnoException::throwFromPath( + ErrorCodes::CANNOT_ALLOCATE_MEMORY, file_path, "Cannot preallocate space for the file {}", file_path); current_blocks_size += file_blocks_size; } @@ -552,11 +553,11 @@ public: Stopwatch watch; #if defined(OS_DARWIN) if (::fsync(file.fd) < 0) - throwFromErrnoWithPath("Cannot fsync " + file_path, file_path, ErrorCodes::CANNOT_FSYNC); - #else + ErrnoException::throwFromPath(ErrorCodes::CANNOT_FSYNC, file_path, "Cannot fsync {}", file_path); +# else if (::fdatasync(file.fd) < 0) - throwFromErrnoWithPath("Cannot fdatasync " + file_path, file_path, ErrorCodes::CANNOT_FSYNC); - #endif + ErrnoException::throwFromPath(ErrorCodes::CANNOT_FSYNC, file_path, "Cannot fdatasync {}", file_path); +# endif ProfileEvents::increment(ProfileEvents::FileSyncElapsedMicroseconds, watch.elapsedMicroseconds()); current_block_index += buffer_size_in_blocks; @@ -598,13 +599,13 @@ public: while (io_submit(aio_context.ctx, 1, &request_ptr) != 1) { if (errno != EINTR) - throwFromErrno("io_submit: Failed to submit a request for asynchronous IO", ErrorCodes::CANNOT_IO_SUBMIT); + throw ErrnoException(ErrorCodes::CANNOT_IO_SUBMIT, "io_submit: Failed to submit a request for asynchronous IO"); } while (io_getevents(aio_context.ctx, 1, 1, &event, nullptr) != 1) { if (errno != EINTR) - throwFromErrno("io_getevents: Failed to get an event for asynchronous IO", ErrorCodes::CANNOT_IO_GETEVENTS); + throw ErrnoException(ErrorCodes::CANNOT_IO_GETEVENTS, "io_getevents: Failed to get an event for 
asynchronous IO"); } auto read_bytes = eventResult(event); @@ -692,7 +693,7 @@ public: while (to_pop < to_push && (popped = io_getevents(aio_context.ctx, to_push - to_pop, to_push - to_pop, &events[to_pop], nullptr)) <= 0) { if (errno != EINTR) - throwFromErrno("io_getevents: Failed to get an event for asynchronous IO", ErrorCodes::CANNOT_IO_GETEVENTS); + throw ErrnoException(ErrorCodes::CANNOT_IO_GETEVENTS, "io_getevents: Failed to get an event for asynchronous IO"); } for (size_t i = to_pop; i < to_pop + popped; ++i) @@ -743,7 +744,7 @@ public: while (new_tasks_count > 0 && (pushed = io_submit(aio_context.ctx, new_tasks_count, &pointers[to_push])) <= 0) { if (errno != EINTR) - throwFromErrno("io_submit: Failed to submit a request for asynchronous IO", ErrorCodes::CANNOT_IO_SUBMIT); + throw ErrnoException(ErrorCodes::CANNOT_IO_SUBMIT, "io_submit: Failed to submit a request for asynchronous IO"); } to_push += pushed; diff --git a/src/Dictionaries/XDBCDictionarySource.cpp b/src/Dictionaries/XDBCDictionarySource.cpp index 23dc7db508d..080f7db96be 100644 --- a/src/Dictionaries/XDBCDictionarySource.cpp +++ b/src/Dictionaries/XDBCDictionarySource.cpp @@ -76,7 +76,7 @@ XDBCDictionarySource::XDBCDictionarySource( , load_all_query(query_builder.composeLoadAllQuery()) , bridge_helper(bridge_) , bridge_url(bridge_helper->getMainURI()) - , timeouts(ConnectionTimeouts::getHTTPTimeouts(context_->getSettingsRef(), {context_->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0})) + , timeouts(ConnectionTimeouts::getHTTPTimeouts(context_->getSettingsRef(), context_->getServerSettings().keep_alive_timeout)) { auto url_params = bridge_helper->getURLParams(max_block_size); for (const auto & [name, value] : url_params) diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp index 394c08e4876..5e77ff61789 100644 --- a/src/Disks/DiskLocal.cpp +++ b/src/Disks/DiskLocal.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -359,21 +360,21 @@ void DiskLocal::removeFile(const String & path) { auto fs_path = fs::path(disk_path) / path; if (0 != unlink(fs_path.c_str())) - throwFromErrnoWithPath("Cannot unlink file " + fs_path.string(), fs_path, ErrorCodes::CANNOT_UNLINK); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_UNLINK, fs_path, "Cannot unlink file {}", fs_path); } void DiskLocal::removeFileIfExists(const String & path) { auto fs_path = fs::path(disk_path) / path; if (0 != unlink(fs_path.c_str()) && errno != ENOENT) - throwFromErrnoWithPath("Cannot unlink file " + fs_path.string(), fs_path, ErrorCodes::CANNOT_UNLINK); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_UNLINK, fs_path, "Cannot unlink file {}", fs_path); } void DiskLocal::removeDirectory(const String & path) { auto fs_path = fs::path(disk_path) / path; if (0 != rmdir(fs_path.c_str())) - throwFromErrnoWithPath("Cannot rmdir " + fs_path.string(), fs_path, ErrorCodes::CANNOT_RMDIR); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_RMDIR, fs_path, "Cannot remove directory {}", fs_path); } void DiskLocal::removeRecursive(const String & path) @@ -412,7 +413,7 @@ void DiskLocal::truncateFile(const String & path, size_t size) { int res = truncate((fs::path(disk_path) / path).string().data(), size); if (-1 == res) - throwFromErrnoWithPath("Cannot truncate file " + path, path, ErrorCodes::CANNOT_TRUNCATE_FILE); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_TRUNCATE_FILE, path, "Cannot truncate {}", path); } void DiskLocal::createFile(const String & path) @@ -709,7 +710,7 @@ 
struct stat DiskLocal::stat(const String & path) const auto full_path = fs::path(disk_path) / path; if (::stat(full_path.string().c_str(), &st) == 0) return st; - DB::throwFromErrnoWithPath("Cannot stat file: " + path, path, DB::ErrorCodes::CANNOT_STAT); + DB::ErrnoException::throwFromPath(DB::ErrorCodes::CANNOT_STAT, path, "Cannot stat file: {}", path); } void DiskLocal::chmod(const String & path, mode_t mode) @@ -717,7 +718,7 @@ void DiskLocal::chmod(const String & path, mode_t mode) auto full_path = fs::path(disk_path) / path; if (::chmod(full_path.string().c_str(), mode) == 0) return; - DB::throwFromErrnoWithPath("Cannot chmod file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED); + DB::ErrnoException::throwFromPath(DB::ErrorCodes::PATH_ACCESS_DENIED, path, "Cannot chmod file: {}", path); } void registerDiskLocal(DiskFactory & factory, bool global_skip_access_check) diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index 833614c4bd8..f507fb207e5 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -177,22 +177,13 @@ CachedOnDiskReadBufferFromFile::getCacheReadBuffer(const FileSegment & file_segm } ReadSettings local_read_settings{settings}; - local_read_settings.local_fs_prefetch = false; - if (local_read_settings.local_fs_method != LocalFSReadMethod::pread_threadpool) - local_read_settings.local_fs_method = LocalFSReadMethod::pread; + local_read_settings.local_fs_method = LocalFSReadMethod::pread; if (use_external_buffer) local_read_settings.local_fs_buffer_size = 0; - cache_file_reader = createReadBufferFromFileBase( - path, - local_read_settings, - std::nullopt, - std::nullopt, - file_segment.getFlagsForLocalRead(), - /*existing_memory=*/nullptr, - /*alignment=*/0, - /*use_external_buffer=*/true); + cache_file_reader + = createReadBufferFromFileBase(path, local_read_settings, std::nullopt, std::nullopt, file_segment.getFlagsForLocalRead()); if (getFileSizeFromReadBuffer(*cache_file_reader) == 0) throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to read from an empty cache file: {}", path); @@ -540,7 +531,7 @@ bool CachedOnDiskReadBufferFromFile::completeFileSegmentAndGetNext() return false; current_file_segment = &file_segments->front(); - current_file_segment->use(); + current_file_segment->increasePriority(); implementation_buffer = getImplementationBuffer(*current_file_segment); LOG_TEST( @@ -868,7 +859,7 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() else { implementation_buffer = getImplementationBuffer(file_segments->front()); - file_segments->front().use(); + file_segments->front().increasePriority(); } chassert(!internal_buffer.empty()); diff --git a/src/Disks/IO/IOUringReader.cpp b/src/Disks/IO/IOUringReader.cpp index 0e11b42ab01..4c9f665093d 100644 --- a/src/Disks/IO/IOUringReader.cpp +++ b/src/Disks/IO/IOUringReader.cpp @@ -77,7 +77,7 @@ IOUringReader::IOUringReader(uint32_t entries_) int ret = io_uring_queue_init_params(entries_, &ring, ¶ms); if (ret < 0) - throwFromErrno("Failed initializing io_uring", ErrorCodes::IO_URING_INIT_FAILED, -ret); + ErrnoException::throwWithErrno(ErrorCodes::IO_URING_INIT_FAILED, -ret, "Failed initializing io_uring"); cq_entries = params.cq_entries; ring_completion_monitor = std::make_unique([this] { monitorRing(); }); diff --git a/src/Disks/IO/ReadBufferFromWebServer.cpp b/src/Disks/IO/ReadBufferFromWebServer.cpp index 46d8c41ff78..90cd5285875 100644 --- a/src/Disks/IO/ReadBufferFromWebServer.cpp +++ 
b/src/Disks/IO/ReadBufferFromWebServer.cpp @@ -54,8 +54,7 @@ std::unique_ptr ReadBufferFromWebServer::initialize() } const auto & settings = context->getSettingsRef(); - const auto & config = context->getConfigRef(); - Poco::Timespan http_keep_alive_timeout{config.getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0}; + const auto & server_settings = context->getServerSettings(); auto res = std::make_unique( uri, @@ -65,7 +64,7 @@ std::unique_ptr ReadBufferFromWebServer::initialize() settings.http_send_timeout, std::max(Poco::Timespan(settings.http_receive_timeout.totalSeconds(), 0), Poco::Timespan(20, 0)), settings.tcp_keep_alive_timeout, - http_keep_alive_timeout), + server_settings.keep_alive_timeout), credentials, 0, buf_size, diff --git a/src/Disks/IO/ThreadPoolReader.cpp b/src/Disks/IO/ThreadPoolReader.cpp index 2ffae9b3338..bb295643726 100644 --- a/src/Disks/IO/ThreadPoolReader.cpp +++ b/src/Disks/IO/ThreadPoolReader.cpp @@ -175,9 +175,8 @@ std::future ThreadPoolReader::submit(Request reques else { ProfileEvents::increment(ProfileEvents::ReadBufferFromFileDescriptorReadFailed); - promise.set_exception(std::make_exception_ptr(ErrnoException( - fmt::format("Cannot read from file {}, {}", fd, errnoToString()), - ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, errno))); + promise.set_exception(std::make_exception_ptr( + ErrnoException(ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, "Cannot read from file {}", fd))); return future; } } @@ -233,7 +232,7 @@ std::future ThreadPoolReader::submit(Request reques if (-1 == res && errno != EINTR) { ProfileEvents::increment(ProfileEvents::ReadBufferFromFileDescriptorReadFailed); - throwFromErrno(fmt::format("Cannot read from file {}", fd), ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR); + throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, "Cannot read from file {}", fd); } bytes_read += res; diff --git a/src/Disks/IO/ThreadPoolRemoteFSReader.cpp b/src/Disks/IO/ThreadPoolRemoteFSReader.cpp index 9d079dd3b70..f3caf62ffd5 100644 --- a/src/Disks/IO/ThreadPoolRemoteFSReader.cpp +++ b/src/Disks/IO/ThreadPoolRemoteFSReader.cpp @@ -26,6 +26,7 @@ namespace ProfileEvents extern const Event ThreadpoolReaderSubmitReadSynchronously; extern const Event ThreadpoolReaderSubmitReadSynchronouslyBytes; extern const Event ThreadpoolReaderSubmitReadSynchronouslyMicroseconds; + extern const Event ThreadpoolReaderSubmitLookupInCacheMicroseconds; extern const Event AsynchronousReaderIgnoredBytes; } @@ -83,7 +84,13 @@ std::future ThreadPoolRemoteFSReader::submit(Reques reader.seek(request.offset, SEEK_SET); } - if (reader.isContentCached(request.offset, request.size)) + bool is_content_cached = false; + { + ProfileEventTimeIncrement elapsed(ProfileEvents::ThreadpoolReaderSubmitLookupInCacheMicroseconds); + is_content_cached = reader.isContentCached(request.offset, request.size); + } + + if (is_content_cached) { std::promise promise; std::future future = promise.get_future(); diff --git a/src/Disks/IO/WriteBufferFromTemporaryFile.cpp b/src/Disks/IO/WriteBufferFromTemporaryFile.cpp index 03713adef02..c47fe281176 100644 --- a/src/Disks/IO/WriteBufferFromTemporaryFile.cpp +++ b/src/Disks/IO/WriteBufferFromTemporaryFile.cpp @@ -29,8 +29,7 @@ public: off_t res = lseek(fd, 0, SEEK_SET); if (-1 == res) - throwFromErrnoWithPath("Cannot reread temporary file " + file_name, file_name, - ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, file_name, "Cannot reread temporary file {}", file_name); return 
std::make_unique(fd, file_name, std::move(origin->tmp_file)); } diff --git a/src/Disks/IO/createReadBufferFromFileBase.cpp b/src/Disks/IO/createReadBufferFromFileBase.cpp index 80dbc8df988..236dd43e9ee 100644 --- a/src/Disks/IO/createReadBufferFromFileBase.cpp +++ b/src/Disks/IO/createReadBufferFromFileBase.cpp @@ -101,12 +101,16 @@ std::unique_ptr createReadBufferFromFileBase( else if (settings.local_fs_method == LocalFSReadMethod::io_uring) { #if USE_LIBURING - static std::shared_ptr reader = std::make_shared(512); - if (!reader->isSupported()) + auto global_context = Context::getGlobalContextInstance(); + if (!global_context) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot obtain io_uring reader (global context not initialized)"); + + auto & reader = global_context->getIOURingReader(); + if (!reader.isSupported()) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "io_uring is not supported by this system"); res = std::make_unique( - *reader, + reader, settings.priority, filename, buffer_size, diff --git a/src/Disks/LocalDirectorySyncGuard.cpp b/src/Disks/LocalDirectorySyncGuard.cpp index 1ac57df63fb..6aff40cd47d 100644 --- a/src/Disks/LocalDirectorySyncGuard.cpp +++ b/src/Disks/LocalDirectorySyncGuard.cpp @@ -31,8 +31,8 @@ LocalDirectorySyncGuard::LocalDirectorySyncGuard(const String & full_path) : fd(::open(full_path.c_str(), O_DIRECTORY)) { if (-1 == fd) - throwFromErrnoWithPath("Cannot open file " + full_path, full_path, - errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); + ErrnoException::throwFromPath( + errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, full_path, "Cannot open file {}", full_path); } LocalDirectorySyncGuard::~LocalDirectorySyncGuard() @@ -45,7 +45,7 @@ LocalDirectorySyncGuard::~LocalDirectorySyncGuard() #if defined(OS_DARWIN) if (fcntl(fd, F_FULLFSYNC, 0)) - throwFromErrno("Cannot fcntl(F_FULLFSYNC)", ErrorCodes::CANNOT_FSYNC); + throw ErrnoException(ErrorCodes::CANNOT_FSYNC, "Cannot fcntl(F_FULLFSYNC)"); #else if (-1 == ::fdatasync(fd)) throw Exception(ErrorCodes::CANNOT_FSYNC, "Cannot fdatasync"); diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp index e459aae190c..742d735cc95 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp @@ -98,7 +98,7 @@ std::unique_ptr CachedObjectStorage::writeObject( /// N auto implementation_buffer = object_storage->writeObject(object, mode, attributes, buf_size, modified_write_settings); bool cache_on_write = modified_write_settings.enable_filesystem_cache_on_write_operations - && FileCacheFactory::instance().getByName(cache_config_name).settings.cache_on_write_operations + && FileCacheFactory::instance().getByName(cache_config_name)->getSettings().cache_on_write_operations && fs::path(object.remote_path).extension() != ".tmp"; /// Need to remove even if cache_on_write == false. 
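// Illustrative sketch (not part of the diff) of the scoped-timer pattern used in the
// ThreadPoolRemoteFSReader hunk above, where the isContentCached() lookup is attributed
// to a dedicated profile event: a RAII object measures the enclosing block and adds the
// elapsed microseconds to a counter on destruction. ExampleScopedTimer and the atomic
// counter are simplified stand-ins for ProfileEventTimeIncrement and ProfileEvents.
#include <atomic>
#include <chrono>
#include <cstddef>
#include <cstdint>

std::atomic<uint64_t> lookup_in_cache_microseconds{0};

class ExampleScopedTimer
{
public:
    explicit ExampleScopedTimer(std::atomic<uint64_t> & counter_) : counter(counter_) {}

    ~ExampleScopedTimer()
    {
        auto elapsed = std::chrono::steady_clock::now() - start;
        counter.fetch_add(
            std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count(),
            std::memory_order_relaxed);
    }

private:
    std::atomic<uint64_t> & counter;
    std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
};

bool lookupWithTiming(bool (*is_content_cached)(size_t, size_t), size_t offset, size_t size)
{
    bool cached = false;
    {
        /// Scope the timer to just the lookup, mirroring the block in the hunk above.
        ExampleScopedTimer elapsed(lookup_in_cache_microseconds);
        cached = is_content_cached(offset, size);
    }
    return cached;
}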
diff --git a/src/Disks/ObjectStorages/Cached/registerDiskCache.cpp b/src/Disks/ObjectStorages/Cached/registerDiskCache.cpp index 182326bbdc3..da01a82746f 100644 --- a/src/Disks/ObjectStorages/Cached/registerDiskCache.cpp +++ b/src/Disks/ObjectStorages/Cached/registerDiskCache.cpp @@ -6,9 +6,8 @@ #include #include #include -#include #include -#include + namespace DB { @@ -65,7 +64,7 @@ void registerDiskCache(DiskFactory & factory, bool /* global_skip_access_check * } } - auto cache = FileCacheFactory::instance().getOrCreate(name, file_cache_settings); + auto cache = FileCacheFactory::instance().getOrCreate(name, file_cache_settings, predefined_configuration ? "" : config_prefix); auto disk = disk_it->second; if (!dynamic_cast(disk.get())) throw Exception(ErrorCodes::BAD_ARGUMENTS, diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.cpp b/src/Disks/ObjectStorages/DiskObjectStorage.cpp index af26f2dddd0..c3baf3fdbda 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorage.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorage.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -46,6 +47,17 @@ DiskTransactionPtr DiskObjectStorage::createObjectStorageTransaction() send_metadata ? metadata_helper.get() : nullptr); } +DiskTransactionPtr DiskObjectStorage::createObjectStorageTransactionToAnotherDisk(DiskObjectStorage& to_disk) +{ + return std::make_shared( + *object_storage, + *metadata_storage, + *to_disk.getObjectStorage(), + *to_disk.getMetadataStorage(), + send_metadata ? metadata_helper.get() : nullptr); +} + + DiskObjectStorage::DiskObjectStorage( const String & name_, const String & object_key_prefix_, @@ -179,12 +191,13 @@ void DiskObjectStorage::copyFile( /// NOLINT const std::function & cancellation_hook ) { - if (this == &to_disk) + if (getDataSourceDescription() == to_disk.getDataSourceDescription()) { - /// It may use s3-server-side copy - auto transaction = createObjectStorageTransaction(); - transaction->copyFile(from_file_path, to_file_path); - transaction->commit(); + /// It may use s3-server-side copy + auto & to_disk_object_storage = dynamic_cast(to_disk); + auto transaction = createObjectStorageTransactionToAnotherDisk(to_disk_object_storage); + transaction->copyFile(from_file_path, to_file_path); + transaction->commit(); } else { diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.h b/src/Disks/ObjectStorages/DiskObjectStorage.h index 25b39c4d974..acc1591f8a9 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorage.h +++ b/src/Disks/ObjectStorages/DiskObjectStorage.h @@ -222,6 +222,7 @@ private: /// Create actual disk object storage transaction for operations /// execution. 
DiskTransactionPtr createObjectStorageTransaction(); + DiskTransactionPtr createObjectStorageTransactionToAnotherDisk(DiskObjectStorage& to_disk); String getReadResourceName() const; String getWriteResourceName() const; diff --git a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp index 83a229c3533..5958762fa09 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp @@ -38,6 +38,29 @@ DiskObjectStorageTransaction::DiskObjectStorageTransaction( , metadata_helper(metadata_helper_) {} + +DiskObjectStorageTransaction::DiskObjectStorageTransaction( + IObjectStorage & object_storage_, + IMetadataStorage & metadata_storage_, + DiskObjectStorageRemoteMetadataRestoreHelper * metadata_helper_, + MetadataTransactionPtr metadata_transaction_) + : object_storage(object_storage_) + , metadata_storage(metadata_storage_) + , metadata_transaction(metadata_transaction_) + , metadata_helper(metadata_helper_) +{} + +MultipleDisksObjectStorageTransaction::MultipleDisksObjectStorageTransaction( + IObjectStorage & object_storage_, + IMetadataStorage & metadata_storage_, + IObjectStorage& destination_object_storage_, + IMetadataStorage& destination_metadata_storage_, + DiskObjectStorageRemoteMetadataRestoreHelper * metadata_helper_) + : DiskObjectStorageTransaction(object_storage_, metadata_storage_, metadata_helper_, destination_metadata_storage_.createTransaction()) + , destination_object_storage(destination_object_storage_) + , destination_metadata_storage(destination_metadata_storage_) +{} + namespace { /// Operation which affects only metadata. Simplest way to @@ -485,10 +508,12 @@ struct CopyFileObjectStorageOperation final : public IDiskObjectStorageOperation std::string to_path; StoredObjects created_objects; + IObjectStorage& destination_object_storage; CopyFileObjectStorageOperation( IObjectStorage & object_storage_, IMetadataStorage & metadata_storage_, + IObjectStorage & destination_object_storage_, const ReadSettings & read_settings_, const WriteSettings & write_settings_, const std::string & from_path_, @@ -498,6 +523,7 @@ struct CopyFileObjectStorageOperation final : public IDiskObjectStorageOperation , write_settings(write_settings_) , from_path(from_path_) , to_path(to_path_) + , destination_object_storage(destination_object_storage_) {} std::string getInfoForLog() const override @@ -515,7 +541,7 @@ struct CopyFileObjectStorageOperation final : public IDiskObjectStorageOperation auto object_key = object_storage.generateObjectKeyForPath(to_path); auto object_to = StoredObject(object_key.serialize()); - object_storage.copyObject(object_from, object_to, read_settings, write_settings); + object_storage.copyObjectToAnotherObjectStorage(object_from, object_to,read_settings,write_settings, destination_object_storage); tx->addBlobToMetadata(to_path, object_key, object_from.bytes_size); @@ -526,7 +552,7 @@ struct CopyFileObjectStorageOperation final : public IDiskObjectStorageOperation void undo() override { for (const auto & object : created_objects) - object_storage.removeObject(object); + destination_object_storage.removeObject(object); } void finalize() override @@ -859,7 +885,13 @@ void DiskObjectStorageTransaction::createFile(const std::string & path) void DiskObjectStorageTransaction::copyFile(const std::string & from_file_path, const std::string & to_file_path, const ReadSettings & read_settings, const WriteSettings & write_settings) { 
operations_to_execute.emplace_back( - std::make_unique(object_storage, metadata_storage, read_settings, write_settings, from_file_path, to_file_path)); + std::make_unique(object_storage, metadata_storage, object_storage, read_settings, write_settings, from_file_path, to_file_path)); +} + +void MultipleDisksObjectStorageTransaction::copyFile(const std::string & from_file_path, const std::string & to_file_path, const ReadSettings & read_settings, const WriteSettings & write_settings) +{ + operations_to_execute.emplace_back( + std::make_unique(object_storage, metadata_storage, destination_object_storage, read_settings, write_settings, from_file_path, to_file_path)); } void DiskObjectStorageTransaction::commit() diff --git a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.h b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.h index 4b62a41e161..67044751b84 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.h +++ b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.h @@ -50,9 +50,9 @@ using DiskObjectStorageOperations = std::vector; /// /// If something wrong happen on step 1 or 2 reverts all applied operations. /// If finalize failed -- nothing is reverted, garbage is left in blob storage. -struct DiskObjectStorageTransaction final : public IDiskTransaction, std::enable_shared_from_this +struct DiskObjectStorageTransaction : public IDiskTransaction, std::enable_shared_from_this { -private: +protected: IObjectStorage & object_storage; IMetadataStorage & metadata_storage; @@ -63,6 +63,12 @@ private: DiskObjectStorageOperations operations_to_execute; + DiskObjectStorageTransaction( + IObjectStorage & object_storage_, + IMetadataStorage & metadata_storage_, + DiskObjectStorageRemoteMetadataRestoreHelper * metadata_helper_, + MetadataTransactionPtr metadata_transaction_); + public: DiskObjectStorageTransaction( IObjectStorage & object_storage_, @@ -118,6 +124,21 @@ public: void createHardLink(const std::string & src_path, const std::string & dst_path) override; }; +struct MultipleDisksObjectStorageTransaction final : public DiskObjectStorageTransaction, std::enable_shared_from_this +{ + IObjectStorage& destination_object_storage; + IMetadataStorage& destination_metadata_storage; + + MultipleDisksObjectStorageTransaction( + IObjectStorage & object_storage_, + IMetadataStorage & metadata_storage_, + IObjectStorage& destination_object_storage, + IMetadataStorage& destination_metadata_storage, + DiskObjectStorageRemoteMetadataRestoreHelper * metadata_helper_); + + void copyFile(const std::string & from_file_path, const std::string & to_file_path, const ReadSettings & read_settings, const WriteSettings &) override; +}; + using DiskObjectStorageTransactionPtr = std::shared_ptr; } diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp index 4cf3c23d5a6..c20a27e2384 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp @@ -141,7 +141,7 @@ void LocalObjectStorage::removeObject(const StoredObject & object) return; if (0 != unlink(object.remote_path.data())) - throwFromErrnoWithPath("Cannot unlink file " + object.remote_path, object.remote_path, ErrorCodes::CANNOT_UNLINK); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_UNLINK, object.remote_path, "Cannot unlink file {}", object.remote_path); } void LocalObjectStorage::removeObjects(const StoredObjects & objects) diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp 
b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index a84d4c8f4a1..beb8a400632 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -458,27 +458,39 @@ void S3ObjectStorage::copyObjectToAnotherObjectStorage( // NOLINT /// Shortcut for S3 if (auto * dest_s3 = dynamic_cast(&object_storage_to); dest_s3 != nullptr) { - auto client_ = client.get(); + auto client_ = dest_s3->client.get(); auto settings_ptr = s3_settings.get(); auto size = S3::getObjectSize(*client_, bucket, object_from.remote_path, {}, settings_ptr->request_settings, /* for_disk_s3= */ true); auto scheduler = threadPoolCallbackRunner(getThreadPoolWriter(), "S3ObjStor_copy"); - copyS3File( - client.get(), - bucket, - object_from.remote_path, - 0, - size, - dest_s3->bucket, - object_to.remote_path, - settings_ptr->request_settings, - patchSettings(read_settings), - BlobStorageLogWriter::create(disk_name), - object_to_attributes, - scheduler, - /* for_disk_s3= */ true); + try { + copyS3File( + client_, + bucket, + object_from.remote_path, + 0, + size, + dest_s3->bucket, + object_to.remote_path, + settings_ptr->request_settings, + patchSettings(read_settings), + BlobStorageLogWriter::create(disk_name), + object_to_attributes, + scheduler, + /* for_disk_s3= */ true); + return; + } + catch (S3Exception & exc) + { + /// If authentication/permissions error occurs then fallthrough to copy with buffer. + if (exc.getS3ErrorCode() != Aws::S3::S3Errors::ACCESS_DENIED) + throw; + LOG_WARNING(&Poco::Logger::get("S3ObjectStorage"), + "S3-server-side copy object from the disk {} to the disk {} can not be performed: {}\n", + getName(), dest_s3->getName(), exc.what()); + } } - else - IObjectStorage::copyObjectToAnotherObjectStorage(object_from, object_to, read_settings, write_settings, object_storage_to, object_to_attributes); + + IObjectStorage::copyObjectToAnotherObjectStorage(object_from, object_to, read_settings, write_settings, object_storage_to, object_to_attributes); } void S3ObjectStorage::copyObject( // NOLINT diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h index fdf82430812..c8b3aeaca28 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h @@ -22,11 +22,13 @@ struct S3ObjectStorageSettings const S3Settings::RequestSettings & request_settings_, uint64_t min_bytes_for_seek_, int32_t list_object_keys_size_, - int32_t objects_chunk_size_to_delete_) + int32_t objects_chunk_size_to_delete_, + bool read_only_) : request_settings(request_settings_) , min_bytes_for_seek(min_bytes_for_seek_) , list_object_keys_size(list_object_keys_size_) , objects_chunk_size_to_delete(objects_chunk_size_to_delete_) + , read_only(read_only_) {} S3Settings::RequestSettings request_settings; @@ -34,6 +36,7 @@ struct S3ObjectStorageSettings uint64_t min_bytes_for_seek; int32_t list_object_keys_size; int32_t objects_chunk_size_to_delete; + bool read_only; }; @@ -166,6 +169,8 @@ public: ObjectStorageKey generateObjectKeyForPath(const std::string & path) const override; + bool isReadOnly() const override { return s3_settings.get()->read_only; } + private: void setNewSettings(std::unique_ptr && s3_settings_); diff --git a/src/Disks/ObjectStorages/S3/diskSettings.cpp b/src/Disks/ObjectStorages/S3/diskSettings.cpp index 2ddde4021b3..8ea559be5ba 100644 --- a/src/Disks/ObjectStorages/S3/diskSettings.cpp +++ b/src/Disks/ObjectStorages/S3/diskSettings.cpp @@ -1,4 +1,5 @@ #include +#include 
"IO/S3/Client.h" #if USE_AWS_S3 @@ -34,7 +35,8 @@ std::unique_ptr getSettings(const Poco::Util::AbstractC request_settings, config.getUInt64(config_prefix + ".min_bytes_for_seek", 1024 * 1024), config.getInt(config_prefix + ".list_object_keys_size", 1000), - config.getInt(config_prefix + ".objects_chunk_size_to_delete", 1000)); + config.getInt(config_prefix + ".objects_chunk_size_to_delete", 1000), + config.getBool(config_prefix + ".readonly", false)); } std::unique_ptr getClient( @@ -92,14 +94,15 @@ std::unique_ptr getClient( HTTPHeaderEntries headers = S3::getHTTPHeaders(config_prefix, config); S3::ServerSideEncryptionKMSConfig sse_kms_config = S3::getSSEKMSConfig(config_prefix, config); - client_configuration.retryStrategy - = std::make_shared( - config.getUInt64(config_prefix + ".retry_attempts", settings.request_settings.retry_attempts)); + S3::ClientSettings client_settings{ + .use_virtual_addressing = uri.is_virtual_hosted_style, + .disable_checksum = local_settings.s3_disable_checksum, + .gcs_issue_compose_request = config.getBool("s3.gcs_issue_compose_request", false), + }; return S3::ClientFactory::instance().create( client_configuration, - uri.is_virtual_hosted_style, - local_settings.s3_disable_checksum, + client_settings, config.getString(config_prefix + ".access_key_id", ""), config.getString(config_prefix + ".secret_access_key", ""), config.getString(config_prefix + ".server_side_encryption_customer_key_base64", ""), diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp index 0103188b562..f3b0cb8b9a0 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp @@ -47,7 +47,7 @@ void WebObjectStorage::initialize(const String & uri_path, const std::unique_loc ReadWriteBufferFromHTTP::OutStreamCallback(), ConnectionTimeouts::getHTTPTimeouts( getContext()->getSettingsRef(), - {getContext()->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0}), + getContext()->getServerSettings().keep_alive_timeout), credentials, /* max_redirects= */ 0, /* buffer_size_= */ DBMS_DEFAULT_BUFFER_SIZE, diff --git a/src/Formats/EscapingRuleUtils.cpp b/src/Formats/EscapingRuleUtils.cpp index d429985e52a..9cc7cb3b89e 100644 --- a/src/Formats/EscapingRuleUtils.cpp +++ b/src/Formats/EscapingRuleUtils.cpp @@ -303,8 +303,8 @@ DataTypePtr tryInferDataTypeByEscapingRule(const String & field, const FormatSet /// Try to determine the type of value inside quotes auto type = tryInferDataTypeForSingleField(data, format_settings); - /// If we couldn't infer any type or it's a number or tuple in quotes, we determine it as a string. - if (!type || isNumber(removeNullable(type)) || isTuple(type)) + /// If we couldn't infer any type or it's tuple in quotes or it's a number and csv.try_infer_numbers_from_strings = 0, we determine it as a string. 
+ if (!type || isTuple(type) || (isNumber(type) && !format_settings.csv.try_infer_numbers_from_strings)) return std::make_shared(); return type; diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index b2ff9b1c3f7..15743365d7d 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -74,6 +74,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.csv.allow_whitespace_or_tab_as_delimiter = settings.input_format_csv_allow_whitespace_or_tab_as_delimiter; format_settings.csv.allow_variable_number_of_columns = settings.input_format_csv_allow_variable_number_of_columns; format_settings.csv.use_default_on_bad_values = settings.input_format_csv_use_default_on_bad_values; + format_settings.csv.try_infer_numbers_from_strings = settings.input_format_csv_try_infer_numbers_from_strings; format_settings.hive_text.fields_delimiter = settings.input_format_hive_text_fields_delimiter; format_settings.hive_text.collection_items_delimiter = settings.input_format_hive_text_collection_items_delimiter; format_settings.hive_text.map_keys_delimiter = settings.input_format_hive_text_map_keys_delimiter; @@ -347,7 +348,13 @@ InputFormatPtr FormatFactory::getInput( if (owned_buf) format->addBuffer(std::move(owned_buf)); if (!settings.input_format_record_errors_file_path.toString().empty()) - format->setErrorsLogger(std::make_shared(context)); + { + if (parallel_parsing) + format->setErrorsLogger(std::make_shared(context)); + else + format->setErrorsLogger(std::make_shared(context)); + } + /// It's a kludge. Because I cannot remove context from values format. /// (Not needed in the parallel_parsing case above because VALUES format doesn't support it.) diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index d9e3a420502..8d5c044a311 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -164,6 +164,7 @@ struct FormatSettings bool allow_whitespace_or_tab_as_delimiter = false; bool allow_variable_number_of_columns = false; bool use_default_on_bad_values = false; + bool try_infer_numbers_from_strings = true; } csv; struct HiveText diff --git a/src/Formats/JSONUtils.cpp b/src/Formats/JSONUtils.cpp index 349945bbd54..b8b9a9ecb0d 100644 --- a/src/Formats/JSONUtils.cpp +++ b/src/Formats/JSONUtils.cpp @@ -24,7 +24,6 @@ namespace ErrorCodes namespace JSONUtils { - template static std::pair fileSegmentationEngineJSONEachRowImpl(ReadBuffer & in, DB::Memory<> & memory, size_t min_bytes, size_t min_rows, size_t max_rows) @@ -72,7 +71,7 @@ namespace JSONUtils } else { - pos = find_first_symbols(pos, in.buffer().end()); + pos = find_first_symbols(pos, in.buffer().end()); if (pos > in.buffer().end()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Position in buffer is out of bounds. 
There must be a bug."); @@ -89,19 +88,13 @@ namespace JSONUtils --balance; ++pos; } - else if (*pos == '\\') - { - ++pos; - if (loadAtPosition(in, memory, pos)) - ++pos; - } else if (*pos == '"') { quotes = true; ++pos; } - if (balance == 0) + if (!quotes && balance == 0) { ++number_of_rows; if ((number_of_rows >= min_rows) @@ -115,13 +108,14 @@ namespace JSONUtils return {loadAtPosition(in, memory, pos), number_of_rows}; } - std::pair fileSegmentationEngineJSONEachRow(ReadBuffer & in, DB::Memory<> & memory, size_t min_bytes, size_t max_rows) + std::pair fileSegmentationEngineJSONEachRow( + ReadBuffer & in, DB::Memory<> & memory, size_t min_bytes, size_t max_rows) { return fileSegmentationEngineJSONEachRowImpl<'{', '}'>(in, memory, min_bytes, 1, max_rows); } - std::pair - fileSegmentationEngineJSONCompactEachRow(ReadBuffer & in, DB::Memory<> & memory, size_t min_bytes, size_t min_rows, size_t max_rows) + std::pair fileSegmentationEngineJSONCompactEachRow( + ReadBuffer & in, DB::Memory<> & memory, size_t min_bytes, size_t min_rows, size_t max_rows) { return fileSegmentationEngineJSONEachRowImpl<'[', ']'>(in, memory, min_bytes, min_rows, max_rows); } @@ -564,6 +558,15 @@ namespace JSONUtils skipWhitespaceIfAny(in); } + bool checkAndSkipColon(ReadBuffer & in) + { + skipWhitespaceIfAny(in); + if (!checkChar(':', in)) + return false; + skipWhitespaceIfAny(in); + return true; + } + String readFieldName(ReadBuffer & in) { skipWhitespaceIfAny(in); @@ -573,6 +576,12 @@ namespace JSONUtils return field; } + bool tryReadFieldName(ReadBuffer & in, String & field) + { + skipWhitespaceIfAny(in); + return tryReadJSONStringInto(field, in) && checkAndSkipColon(in); + } + String readStringField(ReadBuffer & in) { skipWhitespaceIfAny(in); @@ -582,6 +591,15 @@ namespace JSONUtils return value; } + bool tryReadStringField(ReadBuffer & in, String & value) + { + skipWhitespaceIfAny(in); + if (!tryReadJSONStringInto(value, in)) + return false; + skipWhitespaceIfAny(in); + return true; + } + void skipArrayStart(ReadBuffer & in) { skipWhitespaceIfAny(in); @@ -628,6 +646,15 @@ namespace JSONUtils skipWhitespaceIfAny(in); } + bool checkAndSkipObjectStart(ReadBuffer & in) + { + skipWhitespaceIfAny(in); + if (!checkChar('{', in)) + return false; + skipWhitespaceIfAny(in); + return true; + } + bool checkAndSkipObjectEnd(ReadBuffer & in) { skipWhitespaceIfAny(in); @@ -644,6 +671,15 @@ namespace JSONUtils skipWhitespaceIfAny(in); } + bool checkAndSkipComma(ReadBuffer & in) + { + skipWhitespaceIfAny(in); + if (!checkChar(',', in)) + return false; + skipWhitespaceIfAny(in); + return true; + } + std::pair readStringFieldNameAndValue(ReadBuffer & in) { auto field_name = readFieldName(in); @@ -651,6 +687,11 @@ namespace JSONUtils return {field_name, field_value}; } + bool tryReadStringFieldNameAndValue(ReadBuffer & in, std::pair & field_and_value) + { + return tryReadFieldName(in, field_and_value.first) && tryReadStringField(in, field_and_value.second); + } + NameAndTypePair readObjectWithNameAndType(ReadBuffer & in) { skipObjectStart(in); @@ -673,6 +714,44 @@ namespace JSONUtils return name_and_type; } + bool tryReadObjectWithNameAndType(ReadBuffer & in, NameAndTypePair & name_and_type) + { + if (!checkAndSkipObjectStart(in)) + return false; + + std::pair first_field_and_value; + if (!tryReadStringFieldNameAndValue(in, first_field_and_value)) + return false; + + if (!checkAndSkipComma(in)) + return false; + + std::pair second_field_and_value; + if (!tryReadStringFieldNameAndValue(in, second_field_and_value)) + return false; + 
+ if (first_field_and_value.first == "name" && second_field_and_value.first == "type") + { + auto type = DataTypeFactory::instance().tryGet(second_field_and_value.second); + if (!type) + return false; + name_and_type = {first_field_and_value.second, type}; + } + else if (second_field_and_value.first == "name" && first_field_and_value.first == "type") + { + auto type = DataTypeFactory::instance().tryGet(first_field_and_value.second); + if (!type) + return false; + name_and_type = {second_field_and_value.second, type}; + } + else + { + return false; + } + + return checkAndSkipObjectEnd(in); + } + NamesAndTypesList readMetadata(ReadBuffer & in) { auto field_name = readFieldName(in); @@ -693,6 +772,37 @@ namespace JSONUtils return names_and_types; } + bool tryReadMetadata(ReadBuffer & in, NamesAndTypesList & names_and_types) + { + String field_name; + if (!tryReadFieldName(in, field_name) || field_name != "meta") + return false; + + if (!checkAndSkipArrayStart(in)) + return false; + + bool first = true; + while (!checkAndSkipArrayEnd(in)) + { + if (!first) + { + if (!checkAndSkipComma(in)) + return false; + } + else + { + first = false; + } + + NameAndTypePair name_and_type; + if (!tryReadObjectWithNameAndType(in, name_and_type)) + return false; + names_and_types.push_back(name_and_type); + } + + return !names_and_types.empty(); + } + void validateMetadataByHeader(const NamesAndTypesList & names_and_types_from_metadata, const Block & header) { for (const auto & [name, type] : names_and_types_from_metadata) diff --git a/src/Formats/JSONUtils.h b/src/Formats/JSONUtils.h index cd6b5ff8171..a770ded9687 100644 --- a/src/Formats/JSONUtils.h +++ b/src/Formats/JSONUtils.h @@ -112,6 +112,7 @@ namespace JSONUtils void skipColon(ReadBuffer & in); void skipComma(ReadBuffer & in); + bool checkAndSkipComma(ReadBuffer & in); String readFieldName(ReadBuffer & in); @@ -122,9 +123,11 @@ namespace JSONUtils void skipObjectStart(ReadBuffer & in); void skipObjectEnd(ReadBuffer & in); + bool checkAndSkipObjectStart(ReadBuffer & in); bool checkAndSkipObjectEnd(ReadBuffer & in); NamesAndTypesList readMetadata(ReadBuffer & in); + bool tryReadMetadata(ReadBuffer & in, NamesAndTypesList & names_and_types); NamesAndTypesList readMetadataAndValidateHeader(ReadBuffer & in, const Block & header); void validateMetadataByHeader(const NamesAndTypesList & names_and_types_from_metadata, const Block & header); diff --git a/src/Formats/MarkInCompressedFile.h b/src/Formats/MarkInCompressedFile.h index 08e4f182c45..a25033e2a14 100644 --- a/src/Formats/MarkInCompressedFile.h +++ b/src/Formats/MarkInCompressedFile.h @@ -10,6 +10,13 @@ namespace DB { +/// It's a bug in clang with three-way comparison operator +/// https://github.com/llvm/llvm-project/issues/55919 +#ifdef __clang__ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" +#endif + /** Mark is the position in the compressed file. The compressed file consists of adjacent compressed blocks. * Mark is a tuple - the offset in the file to the start of the compressed block, the offset in the decompressed block to the start of the data. 
*/ @@ -18,12 +25,7 @@ struct MarkInCompressedFile size_t offset_in_compressed_file; size_t offset_in_decompressed_block; - bool operator==(const MarkInCompressedFile & rhs) const - { - return std::tie(offset_in_compressed_file, offset_in_decompressed_block) - == std::tie(rhs.offset_in_compressed_file, rhs.offset_in_decompressed_block); - } - bool operator!=(const MarkInCompressedFile & rhs) const { return !(*this == rhs); } + auto operator<=>(const MarkInCompressedFile &) const = default; auto asTuple() const { return std::make_tuple(offset_in_compressed_file, offset_in_decompressed_block); } @@ -39,6 +41,10 @@ struct MarkInCompressedFile } }; +#ifdef __clang__ + #pragma clang diagnostic pop +#endif + /** * In-memory representation of an array of marks. * diff --git a/src/Formats/ReadSchemaUtils.cpp b/src/Formats/ReadSchemaUtils.cpp index b185007eda7..43931be3449 100644 --- a/src/Formats/ReadSchemaUtils.cpp +++ b/src/Formats/ReadSchemaUtils.cpp @@ -1,12 +1,9 @@ -#include #include #include #include -#include #include -#include #include - +#include namespace DB { @@ -17,6 +14,7 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; extern const int ONLY_NULLS_WHILE_READING_SCHEMA; extern const int CANNOT_EXTRACT_TABLE_STRUCTURE; + extern const int TYPE_MISMATCH; } static std::optional getOrderedColumnsList(const NamesAndTypesList & columns_list, const Names & columns_order_hint) @@ -55,6 +53,17 @@ ColumnsDescription readSchemaFromFormat( try { NamesAndTypesList names_and_types; + SchemaInferenceMode mode = context->getSettingsRef().schema_inference_mode; + if (mode == SchemaInferenceMode::UNION && !FormatFactory::instance().checkIfFormatSupportsSubsetOfColumns(format_name, context, format_settings)) + { + String additional_message; + /// Better exception message for WithNames(AndTypes) formats. + if (format_name.ends_with("WithNames") || format_name.ends_with("WithNamesAndTypes")) + additional_message = " (formats -WithNames(AndTypes) support reading subset of columns only when setting input_format_with_names_use_header is enabled)"; + + throw Exception(ErrorCodes::BAD_ARGUMENTS, "UNION schema inference mode is not supported for format {}, because it doesn't support reading subset of columns{}", format_name, additional_message); + } + if (FormatFactory::instance().checkIfFormatHasExternalSchemaReader(format_name)) { auto external_schema_reader = FormatFactory::instance().getExternalSchemaReader(format_name, context, format_settings); @@ -71,6 +80,11 @@ try } else if (FormatFactory::instance().checkIfFormatHasSchemaReader(format_name)) { + if (mode == SchemaInferenceMode::UNION) + retry = false; + + std::vector> schemas_for_union_mode; + std::optional cached_columns; std::string exception_messages; SchemaReaderPtr schema_reader; size_t max_rows_to_read = format_settings ? format_settings->max_rows_to_read_for_schema_inference @@ -84,7 +98,15 @@ try try { read_buffer_iterator.setPreviousReadBuffer(std::move(buf)); - buf = read_buffer_iterator.next(); + std::tie(buf, cached_columns) = read_buffer_iterator.next(); + if (cached_columns) + { + if (mode == SchemaInferenceMode::DEFAULT) + return *cached_columns; + schemas_for_union_mode.emplace_back(cached_columns->getAll(), read_buffer_iterator.getLastFileName()); + continue; + } + if (!buf) break; @@ -136,12 +158,19 @@ try auto num_rows = schema_reader->readNumberOrRows(); if (num_rows) read_buffer_iterator.setNumRowsToLastFile(*num_rows); - break; + + /// In default mode, we finish when schema is inferred successfully from any file. 
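A hedged sketch of the control flow around the comment above: in DEFAULT mode the loop over files stops at the first schema that could be inferred, while UNION mode must look at every file and keeps all inferred schemas for a later merge (tryInferSchema and the file names below are stand-ins, not the real helpers).

#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>
#include <vector>

enum class Mode { DEFAULT, UNION };

using Schema = std::vector<std::string>;   // column descriptions, e.g. "c1 Int64"

std::optional<Schema> tryInferSchema(const std::string & file)
{
    if (file == "broken.json")
        return std::nullopt;               // pretend inference failed for this file
    return Schema{file + ": c1 Int64"};
}

Schema inferSchema(const std::vector<std::string> & files, Mode mode)
{
    Schema result;
    for (const auto & file : files)
    {
        auto schema = tryInferSchema(file);
        if (!schema)
        {
            if (mode == Mode::UNION)
                throw std::runtime_error("cannot infer schema from " + file);
            continue;                      // DEFAULT: just try the next file
        }
        if (mode == Mode::DEFAULT)
            return *schema;                // first success finishes the inference
        result.insert(result.end(), schema->begin(), schema->end());   // UNION: keep everything for the merge
    }
    if (result.empty())
        throw std::runtime_error("could not infer schema from any file");
    return result;
}

int main()
{
    std::cout << inferSchema({"broken.json", "a.json"}, Mode::DEFAULT).size() << '\n';  // 1
    std::cout << inferSchema({"a.json", "b.json"}, Mode::UNION).size() << '\n';         // 2
}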
+ if (mode == SchemaInferenceMode::DEFAULT) + break; + + if (!names_and_types.empty()) + read_buffer_iterator.setSchemaToLastFile(ColumnsDescription(names_and_types)); + schemas_for_union_mode.emplace_back(names_and_types, read_buffer_iterator.getLastFileName()); } catch (...) { auto exception_message = getCurrentExceptionMessage(false); - if (schema_reader) + if (schema_reader && mode == SchemaInferenceMode::DEFAULT) { size_t rows_read = schema_reader->getNumRowsRead(); assert(rows_read <= max_rows_to_read); @@ -190,8 +219,58 @@ try } } - if (auto cached_columns = read_buffer_iterator.getCachedColumns()) - return *cached_columns; + /// If we got all schemas from cache, schema_reader can be uninitialized. + /// But we still need some stateless methods of ISchemaReader, + /// let's initialize it with empty buffer. + EmptyReadBuffer empty; + if (!schema_reader) + schema_reader = FormatFactory::instance().getSchemaReader(format_name, empty, context, format_settings); + + if (mode == SchemaInferenceMode::UNION) + { + Names names_order; /// Try to save original columns order; + std::unordered_map names_to_types; + + + for (const auto & [schema, file_name] : schemas_for_union_mode) + { + for (const auto & [name, type] : schema) + { + auto it = names_to_types.find(name); + if (it == names_to_types.end()) + { + names_order.push_back(name); + names_to_types[name] = type; + } + else + { + /// We already have column with such name. + /// Check if types are the same. + if (!type->equals(*it->second)) + { + /// If types are not the same, try to transform them according + /// to the format to find common type. + auto new_type_copy = type; + schema_reader->transformTypesFromDifferentFilesIfNeeded(it->second, new_type_copy); + + /// If types are not the same after transform, we cannot do anything, throw an exception. + if (!it->second->equals(*new_type_copy)) + throw Exception( + ErrorCodes::TYPE_MISMATCH, + "Automatically inferred type {} for column '{}'{} differs from type inferred from previous files: {}", + type->getName(), + name, + file_name.empty() ? "" : " in file " + file_name, + it->second->getName()); + } + } + } + } + + names_and_types.clear(); + for (const auto & name : names_order) + names_and_types.emplace_back(name, names_to_types[name]); + } if (names_and_types.empty()) throw Exception( @@ -206,7 +285,7 @@ try /// It will allow to execute simple data loading with query /// "INSERT INTO table SELECT * FROM ..." 
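Going back to the UNION merge loop above, a simplified standalone version of the algorithm: columns are collected in first-seen order, and a column name that reappears must carry a compatible type (reduced to plain equality here; the real code first tries to transform the two types toward a common one before giving up).

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

using Schema = std::vector<std::pair<std::string, std::string>>;   // column name -> type name

Schema unionSchemas(const std::vector<Schema> & schemas)
{
    std::vector<std::string> order;                 // keeps the original column order
    std::map<std::string, std::string> types;

    for (const auto & schema : schemas)
    {
        for (const auto & [name, type] : schema)
        {
            auto it = types.find(name);
            if (it == types.end())
            {
                order.push_back(name);
                types[name] = type;
            }
            else if (it->second != type)
            {
                throw std::runtime_error("type mismatch for column '" + name + "': "
                                         + it->second + " vs " + type);
            }
        }
    }

    Schema result;
    for (const auto & name : order)
        result.emplace_back(name, types[name]);
    return result;
}

int main()
{
    Schema a = {{"id", "Int64"}, {"value", "String"}};
    Schema b = {{"id", "Int64"}, {"extra", "Float64"}};
    for (const auto & [name, type] : unionSchemas({a, b}))
        std::cout << name << ' ' << type << '\n';   // id, value, extra
}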
const auto & insertion_table = context->getInsertionTable(); - if (!schema_reader->hasStrictOrderOfColumns() && !insertion_table.empty()) + if (schema_reader && !schema_reader->hasStrictOrderOfColumns() && !insertion_table.empty()) { auto storage = DatabaseCatalog::instance().getTable(insertion_table, context); auto metadata = storage->getInMemoryMetadataPtr(); @@ -226,13 +305,15 @@ try names_and_types.erase( std::remove_if(names_and_types.begin(), names_and_types.end(), [](const NameAndTypePair & pair) { return pair.name.empty(); }), names_and_types.end()); - return ColumnsDescription(names_and_types); + + auto columns = ColumnsDescription(names_and_types); + if (mode == SchemaInferenceMode::DEFAULT) + read_buffer_iterator.setResultingSchema(columns); + return columns; } catch (Exception & e) { - if (!buf) - throw; - auto file_name = getFileNameFromReadBuffer(*buf); + auto file_name = read_buffer_iterator.getLastFileName(); if (!file_name.empty()) e.addMessage(fmt::format("(in file/uri {})", file_name)); throw; @@ -256,9 +337,9 @@ SchemaCache::Key getKeyForSchemaCache( return getKeysForSchemaCache({source}, format, format_settings, context).front(); } -static SchemaCache::Key makeSchemaCacheKey(const String & source, const String & format, const String & additional_format_info) +static SchemaCache::Key makeSchemaCacheKey(const String & source, const String & format, const String & additional_format_info, const String & schema_inference_mode) { - return SchemaCache::Key{source, format, additional_format_info}; + return SchemaCache::Key{source, format, additional_format_info, schema_inference_mode}; } SchemaCache::Keys getKeysForSchemaCache( @@ -270,13 +351,14 @@ SchemaCache::Keys getKeysForSchemaCache( /// For example, for Protobuf format additional information is the path to the schema /// and message name. String additional_format_info = FormatFactory::instance().getAdditionalInfoForSchemaCache(format, context, format_settings); + String schema_inference_mode(magic_enum::enum_name(context->getSettingsRef().schema_inference_mode.value)); SchemaCache::Keys cache_keys; cache_keys.reserve(sources.size()); std::transform( sources.begin(), sources.end(), std::back_inserter(cache_keys), - [&](const auto & source) { return makeSchemaCacheKey(source, format, additional_format_info); }); + [&](const auto & source) { return makeSchemaCacheKey(source, format, additional_format_info, schema_inference_mode); }); return cache_keys; } diff --git a/src/Formats/ReadSchemaUtils.h b/src/Formats/ReadSchemaUtils.h index c769846acbb..6aa8f3f9c4c 100644 --- a/src/Formats/ReadSchemaUtils.h +++ b/src/Formats/ReadSchemaUtils.h @@ -13,11 +13,23 @@ struct IReadBufferIterator virtual void setPreviousReadBuffer(std::unique_ptr /* buffer */) {} - virtual std::unique_ptr next() = 0; - - virtual std::optional getCachedColumns() { return std::nullopt; } + /// Return read buffer of the next file or cached schema. + /// In DEFAULT schema inference mode cached schema can be from any file. + /// In UNION mode cached schema can be only from current file. + /// When there is no files to process, return pair (nullptr, nullopt) + virtual std::pair, std::optional> next() = 0; virtual void setNumRowsToLastFile(size_t /*num_rows*/) {} + + /// Set schema inferred from last file. Used for UNION mode to cache schema + /// per file. + virtual void setSchemaToLastFile(const ColumnsDescription & /*columns*/) {} + /// Set resulting inferred schema. Used for DEFAULT mode to cache schema + /// for all files. 
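The extra schema_inference_mode component added to makeSchemaCacheKey above matters because the same source and format can yield different schemas in DEFAULT and UNION modes; a toy cache key illustrating that (struct and values are illustrative only, not the SchemaCache API):

#include <iostream>
#include <map>
#include <string>
#include <tuple>

struct SchemaCacheKey
{
    std::string source;
    std::string format;
    std::string additional_info;
    std::string schema_inference_mode;

    bool operator<(const SchemaCacheKey & rhs) const
    {
        return std::tie(source, format, additional_info, schema_inference_mode)
             < std::tie(rhs.source, rhs.format, rhs.additional_info, rhs.schema_inference_mode);
    }
};

int main()
{
    std::map<SchemaCacheKey, std::string> cache;
    cache[{"s3://bucket/*.json", "JSONEachRow", "", "DEFAULT"}] = "a Int64";
    cache[{"s3://bucket/*.json", "JSONEachRow", "", "UNION"}] = "a Int64, b String";
    std::cout << cache.size() << '\n';   // 2: the two modes do not collide in the cache
}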
+ virtual void setResultingSchema(const ColumnsDescription & /*columns*/) {} + + /// Get last processed file name for better exception messages. + virtual String getLastFileName() const { return ""; } }; struct SingleReadBufferIterator : public IReadBufferIterator @@ -27,12 +39,12 @@ public: { } - std::unique_ptr next() override + std::pair, std::optional> next() override { if (done) - return nullptr; + return {nullptr, {}}; done = true; - return std::move(buf); + return {std::move(buf), {}}; } private: @@ -45,11 +57,18 @@ private: /// use it and won't create a read buffer. /// For formats that have a schema reader from the data, /// read buffer will be created by the provided iterator and -/// the schema will be extracted from the data. If schema reader -/// couldn't determine the schema we will try the next read buffer -/// from the provided iterator if it makes sense. If the format doesn't -/// have any schema reader or we couldn't determine the schema, -/// an exception will be thrown. +/// the schema will be extracted from the data. If the format doesn't +/// have any schema reader an exception will be thrown. +/// Reading schema can be performed in 2 modes depending on setting schema_inference_mode: +/// 1) Default mode. In this mode ClickHouse assumes that all files have the same schema +/// and tries to infer the schema by reading files one by one until it succeeds. +/// If schema reader couldn't determine the schema for some file, ClickHouse will try the next +/// file (next read buffer from the provided iterator) if it makes sense. If ClickHouse couldn't determine +/// the resulting schema, an exception will be thrown. +/// 2) Union mode. In this mode ClickHouse assumes that files can have different schemas, +/// so it infer schemas of all files and then union them to the common schema. In this mode +/// all read buffers from provided iterator will be used. If ClickHouse couldn't determine +/// the schema for some file, an exception will be thrown. ColumnsDescription readSchemaFromFormat( const String & format_name, const std::optional & format_settings, diff --git a/src/Formats/SchemaInferenceUtils.cpp b/src/Formats/SchemaInferenceUtils.cpp index 94166aa9002..e2ba188d015 100644 --- a/src/Formats/SchemaInferenceUtils.cpp +++ b/src/Formats/SchemaInferenceUtils.cpp @@ -547,6 +547,54 @@ namespace } } + void mergeNamedTuples(DataTypes & data_types, TypeIndexesSet & type_indexes, const FormatSettings & settings, JSONInferenceInfo * json_info) + { + if (!type_indexes.contains(TypeIndex::Tuple)) + return; + + /// Collect all names and their types from all named tuples. + std::unordered_map names_to_types; + /// Try to save original order of element names. + Names element_names; + for (auto & type : data_types) + { + const auto * tuple_type = typeid_cast(type.get()); + if (tuple_type && tuple_type->haveExplicitNames()) + { + const auto & elements = tuple_type->getElements(); + const auto & names = tuple_type->getElementNames(); + for (size_t i = 0; i != elements.size(); ++i) + { + if (!names_to_types.contains(names[i])) + element_names.push_back(names[i]); + names_to_types[names[i]].push_back(elements[i]); + } + } + } + + /// Try to find common type for each tuple element with the same name. 
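A small sketch of the iterator contract shown above: next() hands back either a read buffer for the next file or columns already cached for it, and a (nullptr, nullopt) pair when there is nothing left to process (Buffer, Columns and the file names are stand-ins for the real types):

#include <iostream>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>

using Buffer = std::string;                      // stands in for a ReadBuffer
using Columns = std::vector<std::string>;        // stands in for ColumnsDescription

struct BufferIterator
{
    std::vector<std::string> files;
    size_t index = 0;

    std::pair<std::unique_ptr<Buffer>, std::optional<Columns>> next()
    {
        if (index == files.size())
            return {nullptr, std::nullopt};      // no more files to process

        const auto & file = files[index++];
        if (file == "cached.parquet")            // pretend this file's schema is in the cache
            return {nullptr, Columns{"id Int64", "value String"}};

        return {std::make_unique<Buffer>("contents of " + file), std::nullopt};
    }
};

int main()
{
    BufferIterator it{{"a.json", "cached.parquet"}};
    while (true)
    {
        auto [buf, cached] = it.next();
        if (!buf && !cached)
            break;
        if (cached)
            std::cout << "schema taken from cache: " << cached->front() << ", ...\n";
        else
            std::cout << "inferring schema from " << *buf << '\n';
    }
}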
+ DataTypes element_types; + element_types.reserve(names_to_types.size()); + for (const auto & name : element_names) + { + auto & types = names_to_types[name]; + transformInferredTypesIfNeededImpl(types, settings, json_info); + /// If some element have different types in different tuples, we can't do anything + if (!checkIfTypesAreEqual(types)) + return; + element_types.push_back(types.front()); + } + + DataTypePtr result_tuple = std::make_shared(element_types, element_names); + + for (auto & type : data_types) + { + const auto * tuple_type = typeid_cast(type.get()); + if (tuple_type && tuple_type->haveExplicitNames()) + type = result_tuple; + } + } + template void transformInferredTypesIfNeededImpl(DataTypes & types, const FormatSettings & settings, JSONInferenceInfo * json_info) { @@ -604,6 +652,9 @@ namespace if (settings.json.read_objects_as_strings) transformMapsAndStringsToStrings(data_types, type_indexes); + + if (json_info && json_info->allow_merging_named_tuples) + mergeNamedTuples(data_types, type_indexes, settings, json_info); }; transformTypesRecursively(types, transform_simple_types, transform_complex_types); @@ -1180,6 +1231,13 @@ void transformInferredJSONTypesIfNeeded( second = std::move(types[1]); } +void transformInferredJSONTypesFromDifferentFilesIfNeeded(DataTypePtr & first, DataTypePtr & second, const FormatSettings & settings) +{ + JSONInferenceInfo json_info; + json_info.allow_merging_named_tuples = true; + transformInferredJSONTypesIfNeeded(first, second, settings, &json_info); +} + void transformFinalInferredJSONTypeIfNeededImpl(DataTypePtr & data_type, const FormatSettings & settings, JSONInferenceInfo * json_info, bool remain_nothing_types = false) { if (!data_type) @@ -1247,11 +1305,22 @@ void transformFinalInferredJSONTypeIfNeededImpl(DataTypePtr & data_type, const F return; } + /// First, try to transform nested types without final transformations to see if there is a common type. + auto nested_types_copy = nested_types; + transformInferredTypesIfNeededImpl(nested_types_copy, settings, json_info); + if (checkIfTypesAreEqual(nested_types_copy)) + { + data_type = std::make_shared(nested_types_copy.back()); + transformFinalInferredJSONTypeIfNeededImpl(data_type, settings, json_info); + return; + } + + /// Apply final transformation to nested types, and then try to find common type. for (auto & nested_type : nested_types) /// Don't change Nothing to String in nested types here, because we are not sure yet if it's Array or actual Tuple transformFinalInferredJSONTypeIfNeededImpl(nested_type, settings, json_info, /*remain_nothing_types=*/ true); - auto nested_types_copy = nested_types; + nested_types_copy = nested_types; transformInferredTypesIfNeededImpl(nested_types_copy, settings, json_info); if (checkIfTypesAreEqual(nested_types_copy)) { @@ -1381,7 +1450,6 @@ DataTypePtr makeNullableRecursively(DataTypePtr type) return std::make_shared(std::move(nested_types), tuple_type->getElementNames()); return std::make_shared(std::move(nested_types)); - } if (which.isMap()) diff --git a/src/Formats/SchemaInferenceUtils.h b/src/Formats/SchemaInferenceUtils.h index efeb6c9c873..b492d9b22b6 100644 --- a/src/Formats/SchemaInferenceUtils.h +++ b/src/Formats/SchemaInferenceUtils.h @@ -14,6 +14,11 @@ struct JSONInferenceInfo std::unordered_set numbers_parsed_from_json_strings; /// Indicates if currently we are inferring type for Map/Object key. 
bool is_object_key = false; + /// When we transform types for the same column from different files + /// we cannot use DataTypeJSONPaths for inferring named tuples from JSON objects, + /// because DataTypeJSONPaths was already finalized to named tuple. IN this case + /// we can only merge named tuples from different files together. + bool allow_merging_named_tuples = false; }; /// Try to determine datatype of the value in buffer/string. If the type cannot be inferred, return nullptr. @@ -64,9 +69,7 @@ void transformInferredTypesIfNeeded(DataTypePtr & first, DataTypePtr & second, c /// from strings in json_info while inference and use it here, so we will know that Array(Int64) contains /// integer inferred from a string. /// Example 2: -/// When we have maps with different value types, we convert all types to JSON object type. -/// For example, if we have Map(String, UInt64) (like `{"a" : 123}`) and Map(String, String) (like `{"b" : 'abc'}`) -/// we will convert both types to Object('JSON'). +/// We merge DataTypeJSONPaths types to a single DataTypeJSONPaths type with union of all JSON paths. void transformInferredJSONTypesIfNeeded(DataTypePtr & first, DataTypePtr & second, const FormatSettings & settings, JSONInferenceInfo * json_info); /// Make final transform for types inferred in JSON format. It does 3 types of transformation: @@ -78,6 +81,11 @@ void transformInferredJSONTypesIfNeeded(DataTypePtr & first, DataTypePtr & secon /// 3) Converts all Nothing types to String types if input_format_json_infer_incomplete_types_as_strings is enabled. void transformFinalInferredJSONTypeIfNeeded(DataTypePtr & data_type, const FormatSettings & settings, JSONInferenceInfo * json_info); +/// Transform types for the same column inferred from different files. +/// Does the same as transformInferredJSONTypesIfNeeded, but also merges named Tuples together, +/// because DataTypeJSONPaths types were finalized when we finished inference for a file. +void transformInferredJSONTypesFromDifferentFilesIfNeeded(DataTypePtr & first, DataTypePtr & second, const FormatSettings & settings); + /// Make type Nullable recursively: /// - Type -> Nullable(type) /// - Array(Type) -> Array(Nullable(Type)) diff --git a/src/Formats/registerFormats.cpp b/src/Formats/registerFormats.cpp index 6c9f1a94022..cc9cf380693 100644 --- a/src/Formats/registerFormats.cpp +++ b/src/Formats/registerFormats.cpp @@ -294,4 +294,3 @@ void registerFormats() } } - diff --git a/src/Functions/CMakeLists.txt b/src/Functions/CMakeLists.txt index cfbbd77bc36..89676594581 100644 --- a/src/Functions/CMakeLists.txt +++ b/src/Functions/CMakeLists.txt @@ -99,6 +99,10 @@ if (TARGET ch_contrib::rapidjson) list (APPEND PRIVATE_LIBS ch_contrib::rapidjson) endif() +if (TARGET ch_contrib::pocketfft) + list (APPEND PRIVATE_LIBS ch_contrib::pocketfft) +endif() + if (TARGET ch_contrib::crc32-vpmsum) list (APPEND PUBLIC_LIBS ch_contrib::crc32-vpmsum) endif() diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index 33f62b8da8c..1b2519d1ec5 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -1483,6 +1483,17 @@ public: return getReturnTypeImplStatic(new_arguments, context); } + /// Special case - one or both arguments are IPv6 + if (isIPv6(arguments[0]) || isIPv6(arguments[1])) + { + DataTypes new_arguments { + isIPv6(arguments[0]) ? std::make_shared() : arguments[0], + isIPv6(arguments[1]) ? 
std::make_shared() : arguments[1], + }; + + return getReturnTypeImplStatic(new_arguments, context); + } + if constexpr (is_plus || is_minus) { @@ -2181,6 +2192,25 @@ ColumnPtr executeStringInteger(const ColumnsWithTypeAndName & arguments, const A return executeImpl2(new_arguments, result_type, input_rows_count, right_nullmap); } + /// Special case - one or both arguments are IPv6 + if (isIPv6(arguments[0].type) || isIPv6(arguments[1].type)) + { + ColumnsWithTypeAndName new_arguments { + { + isIPv6(arguments[0].type) ? castColumn(arguments[0], std::make_shared()) : arguments[0].column, + isIPv6(arguments[0].type) ? std::make_shared() : arguments[0].type, + arguments[0].name, + }, + { + isIPv6(arguments[1].type) ? castColumn(arguments[1], std::make_shared()) : arguments[1].column, + isIPv6(arguments[1].type) ? std::make_shared() : arguments[1].type, + arguments[1].name + } + }; + + return executeImpl2(new_arguments, result_type, input_rows_count, right_nullmap); + } + const auto * const left_generic = left_argument.type.get(); const auto * const right_generic = right_argument.type.get(); ColumnPtr res; diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index d7c2c70884b..eed75788fcd 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -47,7 +47,6 @@ #include #include #include -#include #include #include #include @@ -221,6 +220,18 @@ struct ConvertImpl continue; } + if constexpr (std::is_same_v && std::is_same_v) + { + static_assert( + std::is_same_v, + "UInt128 and IPv6 types must be same"); + + vec_to[i].items[1] = std::byteswap(vec_from[i].toUnderType().items[0]); + vec_to[i].items[0] = std::byteswap(vec_from[i].toUnderType().items[1]); + + continue; + } + if constexpr (std::is_same_v != std::is_same_v) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, @@ -1401,10 +1412,10 @@ inline bool tryParseImpl(DataTypeDate32::FieldType & x, ReadBuff template <> inline bool tryParseImpl(DataTypeDateTime::FieldType & x, ReadBuffer & rb, const DateLUTImpl * time_zone, bool) { - time_t tmp = 0; - if (!tryReadDateTimeText(tmp, rb, *time_zone)) + time_t time = 0; + if (!tryReadDateTimeText(time, rb, *time_zone)) return false; - x = static_cast(tmp); + convertFromTime(x, time); return true; } @@ -1685,7 +1696,6 @@ struct ConvertThroughParsing break; } } - parseImpl(vec_to[i], read_buffer, local_time_zone, precise_float_parsing); } while (false); } @@ -3279,7 +3289,6 @@ private: { /// In case when converting to Nullable type, we apply different parsing rule, /// that will not throw an exception but return NULL in case of malformed input. 
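A hedged sketch of the IPv6-to-UInt128 reinterpretation used above: the IPv6 value is 16 big-endian bytes, while the UInt128 that arithmetic works on is two native 64-bit limbs, so each half is byte-swapped and the halves are exchanged (a little-endian host and the toy UInt128 below are assumptions of this sketch):

#include <array>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Portable 64-bit byte swap (std::byteswap needs C++23, so it is spelled out here).
static uint64_t bswap64(uint64_t x)
{
    uint64_t r = 0;
    for (int i = 0; i < 8; ++i)
        r = (r << 8) | ((x >> (8 * i)) & 0xff);
    return r;
}

struct UInt128 { uint64_t items[2]; };   // items[0] = low limb, items[1] = high limb

UInt128 ipv6ToUInt128(const std::array<uint8_t, 16> & ipv6)
{
    uint64_t halves[2];
    std::memcpy(halves, ipv6.data(), 16);            // halves[0] = first (most significant) 8 bytes
    return UInt128{{bswap64(halves[1]), bswap64(halves[0])}};
}

int main()
{
    std::array<uint8_t, 16> loopback{};              // ::1
    loopback[15] = 1;
    UInt128 v = ipv6ToUInt128(loopback);
    std::printf("high=%llu low=%llu\n",
                (unsigned long long) v.items[1], (unsigned long long) v.items[0]);   // high=0 low=1
}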
- FunctionPtr function = FunctionConvertFromString::create(); return createFunctionAdaptor(function, from_type); } diff --git a/src/Functions/FunctionsExternalDictionaries.h b/src/Functions/FunctionsExternalDictionaries.h index db6529da73c..37ddfd6168e 100644 --- a/src/Functions/FunctionsExternalDictionaries.h +++ b/src/Functions/FunctionsExternalDictionaries.h @@ -654,7 +654,7 @@ private: if (tuple_size < 1) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Tuple second argument of function {} must contain multiple constant string columns"); + "Tuple second argument of function {} must contain multiple constant string columns", getName()); for (size_t i = 0; i < tuple_col.tupleSize(); ++i) { diff --git a/src/Functions/FunctionsHashing.h b/src/Functions/FunctionsHashing.h index 9468bc259e3..d0edd34e657 100644 --- a/src/Functions/FunctionsHashing.h +++ b/src/Functions/FunctionsHashing.h @@ -15,24 +15,13 @@ #endif #include -#if USE_BLAKE3 -# include -#endif - #include #include #include #include #if USE_SSL -# include # include -# include -#if USE_BORINGSSL -# include -#else -# include -#endif #endif #include @@ -73,7 +62,6 @@ namespace ErrorCodes extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int NOT_IMPLEMENTED; extern const int ILLEGAL_COLUMN; - extern const int SUPPORT_IS_DISABLED; } namespace impl @@ -191,6 +179,40 @@ T combineHashesFunc(T t1, T t2) } +struct SipHash64Impl +{ + static constexpr auto name = "sipHash64"; + using ReturnType = UInt64; + + static UInt64 apply(const char * begin, size_t size) { return sipHash64(begin, size); } + static UInt64 combineHashes(UInt64 h1, UInt64 h2) { return combineHashesFunc(h1, h2); } + + static constexpr bool use_int_hash_for_pods = false; +}; + +struct SipHash64KeyedImpl +{ + static constexpr auto name = "sipHash64Keyed"; + using ReturnType = UInt64; + using Key = impl::SipHashKey; + using KeyColumns = impl::SipHashKeyColumns; + + static KeyColumns parseKeyColumns(const ColumnWithTypeAndName & key) { return impl::parseSipHashKeyColumns(key); } + static Key getKey(const KeyColumns & key, size_t i) { return key.getKey(i); } + + static UInt64 applyKeyed(const Key & key, const char * begin, size_t size) { return sipHash64Keyed(key.key0, key.key1, begin, size); } + + static UInt64 combineHashesKeyed(const Key & key, UInt64 h1, UInt64 h2) + { + transformEndianness(h1); + transformEndianness(h2); + const UInt64 hashes[]{h1, h2}; + return applyKeyed(key, reinterpret_cast(hashes), sizeof(hashes)); + } + + static constexpr bool use_int_hash_for_pods = false; +}; + #if USE_SSL struct HalfMD5Impl { @@ -225,159 +247,8 @@ struct HalfMD5Impl static constexpr bool use_int_hash_for_pods = false; }; - -struct MD4Impl -{ - static constexpr auto name = "MD4"; - enum { length = MD4_DIGEST_LENGTH }; - - static void apply(const char * begin, const size_t size, unsigned char * out_char_data) - { - MD4_CTX ctx; - MD4_Init(&ctx); - MD4_Update(&ctx, reinterpret_cast(begin), size); - MD4_Final(out_char_data, &ctx); - } -}; - -struct MD5Impl -{ - static constexpr auto name = "MD5"; - enum { length = MD5_DIGEST_LENGTH }; - - static void apply(const char * begin, const size_t size, unsigned char * out_char_data) - { - MD5_CTX ctx; - MD5_Init(&ctx); - MD5_Update(&ctx, reinterpret_cast(begin), size); - MD5_Final(out_char_data, &ctx); - } -}; - -struct SHA1Impl -{ - static constexpr auto name = "SHA1"; - enum { length = SHA_DIGEST_LENGTH }; - - static void apply(const char * begin, const size_t size, unsigned char * out_char_data) - { - SHA_CTX ctx; - 
SHA1_Init(&ctx); - SHA1_Update(&ctx, reinterpret_cast(begin), size); - SHA1_Final(out_char_data, &ctx); - } -}; - -struct SHA224Impl -{ - static constexpr auto name = "SHA224"; - enum { length = SHA224_DIGEST_LENGTH }; - - static void apply(const char * begin, const size_t size, unsigned char * out_char_data) - { - SHA256_CTX ctx; - SHA224_Init(&ctx); - SHA224_Update(&ctx, reinterpret_cast(begin), size); - SHA224_Final(out_char_data, &ctx); - } -}; - -struct SHA256Impl -{ - static constexpr auto name = "SHA256"; - enum { length = SHA256_DIGEST_LENGTH }; - - static void apply(const char * begin, const size_t size, unsigned char * out_char_data) - { - SHA256_CTX ctx; - SHA256_Init(&ctx); - SHA256_Update(&ctx, reinterpret_cast(begin), size); - SHA256_Final(out_char_data, &ctx); - } -}; - -struct SHA384Impl -{ - static constexpr auto name = "SHA384"; - enum { length = SHA384_DIGEST_LENGTH }; - - static void apply(const char * begin, const size_t size, unsigned char * out_char_data) - { - SHA512_CTX ctx; - SHA384_Init(&ctx); - SHA384_Update(&ctx, reinterpret_cast(begin), size); - SHA384_Final(out_char_data, &ctx); - } -}; - -struct SHA512Impl -{ - static constexpr auto name = "SHA512"; - enum { length = 64 }; - - static void apply(const char * begin, const size_t size, unsigned char * out_char_data) - { - SHA512_CTX ctx; - SHA512_Init(&ctx); - SHA512_Update(&ctx, reinterpret_cast(begin), size); - SHA512_Final(out_char_data, &ctx); - } -}; - -struct SHA512Impl256 -{ - static constexpr auto name = "SHA512_256"; - enum { length = 32 }; - - static void apply(const char * begin, const size_t size, unsigned char * out_char_data) - { - /// Here, we use the EVP interface that is common to both BoringSSL and OpenSSL. Though BoringSSL is the default - /// SSL library that we use, for S390X architecture only OpenSSL is supported. But the SHA512-256, SHA512_256_Init, - /// SHA512_256_Update, SHA512_256_Final methods to calculate hash (similar to the other SHA functions) aren't available - /// in the current version of OpenSSL that we use which necessitates the use of the EVP interface. 
- auto md_ctx = EVP_MD_CTX_create(); - EVP_DigestInit_ex(md_ctx, EVP_sha512_256(), nullptr /*engine*/); - EVP_DigestUpdate(md_ctx, begin, size); - EVP_DigestFinal_ex(md_ctx, out_char_data, nullptr /*size*/); - EVP_MD_CTX_destroy(md_ctx); - } -}; #endif -struct SipHash64Impl -{ - static constexpr auto name = "sipHash64"; - using ReturnType = UInt64; - - static UInt64 apply(const char * begin, size_t size) { return sipHash64(begin, size); } - static UInt64 combineHashes(UInt64 h1, UInt64 h2) { return combineHashesFunc(h1, h2); } - - static constexpr bool use_int_hash_for_pods = false; -}; - -struct SipHash64KeyedImpl -{ - static constexpr auto name = "sipHash64Keyed"; - using ReturnType = UInt64; - using Key = impl::SipHashKey; - using KeyColumns = impl::SipHashKeyColumns; - - static KeyColumns parseKeyColumns(const ColumnWithTypeAndName & key) { return impl::parseSipHashKeyColumns(key); } - static Key getKey(const KeyColumns & key, size_t i) { return key.getKey(i); } - - static UInt64 applyKeyed(const Key & key, const char * begin, size_t size) { return sipHash64Keyed(key.key0, key.key1, begin, size); } - - static UInt64 combineHashesKeyed(const Key & key, UInt64 h1, UInt64 h2) - { - transformEndianness(h1); - transformEndianness(h2); - const UInt64 hashes[]{h1, h2}; - return applyKeyed(key, reinterpret_cast(hashes), sizeof(hashes)); - } - - static constexpr bool use_int_hash_for_pods = false; -}; - struct SipHash128Impl { static constexpr auto name = "sipHash128"; @@ -820,121 +691,6 @@ struct ImplXXH3 static constexpr bool use_int_hash_for_pods = false; }; -struct ImplBLAKE3 -{ - static constexpr auto name = "BLAKE3"; - enum { length = 32 }; - -#if !USE_BLAKE3 - [[noreturn]] static void apply(const char * /*begin*/, const size_t /*size*/, unsigned char * /*out_char_data*/) - { - throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "BLAKE3 is not available. 
Rust code or BLAKE3 itself may be disabled."); - } -#else - static void apply(const char * begin, const size_t size, unsigned char* out_char_data) - { - auto err_msg = blake3_apply_shim(begin, safe_cast(size), out_char_data); - if (err_msg != nullptr) - { - auto err_st = std::string(err_msg); - blake3_free_char_pointer(err_msg); - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Function returned error message: {}", err_st); - } - } -#endif -}; - -template -class FunctionStringHashFixedString : public IFunction -{ -public: - static constexpr auto name = Impl::name; - static FunctionPtr create(ContextPtr) { return std::make_shared(); } - - String getName() const override - { - return name; - } - - size_t getNumberOfArguments() const override { return 1; } - - DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override - { - if (!isStringOrFixedString(arguments[0]) && !isIPv6(arguments[0])) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}", - arguments[0]->getName(), getName()); - - return std::make_shared(Impl::length); - } - - bool useDefaultImplementationForConstants() const override { return true; } - - bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } - - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override - { - if (const ColumnString * col_from = checkAndGetColumn(arguments[0].column.get())) - { - auto col_to = ColumnFixedString::create(Impl::length); - - const typename ColumnString::Chars & data = col_from->getChars(); - const typename ColumnString::Offsets & offsets = col_from->getOffsets(); - auto & chars_to = col_to->getChars(); - const auto size = offsets.size(); - chars_to.resize(size * Impl::length); - - ColumnString::Offset current_offset = 0; - for (size_t i = 0; i < size; ++i) - { - Impl::apply( - reinterpret_cast(&data[current_offset]), - offsets[i] - current_offset - 1, - reinterpret_cast(&chars_to[i * Impl::length])); - - current_offset = offsets[i]; - } - - return col_to; - } - else if ( - const ColumnFixedString * col_from_fix = checkAndGetColumn(arguments[0].column.get())) - { - auto col_to = ColumnFixedString::create(Impl::length); - const typename ColumnFixedString::Chars & data = col_from_fix->getChars(); - const auto size = col_from_fix->size(); - auto & chars_to = col_to->getChars(); - const auto length = col_from_fix->getN(); - chars_to.resize(size * Impl::length); - for (size_t i = 0; i < size; ++i) - { - Impl::apply( - reinterpret_cast(&data[i * length]), length, reinterpret_cast(&chars_to[i * Impl::length])); - } - return col_to; - } - else if ( - const ColumnIPv6 * col_from_ip = checkAndGetColumn(arguments[0].column.get())) - { - auto col_to = ColumnFixedString::create(Impl::length); - const typename ColumnIPv6::Container & data = col_from_ip->getData(); - const auto size = col_from_ip->size(); - auto & chars_to = col_to->getChars(); - const auto length = IPV6_BINARY_LENGTH; - chars_to.resize(size * Impl::length); - for (size_t i = 0; i < size; ++i) - { - Impl::apply( - reinterpret_cast(&data[i * length]), length, reinterpret_cast(&chars_to[i * Impl::length])); - } - return col_to; - } - else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - arguments[0].column->getName(), getName()); - } -}; - - DECLARE_MULTITARGET_CODE( template @@ -1817,15 +1573,7 @@ using FunctionSipHash64Keyed = 
FunctionAnyHash; using FunctionIntHash64 = FunctionIntHash; #if USE_SSL -using FunctionMD4 = FunctionStringHashFixedString; using FunctionHalfMD5 = FunctionAnyHash; -using FunctionMD5 = FunctionStringHashFixedString; -using FunctionSHA1 = FunctionStringHashFixedString; -using FunctionSHA224 = FunctionStringHashFixedString; -using FunctionSHA256 = FunctionStringHashFixedString; -using FunctionSHA384 = FunctionStringHashFixedString; -using FunctionSHA512 = FunctionStringHashFixedString; -using FunctionSHA512_256 = FunctionStringHashFixedString; #endif using FunctionSipHash128 = FunctionAnyHash; using FunctionSipHash128Keyed = FunctionAnyHash; @@ -1854,7 +1602,6 @@ using FunctionXxHash64 = FunctionAnyHash; using FunctionXXH3 = FunctionAnyHash; using FunctionWyHash64 = FunctionAnyHash; -using FunctionBLAKE3 = FunctionStringHashFixedString; } #ifdef __clang__ diff --git a/src/Functions/FunctionsHashingMisc.cpp b/src/Functions/FunctionsHashingMisc.cpp index f56568b2508..38f16af0e6d 100644 --- a/src/Functions/FunctionsHashingMisc.cpp +++ b/src/Functions/FunctionsHashingMisc.cpp @@ -46,19 +46,34 @@ REGISTER_FUNCTION(Hashing) factory.registerFunction(); +#if USE_SSL + factory.registerFunction(FunctionDocumentation{ + .description = R"( +[Interprets](../..//sql-reference/functions/type-conversion-functions.md/#type_conversion_functions-reinterpretAsString) all the input +parameters as strings and calculates the MD5 hash value for each of them. Then combines hashes, takes the first 8 bytes of the hash of the +resulting string, and interprets them as [UInt64](../../../sql-reference/data-types/int-uint.md) in big-endian byte order. The function is +relatively slow (5 million short strings per second per processor core). - factory.registerFunction( - FunctionDocumentation{ - .description=R"( -Calculates BLAKE3 hash string and returns the resulting set of bytes as FixedString. -This cryptographic hash-function is integrated into ClickHouse with BLAKE3 Rust library. -The function is rather fast and shows approximately two times faster performance compared to SHA-2, while generating hashes of the same length as SHA-256. -It returns a BLAKE3 hash as a byte array with type FixedString(32). -)", - .examples{ - {"hash", "SELECT hex(BLAKE3('ABC'))", ""}}, - .categories{"Hash"} - }, - FunctionFactory::CaseSensitive); +Consider using the [sipHash64](../../sql-reference/functions/hash-functions.md/#hash_functions-siphash64) function instead. + )", + .syntax = "SELECT halfMD5(par1,par2,...,parN);", + .arguments + = {{"par1,par2,...,parN", + R"( +The function takes a variable number of input parameters. Arguments can be any of the supported data types. For some data types calculated +value of hash function may be the same for the same values even if types of arguments differ (integers of different size, named and unnamed +Tuple with the same data, Map and the corresponding Array(Tuple(key, value)) type with the same data). 
+ )"}}, + .returned_value = "The computed half MD5 hash of the given input params returned as a " + "[UInt64](../../../sql-reference/data-types/int-uint.md) in big-endian byte order.", + .examples + = {{"", + "SELECT HEX(halfMD5('abc', 'cde', 'fgh'));", + R"( +┌─hex(halfMD5('abc', 'cde', 'fgh'))─┐ +│ 2C9506B7374CFAF4 │ +└───────────────────────────────────┘ + )"}}}); +#endif } } diff --git a/src/Functions/FunctionsHashingSSL.cpp b/src/Functions/FunctionsHashingSSL.cpp deleted file mode 100644 index b716a11f9c3..00000000000 --- a/src/Functions/FunctionsHashingSSL.cpp +++ /dev/null @@ -1,44 +0,0 @@ -#include "config.h" - -#if USE_SSL - -#include "FunctionsHashing.h" -#include - -/// FunctionsHashing instantiations are separated into files FunctionsHashing*.cpp -/// to better parallelize the build procedure and avoid MSan build failure -/// due to excessive resource consumption. - -namespace DB -{ - -REGISTER_FUNCTION(HashingSSL) -{ - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(FunctionDocumentation{ - .description = R"(Calculates the SHA512_256 hash of the given string.)", - .syntax = "SELECT SHA512_256(s);", - .arguments = {{"s", "The input [String](../../sql-reference/data-types/string.md)."}}, - .returned_value - = "The SHA512_256 hash of the given input string returned as a [FixedString](../../sql-reference/data-types/fixedstring.md).", - .examples - = {{"", - "SELECT HEX(SHA512_256('abc'));", - R"( -┌─hex(SHA512_256('abc'))───────────────────────────────────────────┐ -│ 53048E2681941EF99B2E29B76B4C7DABE4C2D0C634FC6D46E0E2F13107E7AF23 │ -└──────────────────────────────────────────────────────────────────┘ - )" - }} - }); -} -} - -#endif diff --git a/src/Functions/FunctionsMiscellaneous.h b/src/Functions/FunctionsMiscellaneous.h index 75c91a2e964..fb5109eaa88 100644 --- a/src/Functions/FunctionsMiscellaneous.h +++ b/src/Functions/FunctionsMiscellaneous.h @@ -159,7 +159,6 @@ private: class FunctionCapture : public IFunctionBase { public: - using Capture = ExecutableFunctionCapture::Capture; using CapturePtr = ExecutableFunctionCapture::CapturePtr; FunctionCapture( @@ -201,10 +200,10 @@ public: FunctionCaptureOverloadResolver( ExpressionActionsPtr expression_actions_, - const Names & captured_names_, - const NamesAndTypesList & lambda_arguments_, - const DataTypePtr & function_return_type_, - const String & expression_return_name_) + const Names & captured_names, + const NamesAndTypesList & lambda_arguments, + const DataTypePtr & function_return_type, + const String & expression_return_name) : expression_actions(std::move(expression_actions_)) { /// Check that expression does not contain unusual actions that will break columns structure. 
@@ -219,9 +218,9 @@ public: arguments_map[arg.name] = arg.type; DataTypes captured_types; - captured_types.reserve(captured_names_.size()); + captured_types.reserve(captured_names.size()); - for (const auto & captured_name : captured_names_) + for (const auto & captured_name : captured_names) { auto it = arguments_map.find(captured_name); if (it == arguments_map.end()) @@ -232,21 +231,21 @@ public: } DataTypes argument_types; - argument_types.reserve(lambda_arguments_.size()); - for (const auto & lambda_argument : lambda_arguments_) + argument_types.reserve(lambda_arguments.size()); + for (const auto & lambda_argument : lambda_arguments) argument_types.push_back(lambda_argument.type); - return_type = std::make_shared(argument_types, function_return_type_); + return_type = std::make_shared(argument_types, function_return_type); name = "Capture[" + toString(captured_types) + "](" + toString(argument_types) + ") -> " - + function_return_type_->getName(); + + function_return_type->getName(); capture = std::make_shared(Capture{ - .captured_names = captured_names_, + .captured_names = captured_names, .captured_types = std::move(captured_types), - .lambda_arguments = lambda_arguments_, - .return_name = expression_return_name_, - .return_type = function_return_type_, + .lambda_arguments = lambda_arguments, + .return_name = expression_return_name, + .return_type = function_return_type, }); } diff --git a/src/Functions/FunctionsStringHashFixedString.cpp b/src/Functions/FunctionsStringHashFixedString.cpp new file mode 100644 index 00000000000..fd42a84fa26 --- /dev/null +++ b/src/Functions/FunctionsStringHashFixedString.cpp @@ -0,0 +1,440 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "config.h" + +#if USE_BLAKE3 +# include +#endif + +#if USE_SSL +# include +# include +# include +# if USE_BORINGSSL +# include +# else +# include +# endif +#endif + +/// Instatiating only the functions that require FunctionStringHashFixedString in a separate file +/// to better parallelize the build procedure and avoid MSan build failure +/// due to excessive resource consumption. 
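The new file keeps all fixed-length hashes behind one template; each Impl only has to provide a name, a fixed output length, and an apply() that writes exactly that many bytes. A toy stand-in Impl shows the contract (ToyHashImpl and hashToHex are illustrative, not part of the real code):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>

struct ToyHashImpl
{
    static constexpr auto name = "toyHash";
    enum { length = 4 };                            // bytes written by apply()

    static void apply(const char * begin, std::size_t size, unsigned char * out)
    {
        unsigned char acc[length] = {};
        for (std::size_t i = 0; i < size; ++i)      // fold the input into 4 bytes with XOR
            acc[i % length] ^= static_cast<unsigned char>(begin[i]);
        std::memcpy(out, acc, length);
    }
};

template <typename Impl>
std::string hashToHex(const std::string & input)
{
    unsigned char out[Impl::length];
    Impl::apply(input.data(), input.size(), out);   // the Impl decides the algorithm

    std::string hex;
    char buf[3];
    for (unsigned char byte : out)
    {
        std::snprintf(buf, sizeof(buf), "%02X", static_cast<unsigned>(byte));
        hex += buf;
    }
    return hex;
}

int main()
{
    std::printf("%s('abc') = %s\n", ToyHashImpl::name, hashToHex<ToyHashImpl>("abc").c_str());
}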
+ +namespace DB +{ +namespace ErrorCodes +{ +extern const int ILLEGAL_COLUMN; +extern const int ILLEGAL_TYPE_OF_ARGUMENT; +} + + +#if USE_SSL + +struct MD4Impl +{ + static constexpr auto name = "MD4"; + enum + { + length = MD4_DIGEST_LENGTH + }; + + static void apply(const char * begin, const size_t size, unsigned char * out_char_data) + { + MD4_CTX ctx; + MD4_Init(&ctx); + MD4_Update(&ctx, reinterpret_cast(begin), size); + MD4_Final(out_char_data, &ctx); + } +}; + +struct MD5Impl +{ + static constexpr auto name = "MD5"; + enum + { + length = MD5_DIGEST_LENGTH + }; + + static void apply(const char * begin, const size_t size, unsigned char * out_char_data) + { + MD5_CTX ctx; + MD5_Init(&ctx); + MD5_Update(&ctx, reinterpret_cast(begin), size); + MD5_Final(out_char_data, &ctx); + } +}; + +struct SHA1Impl +{ + static constexpr auto name = "SHA1"; + enum + { + length = SHA_DIGEST_LENGTH + }; + + static void apply(const char * begin, const size_t size, unsigned char * out_char_data) + { + SHA_CTX ctx; + SHA1_Init(&ctx); + SHA1_Update(&ctx, reinterpret_cast(begin), size); + SHA1_Final(out_char_data, &ctx); + } +}; + +struct SHA224Impl +{ + static constexpr auto name = "SHA224"; + enum + { + length = SHA224_DIGEST_LENGTH + }; + + static void apply(const char * begin, const size_t size, unsigned char * out_char_data) + { + SHA256_CTX ctx; + SHA224_Init(&ctx); + SHA224_Update(&ctx, reinterpret_cast(begin), size); + SHA224_Final(out_char_data, &ctx); + } +}; + +struct SHA256Impl +{ + static constexpr auto name = "SHA256"; + enum + { + length = SHA256_DIGEST_LENGTH + }; + + static void apply(const char * begin, const size_t size, unsigned char * out_char_data) + { + SHA256_CTX ctx; + SHA256_Init(&ctx); + SHA256_Update(&ctx, reinterpret_cast(begin), size); + SHA256_Final(out_char_data, &ctx); + } +}; + +struct SHA384Impl +{ + static constexpr auto name = "SHA384"; + enum + { + length = SHA384_DIGEST_LENGTH + }; + + static void apply(const char * begin, const size_t size, unsigned char * out_char_data) + { + SHA512_CTX ctx; + SHA384_Init(&ctx); + SHA384_Update(&ctx, reinterpret_cast(begin), size); + SHA384_Final(out_char_data, &ctx); + } +}; + +struct SHA512Impl +{ + static constexpr auto name = "SHA512"; + enum + { + length = 64 + }; + + static void apply(const char * begin, const size_t size, unsigned char * out_char_data) + { + SHA512_CTX ctx; + SHA512_Init(&ctx); + SHA512_Update(&ctx, reinterpret_cast(begin), size); + SHA512_Final(out_char_data, &ctx); + } +}; + +struct SHA512Impl256 +{ + static constexpr auto name = "SHA512_256"; + enum + { + length = 32 + }; + + static void apply(const char * begin, const size_t size, unsigned char * out_char_data) + { + /// Here, we use the EVP interface that is common to both BoringSSL and OpenSSL. Though BoringSSL is the default + /// SSL library that we use, for S390X architecture only OpenSSL is supported. But the SHA512-256, SHA512_256_Init, + /// SHA512_256_Update, SHA512_256_Final methods to calculate hash (similar to the other SHA functions) aren't available + /// in the current version of OpenSSL that we use which necessitates the use of the EVP interface. 
+ auto * md_ctx = EVP_MD_CTX_create(); + EVP_DigestInit_ex(md_ctx, EVP_sha512_256(), nullptr /*engine*/); + EVP_DigestUpdate(md_ctx, begin, size); + EVP_DigestFinal_ex(md_ctx, out_char_data, nullptr /*size*/); + EVP_MD_CTX_destroy(md_ctx); + } +}; +#endif + +#if USE_BLAKE3 +struct ImplBLAKE3 +{ + static constexpr auto name = "BLAKE3"; + enum + { + length = 32 + }; + + static void apply(const char * begin, const size_t size, unsigned char * out_char_data) + { + static_assert(LLVM_BLAKE3_OUT_LEN == ImplBLAKE3::length); + auto & result = *reinterpret_cast *>(out_char_data); + + llvm::BLAKE3 hasher; + if (size > 0) + hasher.update(llvm::StringRef(begin, size)); + hasher.final(result); + } +}; + +#endif + +template +class FunctionStringHashFixedString : public IFunction +{ +public: + static constexpr auto name = Impl::name; + static FunctionPtr create(ContextPtr) { return std::make_shared(); } + + String getName() const override { return name; } + + size_t getNumberOfArguments() const override { return 1; } + + DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + { + if (!isStringOrFixedString(arguments[0]) && !isIPv6(arguments[0])) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}", arguments[0]->getName(), getName()); + + return std::make_shared(Impl::length); + } + + bool useDefaultImplementationForConstants() const override { return true; } + + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + { + if (const ColumnString * col_from = checkAndGetColumn(arguments[0].column.get())) + { + auto col_to = ColumnFixedString::create(Impl::length); + + const typename ColumnString::Chars & data = col_from->getChars(); + const typename ColumnString::Offsets & offsets = col_from->getOffsets(); + auto & chars_to = col_to->getChars(); + const auto size = offsets.size(); + chars_to.resize(size * Impl::length); + + ColumnString::Offset current_offset = 0; + for (size_t i = 0; i < size; ++i) + { + Impl::apply( + reinterpret_cast(&data[current_offset]), + offsets[i] - current_offset - 1, + reinterpret_cast(&chars_to[i * Impl::length])); + + current_offset = offsets[i]; + } + + return col_to; + } + else if (const ColumnFixedString * col_from_fix = checkAndGetColumn(arguments[0].column.get())) + { + auto col_to = ColumnFixedString::create(Impl::length); + const typename ColumnFixedString::Chars & data = col_from_fix->getChars(); + const auto size = col_from_fix->size(); + auto & chars_to = col_to->getChars(); + const auto length = col_from_fix->getN(); + chars_to.resize(size * Impl::length); + for (size_t i = 0; i < size; ++i) + { + Impl::apply( + reinterpret_cast(&data[i * length]), length, reinterpret_cast(&chars_to[i * Impl::length])); + } + return col_to; + } + else if (const ColumnIPv6 * col_from_ip = checkAndGetColumn(arguments[0].column.get())) + { + auto col_to = ColumnFixedString::create(Impl::length); + const typename ColumnIPv6::Container & data = col_from_ip->getData(); + const auto size = col_from_ip->size(); + auto & chars_to = col_to->getChars(); + const auto length = IPV6_BINARY_LENGTH; + chars_to.resize(size * Impl::length); + for (size_t i = 0; i < size; ++i) + { + Impl::apply( + reinterpret_cast(&data[i * length]), length, reinterpret_cast(&chars_to[i * Impl::length])); + } + return col_to; + } + else + throw 
Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of first argument of function {}", + arguments[0].column->getName(), + getName()); + } +}; + +#if USE_SSL || USE_BLAKE3 +REGISTER_FUNCTION(HashFixedStrings) +{ +# if USE_SSL + using FunctionMD4 = FunctionStringHashFixedString; + using FunctionMD5 = FunctionStringHashFixedString; + using FunctionSHA1 = FunctionStringHashFixedString; + using FunctionSHA224 = FunctionStringHashFixedString; + using FunctionSHA256 = FunctionStringHashFixedString; + using FunctionSHA384 = FunctionStringHashFixedString; + using FunctionSHA512 = FunctionStringHashFixedString; + using FunctionSHA512_256 = FunctionStringHashFixedString; + + factory.registerFunction(FunctionDocumentation{ + .description = R"(Calculates the MD4 hash of the given string.)", + .syntax = "SELECT MD4(s);", + .arguments = {{"s", "The input [String](../../sql-reference/data-types/string.md)."}}, + .returned_value + = "The MD4 hash of the given input string returned as a [FixedString(16)](../../sql-reference/data-types/fixedstring.md).", + .examples + = {{"", + "SELECT HEX(MD4('abc'));", + R"( +┌─hex(MD4('abc'))──────────────────┐ +│ A448017AAF21D8525FC10AE87AA6729D │ +└──────────────────────────────────┘ + )"}}}); + factory.registerFunction(FunctionDocumentation{ + .description = R"(Calculates the MD5 hash of the given string.)", + .syntax = "SELECT MD5(s);", + .arguments = {{"s", "The input [String](../../sql-reference/data-types/string.md)."}}, + .returned_value + = "The MD5 hash of the given input string returned as a [FixedString(16)](../../sql-reference/data-types/fixedstring.md).", + .examples + = {{"", + "SELECT HEX(MD5('abc'));", + R"( +┌─hex(MD5('abc'))──────────────────┐ +│ 900150983CD24FB0D6963F7D28E17F72 │ +└──────────────────────────────────┘ + )"}}}); + factory.registerFunction(FunctionDocumentation{ + .description = R"(Calculates the SHA1 hash of the given string.)", + .syntax = "SELECT SHA1(s);", + .arguments = {{"s", "The input [String](../../sql-reference/data-types/string.md)."}}, + .returned_value + = "The SHA1 hash of the given input string returned as a [FixedString](../../sql-reference/data-types/fixedstring.md).", + .examples + = {{"", + "SELECT HEX(SHA1('abc'));", + R"( +┌─hex(SHA1('abc'))─────────────────────────┐ +│ A9993E364706816ABA3E25717850C26C9CD0D89D │ +└──────────────────────────────────────────┘ + )"}}}); + factory.registerFunction(FunctionDocumentation{ + .description = R"(Calculates the SHA224 hash of the given string.)", + .syntax = "SELECT SHA224(s);", + .arguments = {{"s", "The input [String](../../sql-reference/data-types/string.md)."}}, + .returned_value + = "The SHA224 hash of the given input string returned as a [FixedString](../../sql-reference/data-types/fixedstring.md).", + .examples + = {{"", + "SELECT HEX(SHA224('abc'));", + R"( +┌─hex(SHA224('abc'))───────────────────────────────────────┐ +│ 23097D223405D8228642A477BDA255B32AADBCE4BDA0B3F7E36C9DA7 │ +└──────────────────────────────────────────────────────────┘ + )"}}}); + factory.registerFunction(FunctionDocumentation{ + .description = R"(Calculates the SHA256 hash of the given string.)", + .syntax = "SELECT SHA256(s);", + .arguments = {{"s", "The input [String](../../sql-reference/data-types/string.md)."}}, + .returned_value + = "The SHA256 hash of the given input string returned as a [FixedString](../../sql-reference/data-types/fixedstring.md).", + .examples + = {{"", + "SELECT HEX(SHA256('abc'));", + R"( +┌─hex(SHA256('abc'))───────────────────────────────────────────────┐ +│ 
BA7816BF8F01CFEA414140DE5DAE2223B00361A396177A9CB410FF61F20015AD │ +└──────────────────────────────────────────────────────────────────┘ + )"}}}); + factory.registerFunction(FunctionDocumentation{ + .description = R"(Calculates the SHA384 hash of the given string.)", + .syntax = "SELECT SHA384(s);", + .arguments = {{"s", "The input [String](../../sql-reference/data-types/string.md)."}}, + .returned_value + = "The SHA384 hash of the given input string returned as a [FixedString](../../sql-reference/data-types/fixedstring.md).", + .examples + = {{"", + "SELECT HEX(SHA384('abc'));", + R"( +┌─hex(SHA384('abc'))───────────────────────────────────────────────────────────────────────────────┐ +│ CB00753F45A35E8BB5A03D699AC65007272C32AB0EDED1631A8B605A43FF5BED8086072BA1E7CC2358BAECA134C825A7 │ +└──────────────────────────────────────────────────────────────────────────────────────────────────┘ + )"}}}); + factory.registerFunction(FunctionDocumentation{ + .description = R"(Calculates the SHA512 hash of the given string.)", + .syntax = "SELECT SHA512(s);", + .arguments = {{"s", "The input [String](../../sql-reference/data-types/string.md)."}}, + .returned_value + = "The SHA512 hash of the given input string returned as a [FixedString](../../sql-reference/data-types/fixedstring.md).", + .examples + = {{"", + "SELECT HEX(SHA512('abc'));", + R"( +┌─hex(SHA512('abc'))───────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ DDAF35A193617ABACC417349AE20413112E6FA4E89A97EA20A9EEEE64B55D39A2192992A274FC1A836BA3C23A3FEEBBD454D4423643CE80E2A9AC94FA54CA49F │ +└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ + )"}}}); + factory.registerFunction(FunctionDocumentation{ + .description = R"(Calculates the SHA512_256 hash of the given string.)", + .syntax = "SELECT SHA512_256(s);", + .arguments = {{"s", "The input [String](../../sql-reference/data-types/string.md)."}}, + .returned_value + = "The SHA512_256 hash of the given input string returned as a [FixedString](../../sql-reference/data-types/fixedstring.md).", + .examples + = {{"", + "SELECT HEX(SHA512_256('abc'));", + R"( +┌─hex(SHA512_256('abc'))───────────────────────────────────────────┐ +│ 53048E2681941EF99B2E29B76B4C7DABE4C2D0C634FC6D46E0E2F13107E7AF23 │ +└──────────────────────────────────────────────────────────────────┘ + )"}}}); + + +# endif + +# if USE_BLAKE3 + using FunctionBLAKE3 = FunctionStringHashFixedString; + factory.registerFunction( + FunctionDocumentation{ + .description = R"( + Calculates BLAKE3 hash string and returns the resulting set of bytes as FixedString. + This cryptographic hash-function is integrated into ClickHouse with BLAKE3 Rust library. + The function is rather fast and shows approximately two times faster performance compared to SHA-2, while generating hashes of the same length as SHA-256. + It returns a BLAKE3 hash as a byte array with type FixedString(32). 
+ )", + .examples{{"hash", "SELECT hex(BLAKE3('ABC'))", ""}}, + .categories{"Hash"}}, + FunctionFactory::CaseSensitive); +# endif +} +#endif +} diff --git a/src/Functions/GatherUtils/Sources.h b/src/Functions/GatherUtils/Sources.h index fad2be1f622..222f9f19168 100644 --- a/src/Functions/GatherUtils/Sources.h +++ b/src/Functions/GatherUtils/Sources.h @@ -11,6 +11,8 @@ #include #include +#include + #include "IArraySource.h" #include "IValueSource.h" #include "Slices.h" @@ -56,8 +58,8 @@ struct NumericArraySource : public ArraySourceImpl> } explicit NumericArraySource(const ColumnArray & arr) - : column(typeid_cast(arr.getData())) - , elements(typeid_cast(arr.getData()).getData()), offsets(arr.getOffsets()) + : column(typeid_cast(arr.getData())) + , elements(typeid_cast(arr.getData()).getData()), offsets(arr.getOffsets()) { } @@ -154,17 +156,22 @@ struct ConstSource : public Base size_t row_num = 0; explicit ConstSource(const ColumnConst & col_) - : Base(static_cast(col_.getDataColumn())), total_rows(col_.size()) + : Base(static_cast(col_.getDataColumn())) + , total_rows(col_.size()) { } template - ConstSource(const ColumnType & col_, size_t total_rows_) : Base(col_), total_rows(total_rows_) + ConstSource(const ColumnType & col_, size_t total_rows_) + : Base(col_) + , total_rows(total_rows_) { } template - ConstSource(const ColumnType & col_, const NullMap & null_map_, size_t total_rows_) : Base(col_, null_map_), total_rows(total_rows_) + ConstSource(const ColumnType & col_, const NullMap & null_map_, size_t total_rows_) + : Base(col_, null_map_) + , total_rows(total_rows_) { } @@ -240,7 +247,8 @@ struct StringSource ColumnString::Offset prev_offset = 0; explicit StringSource(const ColumnString & col) - : elements(col.getChars()), offsets(col.getOffsets()) + : elements(col.getChars()) + , offsets(col.getOffsets()) { } @@ -313,6 +321,96 @@ struct StringSource } }; +/// Treats Enum values as Strings, modeled after StringSource +template +struct EnumSource +{ + using Column = typename EnumDataType::ColumnType; + using Slice = NumericArraySlice; + + using SinkType = StringSink; + + const typename Column::Container & data; + const EnumDataType & data_type; + + size_t row_num = 0; + + EnumSource(const Column & col, const EnumDataType & data_type_) + : data(col.getData()) + , data_type(data_type_) + { + } + + void next() + { + ++row_num; + } + + bool isEnd() const + { + return row_num == data.size(); + } + + size_t rowNum() const + { + return row_num; + } + + size_t getSizeForReserve() const + { + return data.size(); + } + + size_t getElementSize() const + { + std::string_view name = data_type.getNameForValue(data[row_num]).toView(); + return name.size(); + } + + size_t getColumnSize() const + { + return data.size(); + } + + Slice getWhole() const + { + std::string_view name = data_type.getNameForValue(data[row_num]).toView(); + return {reinterpret_cast(name.data()), name.size()}; + } + + Slice getSliceFromLeft(size_t offset) const + { + std::string_view name = data_type.getNameForValue(data[row_num]).toView(); + if (offset >= name.size()) + return {reinterpret_cast(name.data()), 0}; + return {reinterpret_cast(name.data()) + offset, name.size() - offset}; + } + + Slice getSliceFromLeft(size_t offset, size_t length) const + { + std::string_view name = data_type.getNameForValue(data[row_num]).toView(); + if (offset >= name.size()) + return {reinterpret_cast(name.data()), 0}; + return {reinterpret_cast(name.data()) + offset, std::min(length, name.size() - offset)}; + } + + Slice 
getSliceFromRight(size_t offset) const + { + std::string_view name = data_type.getNameForValue(data[row_num]).toView(); + if (offset > name.size()) + return {reinterpret_cast(name.data()), name.size()}; + return {reinterpret_cast(name.data()) + name.size() - offset, offset}; + } + + Slice getSliceFromRight(size_t offset, size_t length) const + { + std::string_view name = data_type.getNameForValue(data[row_num]).toView(); + if (offset > name.size()) + return {reinterpret_cast(name.data()), length + name.size() > offset ? std::min(name.size(), length + name.size() - offset) : 0}; + return {reinterpret_cast(name.data()) + name.size() - offset, std::min(length, offset)}; + } +}; + /// Differs to StringSource by having 'offset' and 'length' in code points instead of bytes in getSlice* methods. /** NOTE: The behaviour of substring and substringUTF8 is inconsistent when negative offset is greater than string size: @@ -419,7 +517,7 @@ struct FixedStringSource size_t column_size = 0; explicit FixedStringSource(const ColumnFixedString & col) - : string_size(col.getN()) + : string_size(col.getN()) { const auto & chars = col.getChars(); pos = chars.data(); @@ -553,7 +651,8 @@ struct GenericArraySource : public ArraySourceImpl } explicit GenericArraySource(const ColumnArray & arr) - : elements(arr.getData()), offsets(arr.getOffsets()) + : elements(arr.getData()) + , offsets(arr.getOffsets()) { } @@ -813,7 +912,10 @@ struct NullableValueSource : public ValueSource const NullMap & null_map; template - explicit NullableValueSource(const Column & col, const NullMap & null_map_) : ValueSource(col), null_map(null_map_) {} + NullableValueSource(const Column & col, const NullMap & null_map_) + : ValueSource(col) + , null_map(null_map_) + {} void accept(ValueSourceVisitor & visitor) override { visitor.visit(*this); } diff --git a/src/Functions/GregorianDate.cpp b/src/Functions/GregorianDate.cpp index f28194781c2..eb7ef4abe56 100644 --- a/src/Functions/GregorianDate.cpp +++ b/src/Functions/GregorianDate.cpp @@ -125,7 +125,7 @@ void GregorianDate::init(ReadBuffer & in) assertEOF(in); if (month_ < 1 || month_ > 12 || day_of_month_ < 1 || day_of_month_ > monthLength(is_leap_year(year_), month_)) - throw Exception(ErrorCodes::CANNOT_PARSE_DATE, "Invalid date, out of range (year: {}, month: {}, day_of_month: {})."); + throw Exception(ErrorCodes::CANNOT_PARSE_DATE, "Invalid date, out of range (year: {}, month: {}, day_of_month: {}).", year_, month_, day_of_month_); } bool GregorianDate::tryInit(ReadBuffer & in) diff --git a/src/Functions/UserDefined/IUserDefinedSQLObjectsLoader.h b/src/Functions/UserDefined/IUserDefinedSQLObjectsLoader.h deleted file mode 100644 index 4c7850951b5..00000000000 --- a/src/Functions/UserDefined/IUserDefinedSQLObjectsLoader.h +++ /dev/null @@ -1,47 +0,0 @@ -#pragma once - -#include - - -namespace DB -{ -class IAST; -struct Settings; -enum class UserDefinedSQLObjectType; - -/// Interface for a loader of user-defined SQL objects. -/// Implementations: UserDefinedSQLLoaderFromDisk, UserDefinedSQLLoaderFromZooKeeper -class IUserDefinedSQLObjectsLoader -{ -public: - virtual ~IUserDefinedSQLObjectsLoader() = default; - - /// Whether this loader can replicate SQL objects to another node. - virtual bool isReplicated() const { return false; } - virtual String getReplicationID() const { return ""; } - - /// Loads all objects. Can be called once - if objects are already loaded the function does nothing. - virtual void loadObjects() = 0; - - /// Stops watching. 
- virtual void stopWatching() {} - - /// Immediately reloads all objects, throws an exception if failed. - virtual void reloadObjects() = 0; - - /// Immediately reloads a specified object only. - virtual void reloadObject(UserDefinedSQLObjectType object_type, const String & object_name) = 0; - - /// Stores an object (must be called only by UserDefinedSQLFunctionFactory::registerFunction). - virtual bool storeObject( - UserDefinedSQLObjectType object_type, - const String & object_name, - const IAST & create_object_query, - bool throw_if_exists, - bool replace_if_exists, - const Settings & settings) = 0; - - /// Removes an object (must be called only by UserDefinedSQLFunctionFactory::unregisterFunction). - virtual bool removeObject(UserDefinedSQLObjectType object_type, const String & object_name, bool throw_if_not_exists) = 0; -}; -} diff --git a/src/Functions/UserDefined/IUserDefinedSQLObjectsStorage.h b/src/Functions/UserDefined/IUserDefinedSQLObjectsStorage.h new file mode 100644 index 00000000000..345ff8c5954 --- /dev/null +++ b/src/Functions/UserDefined/IUserDefinedSQLObjectsStorage.h @@ -0,0 +1,74 @@ +#pragma once + +#include + +#include + +#include + + +namespace DB +{ +class IAST; +struct Settings; +enum class UserDefinedSQLObjectType; + +/// Interface for a storage of user-defined SQL objects. +/// Implementations: UserDefinedSQLObjectsDiskStorage, UserDefinedSQLObjectsZooKeeperStorage +class IUserDefinedSQLObjectsStorage +{ +public: + virtual ~IUserDefinedSQLObjectsStorage() = default; + + /// Whether this loader can replicate SQL objects to another node. + virtual bool isReplicated() const { return false; } + virtual String getReplicationID() const { return ""; } + + /// Loads all objects. Can be called once - if objects are already loaded the function does nothing. + virtual void loadObjects() = 0; + + /// Get object by name. If no object stored with object_name throws exception. + virtual ASTPtr get(const String & object_name) const = 0; + + /// Get object by name. If no object stored with object_name return nullptr. + virtual ASTPtr tryGet(const String & object_name) const = 0; + + /// Check if object with object_name is stored. + virtual bool has(const String & object_name) const = 0; + + /// Get all user defined object names. + virtual std::vector getAllObjectNames() const = 0; + + /// Get all user defined objects. + virtual std::vector> getAllObjects() const = 0; + + /// Check whether any UDFs have been stored. + virtual bool empty() const = 0; + + /// Stops watching. + virtual void stopWatching() {} + + /// Immediately reloads all objects, throws an exception if failed. + virtual void reloadObjects() = 0; + + /// Immediately reloads a specified object only. + virtual void reloadObject(UserDefinedSQLObjectType object_type, const String & object_name) = 0; + + /// Stores an object (must be called only by UserDefinedSQLFunctionFactory::registerFunction). + virtual bool storeObject( + const ContextPtr & current_context, + UserDefinedSQLObjectType object_type, + const String & object_name, + ASTPtr create_object_query, + bool throw_if_exists, + bool replace_if_exists, + const Settings & settings) = 0; + + /// Removes an object (must be called only by UserDefinedSQLFunctionFactory::unregisterFunction). 
+ virtual bool removeObject( + const ContextPtr & current_context, + UserDefinedSQLObjectType object_type, + const String & object_name, + bool throw_if_not_exists) = 0; +}; +} diff --git a/src/Functions/UserDefined/UserDefinedSQLFunctionFactory.cpp b/src/Functions/UserDefined/UserDefinedSQLFunctionFactory.cpp index c4a503589eb..e37e4a23b63 100644 --- a/src/Functions/UserDefined/UserDefinedSQLFunctionFactory.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLFunctionFactory.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include #include @@ -14,8 +14,6 @@ #include #include -#include - namespace DB { @@ -23,7 +21,6 @@ namespace DB namespace ErrorCodes { extern const int FUNCTION_ALREADY_EXISTS; - extern const int UNKNOWN_FUNCTION; extern const int CANNOT_DROP_FUNCTION; extern const int CANNOT_CREATE_RECURSIVE_FUNCTION; extern const int UNSUPPORTED_METHOD; @@ -130,20 +127,17 @@ bool UserDefinedSQLFunctionFactory::registerFunction(const ContextMutablePtr & c checkCanBeRegistered(context, function_name, *create_function_query); create_function_query = normalizeCreateFunctionQuery(*create_function_query); - std::lock_guard lock{mutex}; - auto it = function_name_to_create_query_map.find(function_name); - if (it != function_name_to_create_query_map.end()) - { - if (throw_if_exists) - throw Exception(ErrorCodes::FUNCTION_ALREADY_EXISTS, "User-defined function '{}' already exists", function_name); - else if (!replace_if_exists) - return false; - } - try { - auto & loader = context->getUserDefinedSQLObjectsLoader(); - bool stored = loader.storeObject(UserDefinedSQLObjectType::Function, function_name, *create_function_query, throw_if_exists, replace_if_exists, context->getSettingsRef()); + auto & loader = context->getUserDefinedSQLObjectsStorage(); + bool stored = loader.storeObject( + context, + UserDefinedSQLObjectType::Function, + function_name, + create_function_query, + throw_if_exists, + replace_if_exists, + context->getSettingsRef()); if (!stored) return false; } @@ -153,7 +147,6 @@ bool UserDefinedSQLFunctionFactory::registerFunction(const ContextMutablePtr & c throw; } - function_name_to_create_query_map[function_name] = create_function_query; return true; } @@ -161,20 +154,14 @@ bool UserDefinedSQLFunctionFactory::unregisterFunction(const ContextMutablePtr & { checkCanBeUnregistered(context, function_name); - std::lock_guard lock(mutex); - auto it = function_name_to_create_query_map.find(function_name); - if (it == function_name_to_create_query_map.end()) - { - if (throw_if_not_exists) - throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "User-defined function '{}' doesn't exist", function_name); - else - return false; - } - try { - auto & loader = context->getUserDefinedSQLObjectsLoader(); - bool removed = loader.removeObject(UserDefinedSQLObjectType::Function, function_name, throw_if_not_exists); + auto & storage = context->getUserDefinedSQLObjectsStorage(); + bool removed = storage.removeObject( + context, + UserDefinedSQLObjectType::Function, + function_name, + throw_if_not_exists); if (!removed) return false; } @@ -184,61 +171,41 @@ bool UserDefinedSQLFunctionFactory::unregisterFunction(const ContextMutablePtr & throw; } - function_name_to_create_query_map.erase(function_name); return true; } ASTPtr UserDefinedSQLFunctionFactory::get(const String & function_name) const { - std::lock_guard lock(mutex); - - auto it = function_name_to_create_query_map.find(function_name); - if (it == function_name_to_create_query_map.end()) - throw 
Exception(ErrorCodes::UNKNOWN_FUNCTION, - "The function name '{}' is not registered", - function_name); - - return it->second; + return global_context->getUserDefinedSQLObjectsStorage().get(function_name); } ASTPtr UserDefinedSQLFunctionFactory::tryGet(const std::string & function_name) const { - std::lock_guard lock(mutex); - - auto it = function_name_to_create_query_map.find(function_name); - if (it == function_name_to_create_query_map.end()) - return nullptr; - - return it->second; + return global_context->getUserDefinedSQLObjectsStorage().tryGet(function_name); } bool UserDefinedSQLFunctionFactory::has(const String & function_name) const { - return tryGet(function_name) != nullptr; + return global_context->getUserDefinedSQLObjectsStorage().has(function_name); } std::vector UserDefinedSQLFunctionFactory::getAllRegisteredNames() const { - std::vector registered_names; - - std::lock_guard lock(mutex); - registered_names.reserve(function_name_to_create_query_map.size()); - - for (const auto & [name, _] : function_name_to_create_query_map) - registered_names.emplace_back(name); - - return registered_names; + return global_context->getUserDefinedSQLObjectsStorage().getAllObjectNames(); } bool UserDefinedSQLFunctionFactory::empty() const { - std::lock_guard lock(mutex); - return function_name_to_create_query_map.empty(); + return global_context->getUserDefinedSQLObjectsStorage().empty(); } void UserDefinedSQLFunctionFactory::backup(BackupEntriesCollector & backup_entries_collector, const String & data_path_in_backup) const { - backupUserDefinedSQLObjects(backup_entries_collector, data_path_in_backup, UserDefinedSQLObjectType::Function, getAllFunctions()); + backupUserDefinedSQLObjects( + backup_entries_collector, + data_path_in_backup, + UserDefinedSQLObjectType::Function, + global_context->getUserDefinedSQLObjectsStorage().getAllObjects()); } void UserDefinedSQLFunctionFactory::restore(RestorerFromBackup & restorer, const String & data_path_in_backup) @@ -252,52 +219,4 @@ void UserDefinedSQLFunctionFactory::restore(RestorerFromBackup & restorer, const registerFunction(context, function_name, create_function_query, throw_if_exists, replace_if_exists); } -void UserDefinedSQLFunctionFactory::setAllFunctions(const std::vector> & new_functions) -{ - std::unordered_map normalized_functions; - for (const auto & [function_name, create_query] : new_functions) - normalized_functions[function_name] = normalizeCreateFunctionQuery(*create_query); - - std::lock_guard lock(mutex); - function_name_to_create_query_map = std::move(normalized_functions); -} - -std::vector> UserDefinedSQLFunctionFactory::getAllFunctions() const -{ - std::lock_guard lock{mutex}; - std::vector> all_functions; - all_functions.reserve(function_name_to_create_query_map.size()); - std::copy(function_name_to_create_query_map.begin(), function_name_to_create_query_map.end(), std::back_inserter(all_functions)); - return all_functions; -} - -void UserDefinedSQLFunctionFactory::setFunction(const String & function_name, const IAST & create_function_query) -{ - std::lock_guard lock(mutex); - function_name_to_create_query_map[function_name] = normalizeCreateFunctionQuery(create_function_query); -} - -void UserDefinedSQLFunctionFactory::removeFunction(const String & function_name) -{ - std::lock_guard lock(mutex); - function_name_to_create_query_map.erase(function_name); -} - -void UserDefinedSQLFunctionFactory::removeAllFunctionsExcept(const Strings & function_names_to_keep) -{ - boost::container::flat_set 
names_set_to_keep{function_names_to_keep.begin(), function_names_to_keep.end()}; - std::lock_guard lock(mutex); - for (auto it = function_name_to_create_query_map.begin(); it != function_name_to_create_query_map.end();) - { - auto current = it++; - if (!names_set_to_keep.contains(current->first)) - function_name_to_create_query_map.erase(current); - } -} - -std::unique_lock UserDefinedSQLFunctionFactory::getLock() const -{ - return std::unique_lock{mutex}; -} - } diff --git a/src/Functions/UserDefined/UserDefinedSQLFunctionFactory.h b/src/Functions/UserDefined/UserDefinedSQLFunctionFactory.h index a7d586061b2..b1f3940323a 100644 --- a/src/Functions/UserDefined/UserDefinedSQLFunctionFactory.h +++ b/src/Functions/UserDefined/UserDefinedSQLFunctionFactory.h @@ -6,7 +6,7 @@ #include #include -#include +#include namespace DB @@ -48,23 +48,11 @@ public: void restore(RestorerFromBackup & restorer, const String & data_path_in_backup); private: - friend class UserDefinedSQLObjectsLoaderFromDisk; - friend class UserDefinedSQLObjectsLoaderFromZooKeeper; - /// Checks that a specified function can be registered, throws an exception if not. static void checkCanBeRegistered(const ContextPtr & context, const String & function_name, const IAST & create_function_query); static void checkCanBeUnregistered(const ContextPtr & context, const String & function_name); - /// The following functions must be called only by the loader. - void setAllFunctions(const std::vector> & new_functions); - std::vector> getAllFunctions() const; - void setFunction(const String & function_name, const IAST & create_function_query); - void removeFunction(const String & function_name); - void removeAllFunctionsExcept(const Strings & function_names_to_keep); - std::unique_lock getLock() const; - - std::unordered_map function_name_to_create_query_map; - mutable std::recursive_mutex mutex; + ContextPtr global_context = Context::getGlobalContextInstance(); }; } diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsBackup.cpp b/src/Functions/UserDefined/UserDefinedSQLObjectsBackup.cpp index 6920e8ce2c2..3ec5393fa6f 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsBackup.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsBackup.cpp @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include #include @@ -37,9 +37,9 @@ void backupUserDefinedSQLObjects( escapeForFileName(object_name) + ".sql", std::make_shared(queryToString(create_object_query))); auto context = backup_entries_collector.getContext(); - const auto & loader = context->getUserDefinedSQLObjectsLoader(); + const auto & storage = context->getUserDefinedSQLObjectsStorage(); - if (!loader.isReplicated()) + if (!storage.isReplicated()) { fs::path data_path_in_backup_fs{data_path_in_backup}; for (const auto & [file_name, entry] : backup_entries) @@ -47,7 +47,7 @@ void backupUserDefinedSQLObjects( return; } - String replication_id = loader.getReplicationID(); + String replication_id = storage.getReplicationID(); auto backup_coordination = backup_entries_collector.getBackupCoordination(); backup_coordination->addReplicatedSQLObjectsDir(replication_id, object_type, data_path_in_backup); @@ -80,9 +80,9 @@ std::vector> restoreUserDefinedSQLObjects(RestorerFromBackup & restorer, const String & data_path_in_backup, UserDefinedSQLObjectType object_type) { auto context = restorer.getContext(); - const auto & loader = context->getUserDefinedSQLObjectsLoader(); + const auto & storage = context->getUserDefinedSQLObjectsStorage(); - if 
(loader.isReplicated() && !restorer.getRestoreCoordination()->acquireReplicatedSQLObjects(loader.getReplicationID(), object_type)) + if (storage.isReplicated() && !restorer.getRestoreCoordination()->acquireReplicatedSQLObjects(storage.getReplicationID(), object_type)) return {}; /// Other replica is already restoring user-defined SQL objects. auto backup = restorer.getBackup(); diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.cpp b/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp similarity index 80% rename from src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.cpp rename to src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp index d67c48f166d..271c464e79a 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp @@ -1,4 +1,4 @@ -#include "Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.h" +#include "Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.h" #include "Functions/UserDefined/UserDefinedSQLFunctionFactory.h" #include "Functions/UserDefined/UserDefinedSQLObjectType.h" @@ -51,7 +51,7 @@ namespace } } -UserDefinedSQLObjectsLoaderFromDisk::UserDefinedSQLObjectsLoaderFromDisk(const ContextPtr & global_context_, const String & dir_path_) +UserDefinedSQLObjectsDiskStorage::UserDefinedSQLObjectsDiskStorage(const ContextPtr & global_context_, const String & dir_path_) : global_context(global_context_) , dir_path{makeDirectoryPathCanonical(dir_path_)} , log{&Poco::Logger::get("UserDefinedSQLObjectsLoaderFromDisk")} @@ -60,13 +60,13 @@ UserDefinedSQLObjectsLoaderFromDisk::UserDefinedSQLObjectsLoaderFromDisk(const C } -ASTPtr UserDefinedSQLObjectsLoaderFromDisk::tryLoadObject(UserDefinedSQLObjectType object_type, const String & object_name) +ASTPtr UserDefinedSQLObjectsDiskStorage::tryLoadObject(UserDefinedSQLObjectType object_type, const String & object_name) { return tryLoadObject(object_type, object_name, getFilePath(object_type, object_name), /* check_file_exists= */ true); } -ASTPtr UserDefinedSQLObjectsLoaderFromDisk::tryLoadObject(UserDefinedSQLObjectType object_type, const String & object_name, const String & path, bool check_file_exists) +ASTPtr UserDefinedSQLObjectsDiskStorage::tryLoadObject(UserDefinedSQLObjectType object_type, const String & object_name, const String & path, bool check_file_exists) { LOG_DEBUG(log, "Loading user defined object {} from file {}", backQuote(object_name), path); @@ -93,7 +93,6 @@ ASTPtr UserDefinedSQLObjectsLoaderFromDisk::tryLoadObject(UserDefinedSQLObjectTy "", 0, global_context->getSettingsRef().max_parser_depth); - UserDefinedSQLFunctionFactory::checkCanBeRegistered(global_context, object_name, *ast); return ast; } } @@ -106,20 +105,20 @@ ASTPtr UserDefinedSQLObjectsLoaderFromDisk::tryLoadObject(UserDefinedSQLObjectTy } -void UserDefinedSQLObjectsLoaderFromDisk::loadObjects() +void UserDefinedSQLObjectsDiskStorage::loadObjects() { if (!objects_loaded) loadObjectsImpl(); } -void UserDefinedSQLObjectsLoaderFromDisk::reloadObjects() +void UserDefinedSQLObjectsDiskStorage::reloadObjects() { loadObjectsImpl(); } -void UserDefinedSQLObjectsLoaderFromDisk::loadObjectsImpl() +void UserDefinedSQLObjectsDiskStorage::loadObjectsImpl() { LOG_INFO(log, "Loading user defined objects from {}", dir_path); createDirectory(); @@ -148,26 +147,25 @@ void UserDefinedSQLObjectsLoaderFromDisk::loadObjectsImpl() function_names_and_queries.emplace_back(function_name, ast); } - 
UserDefinedSQLFunctionFactory::instance().setAllFunctions(function_names_and_queries); + setAllObjects(function_names_and_queries); objects_loaded = true; LOG_DEBUG(log, "User defined objects loaded"); } -void UserDefinedSQLObjectsLoaderFromDisk::reloadObject(UserDefinedSQLObjectType object_type, const String & object_name) +void UserDefinedSQLObjectsDiskStorage::reloadObject(UserDefinedSQLObjectType object_type, const String & object_name) { createDirectory(); auto ast = tryLoadObject(object_type, object_name); - auto & factory = UserDefinedSQLFunctionFactory::instance(); if (ast) - factory.setFunction(object_name, *ast); + setObject(object_name, *ast); else - factory.removeFunction(object_name); + removeObject(object_name); } -void UserDefinedSQLObjectsLoaderFromDisk::createDirectory() +void UserDefinedSQLObjectsDiskStorage::createDirectory() { std::error_code create_dir_error_code; fs::create_directories(dir_path, create_dir_error_code); @@ -177,10 +175,11 @@ void UserDefinedSQLObjectsLoaderFromDisk::createDirectory() } -bool UserDefinedSQLObjectsLoaderFromDisk::storeObject( +bool UserDefinedSQLObjectsDiskStorage::storeObjectImpl( + const ContextPtr & /*current_context*/, UserDefinedSQLObjectType object_type, const String & object_name, - const IAST & create_object_query, + ASTPtr create_object_query, bool throw_if_exists, bool replace_if_exists, const Settings & settings) @@ -197,7 +196,7 @@ bool UserDefinedSQLObjectsLoaderFromDisk::storeObject( } WriteBufferFromOwnString create_statement_buf; - formatAST(create_object_query, create_statement_buf, false); + formatAST(*create_object_query, create_statement_buf, false); writeChar('\n', create_statement_buf); String create_statement = create_statement_buf.str(); @@ -228,8 +227,11 @@ bool UserDefinedSQLObjectsLoaderFromDisk::storeObject( } -bool UserDefinedSQLObjectsLoaderFromDisk::removeObject( - UserDefinedSQLObjectType object_type, const String & object_name, bool throw_if_not_exists) +bool UserDefinedSQLObjectsDiskStorage::removeObjectImpl( + const ContextPtr & /*current_context*/, + UserDefinedSQLObjectType object_type, + const String & object_name, + bool throw_if_not_exists) { String file_path = getFilePath(object_type, object_name); LOG_DEBUG(log, "Removing user defined object {} stored in file {}", backQuote(object_name), file_path); @@ -249,7 +251,7 @@ bool UserDefinedSQLObjectsLoaderFromDisk::removeObject( } -String UserDefinedSQLObjectsLoaderFromDisk::getFilePath(UserDefinedSQLObjectType object_type, const String & object_name) const +String UserDefinedSQLObjectsDiskStorage::getFilePath(UserDefinedSQLObjectType object_type, const String & object_name) const { String file_path; switch (object_type) diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.h b/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.h similarity index 65% rename from src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.h rename to src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.h index 7b0bb291f42..f0986dbda72 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.h +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include @@ -9,10 +9,10 @@ namespace DB { /// Loads user-defined sql objects from a specified folder. 
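+/// Each object is persisted as a separate file containing its formatted CREATE query under the configured user_defined directory;
+/// loadObjects() reads these files back into the in-memory map kept by UserDefinedSQLObjectsStorageBase,
+/// while storeObjectImpl()/removeObjectImpl() keep the files in sync afterwards.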
-class UserDefinedSQLObjectsLoaderFromDisk : public IUserDefinedSQLObjectsLoader +class UserDefinedSQLObjectsDiskStorage : public UserDefinedSQLObjectsStorageBase { public: - UserDefinedSQLObjectsLoaderFromDisk(const ContextPtr & global_context_, const String & dir_path_); + UserDefinedSQLObjectsDiskStorage(const ContextPtr & global_context_, const String & dir_path_); void loadObjects() override; @@ -20,17 +20,22 @@ public: void reloadObject(UserDefinedSQLObjectType object_type, const String & object_name) override; - bool storeObject( +private: + bool storeObjectImpl( + const ContextPtr & current_context, UserDefinedSQLObjectType object_type, const String & object_name, - const IAST & create_object_query, + ASTPtr create_object_query, bool throw_if_exists, bool replace_if_exists, const Settings & settings) override; - bool removeObject(UserDefinedSQLObjectType object_type, const String & object_name, bool throw_if_not_exists) override; + bool removeObjectImpl( + const ContextPtr & current_context, + UserDefinedSQLObjectType object_type, + const String & object_name, + bool throw_if_not_exists) override; -private: void createDirectory(); void loadObjectsImpl(); ASTPtr tryLoadObject(UserDefinedSQLObjectType object_type, const String & object_name); diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsStorageBase.cpp b/src/Functions/UserDefined/UserDefinedSQLObjectsStorageBase.cpp new file mode 100644 index 00000000000..4f47a46b10d --- /dev/null +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsStorageBase.cpp @@ -0,0 +1,190 @@ +#include "Functions/UserDefined/UserDefinedSQLObjectsStorageBase.h" + +#include + +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int FUNCTION_ALREADY_EXISTS; + extern const int UNKNOWN_FUNCTION; +} + +namespace +{ + +ASTPtr normalizeCreateFunctionQuery(const IAST & create_function_query) +{ + auto ptr = create_function_query.clone(); + auto & res = typeid_cast(*ptr); + res.if_not_exists = false; + res.or_replace = false; + FunctionNameNormalizer().visit(res.function_core.get()); + return ptr; +} + +} + +ASTPtr UserDefinedSQLObjectsStorageBase::get(const String & object_name) const +{ + std::lock_guard lock(mutex); + + auto it = object_name_to_create_object_map.find(object_name); + if (it == object_name_to_create_object_map.end()) + throw Exception(ErrorCodes::UNKNOWN_FUNCTION, + "The object name '{}' is not saved", + object_name); + + return it->second; +} + +ASTPtr UserDefinedSQLObjectsStorageBase::tryGet(const std::string & object_name) const +{ + std::lock_guard lock(mutex); + + auto it = object_name_to_create_object_map.find(object_name); + if (it == object_name_to_create_object_map.end()) + return nullptr; + + return it->second; +} + +bool UserDefinedSQLObjectsStorageBase::has(const String & object_name) const +{ + return tryGet(object_name) != nullptr; +} + +std::vector UserDefinedSQLObjectsStorageBase::getAllObjectNames() const +{ + std::vector object_names; + + std::lock_guard lock(mutex); + object_names.reserve(object_name_to_create_object_map.size()); + + for (const auto & [name, _] : object_name_to_create_object_map) + object_names.emplace_back(name); + + return object_names; +} + +bool UserDefinedSQLObjectsStorageBase::empty() const +{ + std::lock_guard lock(mutex); + return object_name_to_create_object_map.empty(); +} + +bool UserDefinedSQLObjectsStorageBase::storeObject( + const ContextPtr & current_context, + UserDefinedSQLObjectType object_type, + const String & object_name, + ASTPtr create_object_query, + 
bool throw_if_exists, + bool replace_if_exists, + const Settings & settings) +{ + std::lock_guard lock{mutex}; + auto it = object_name_to_create_object_map.find(object_name); + if (it != object_name_to_create_object_map.end()) + { + if (throw_if_exists) + throw Exception(ErrorCodes::FUNCTION_ALREADY_EXISTS, "User-defined object '{}' already exists", object_name); + else if (!replace_if_exists) + return false; + } + + bool stored = storeObjectImpl( + current_context, + object_type, + object_name, + create_object_query, + throw_if_exists, + replace_if_exists, + settings); + + if (stored) + object_name_to_create_object_map[object_name] = create_object_query; + + return stored; +} + +bool UserDefinedSQLObjectsStorageBase::removeObject( + const ContextPtr & current_context, + UserDefinedSQLObjectType object_type, + const String & object_name, + bool throw_if_not_exists) +{ + std::lock_guard lock(mutex); + auto it = object_name_to_create_object_map.find(object_name); + if (it == object_name_to_create_object_map.end()) + { + if (throw_if_not_exists) + throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "User-defined object '{}' doesn't exist", object_name); + else + return false; + } + + bool removed = removeObjectImpl( + current_context, + object_type, + object_name, + throw_if_not_exists); + + if (removed) + object_name_to_create_object_map.erase(object_name); + + return removed; +} + +std::unique_lock UserDefinedSQLObjectsStorageBase::getLock() const +{ + return std::unique_lock{mutex}; +} + +void UserDefinedSQLObjectsStorageBase::setAllObjects(const std::vector> & new_objects) +{ + std::unordered_map normalized_functions; + for (const auto & [function_name, create_query] : new_objects) + normalized_functions[function_name] = normalizeCreateFunctionQuery(*create_query); + + std::lock_guard lock(mutex); + object_name_to_create_object_map = std::move(normalized_functions); +} + +std::vector> UserDefinedSQLObjectsStorageBase::getAllObjects() const +{ + std::lock_guard lock{mutex}; + std::vector> all_objects; + all_objects.reserve(object_name_to_create_object_map.size()); + std::copy(object_name_to_create_object_map.begin(), object_name_to_create_object_map.end(), std::back_inserter(all_objects)); + return all_objects; +} + +void UserDefinedSQLObjectsStorageBase::setObject(const String & object_name, const IAST & create_object_query) +{ + std::lock_guard lock(mutex); + object_name_to_create_object_map[object_name] = normalizeCreateFunctionQuery(create_object_query); +} + +void UserDefinedSQLObjectsStorageBase::removeObject(const String & object_name) +{ + std::lock_guard lock(mutex); + object_name_to_create_object_map.erase(object_name); +} + +void UserDefinedSQLObjectsStorageBase::removeAllObjectsExcept(const Strings & object_names_to_keep) +{ + boost::container::flat_set names_set_to_keep{object_names_to_keep.begin(), object_names_to_keep.end()}; + std::lock_guard lock(mutex); + for (auto it = object_name_to_create_object_map.begin(); it != object_name_to_create_object_map.end();) + { + auto current = it++; + if (!names_set_to_keep.contains(current->first)) + object_name_to_create_object_map.erase(current); + } +} + +} diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsStorageBase.h b/src/Functions/UserDefined/UserDefinedSQLObjectsStorageBase.h new file mode 100644 index 00000000000..cab63a3bfcf --- /dev/null +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsStorageBase.h @@ -0,0 +1,69 @@ +#pragma once + +#include +#include + +#include + +#include + +namespace DB +{ + +class 
UserDefinedSQLObjectsStorageBase : public IUserDefinedSQLObjectsStorage +{ +public: + ASTPtr get(const String & object_name) const override; + + ASTPtr tryGet(const String & object_name) const override; + + bool has(const String & object_name) const override; + + std::vector getAllObjectNames() const override; + + std::vector> getAllObjects() const override; + + bool empty() const override; + + bool storeObject( + const ContextPtr & current_context, + UserDefinedSQLObjectType object_type, + const String & object_name, + ASTPtr create_object_query, + bool throw_if_exists, + bool replace_if_exists, + const Settings & settings) override; + + bool removeObject( + const ContextPtr & current_context, + UserDefinedSQLObjectType object_type, + const String & object_name, + bool throw_if_not_exists) override; + +protected: + virtual bool storeObjectImpl( + const ContextPtr & current_context, + UserDefinedSQLObjectType object_type, + const String & object_name, + ASTPtr create_object_query, + bool throw_if_exists, + bool replace_if_exists, + const Settings & settings) = 0; + + virtual bool removeObjectImpl( + const ContextPtr & current_context, + UserDefinedSQLObjectType object_type, + const String & object_name, + bool throw_if_not_exists) = 0; + + std::unique_lock getLock() const; + void setAllObjects(const std::vector> & new_objects); + void setObject(const String & object_name, const IAST & create_object_query); + void removeObject(const String & object_name); + void removeAllObjectsExcept(const Strings & object_names_to_keep); + + std::unordered_map object_name_to_create_object_map; + mutable std::recursive_mutex mutex; +}; + +} diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromZooKeeper.cpp b/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp similarity index 82% rename from src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromZooKeeper.cpp rename to src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp index 29aff666da5..6e5a5338437 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromZooKeeper.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include @@ -47,7 +47,7 @@ namespace } -UserDefinedSQLObjectsLoaderFromZooKeeper::UserDefinedSQLObjectsLoaderFromZooKeeper( +UserDefinedSQLObjectsZooKeeperStorage::UserDefinedSQLObjectsZooKeeperStorage( const ContextPtr & global_context_, const String & zookeeper_path_) : global_context{global_context_} , zookeeper_getter{[global_context_]() { return global_context_->getZooKeeper(); }} @@ -66,20 +66,20 @@ UserDefinedSQLObjectsLoaderFromZooKeeper::UserDefinedSQLObjectsLoaderFromZooKeep zookeeper_path = "/" + zookeeper_path; } -UserDefinedSQLObjectsLoaderFromZooKeeper::~UserDefinedSQLObjectsLoaderFromZooKeeper() +UserDefinedSQLObjectsZooKeeperStorage::~UserDefinedSQLObjectsZooKeeperStorage() { SCOPE_EXIT_SAFE(stopWatchingThread()); } -void UserDefinedSQLObjectsLoaderFromZooKeeper::startWatchingThread() +void UserDefinedSQLObjectsZooKeeperStorage::startWatchingThread() { if (!watching_flag.exchange(true)) { - watching_thread = ThreadFromGlobalPool(&UserDefinedSQLObjectsLoaderFromZooKeeper::processWatchQueue, this); + watching_thread = ThreadFromGlobalPool(&UserDefinedSQLObjectsZooKeeperStorage::processWatchQueue, this); } } -void UserDefinedSQLObjectsLoaderFromZooKeeper::stopWatchingThread() +void UserDefinedSQLObjectsZooKeeperStorage::stopWatchingThread() { if (watching_flag.exchange(false)) { @@ -89,7 +89,7 @@ 
void UserDefinedSQLObjectsLoaderFromZooKeeper::stopWatchingThread() } } -zkutil::ZooKeeperPtr UserDefinedSQLObjectsLoaderFromZooKeeper::getZooKeeper() +zkutil::ZooKeeperPtr UserDefinedSQLObjectsZooKeeperStorage::getZooKeeper() { auto [zookeeper, session_status] = zookeeper_getter.getZooKeeper(); @@ -106,18 +106,18 @@ zkutil::ZooKeeperPtr UserDefinedSQLObjectsLoaderFromZooKeeper::getZooKeeper() return zookeeper; } -void UserDefinedSQLObjectsLoaderFromZooKeeper::initZooKeeperIfNeeded() +void UserDefinedSQLObjectsZooKeeperStorage::initZooKeeperIfNeeded() { getZooKeeper(); } -void UserDefinedSQLObjectsLoaderFromZooKeeper::resetAfterError() +void UserDefinedSQLObjectsZooKeeperStorage::resetAfterError() { zookeeper_getter.resetCache(); } -void UserDefinedSQLObjectsLoaderFromZooKeeper::loadObjects() +void UserDefinedSQLObjectsZooKeeperStorage::loadObjects() { /// loadObjects() is called at start from Server::main(), so it's better not to stop here on no connection to ZooKeeper or any other error. /// However the watching thread must be started anyway in case the connection will be established later. @@ -136,7 +136,7 @@ void UserDefinedSQLObjectsLoaderFromZooKeeper::loadObjects() } -void UserDefinedSQLObjectsLoaderFromZooKeeper::processWatchQueue() +void UserDefinedSQLObjectsZooKeeperStorage::processWatchQueue() { LOG_DEBUG(log, "Started watching thread"); setThreadName("UserDefObjWatch"); @@ -173,13 +173,13 @@ void UserDefinedSQLObjectsLoaderFromZooKeeper::processWatchQueue() } -void UserDefinedSQLObjectsLoaderFromZooKeeper::stopWatching() +void UserDefinedSQLObjectsZooKeeperStorage::stopWatching() { stopWatchingThread(); } -void UserDefinedSQLObjectsLoaderFromZooKeeper::reloadObjects() +void UserDefinedSQLObjectsZooKeeperStorage::reloadObjects() { auto zookeeper = getZooKeeper(); refreshAllObjects(zookeeper); @@ -187,23 +187,24 @@ void UserDefinedSQLObjectsLoaderFromZooKeeper::reloadObjects() } -void UserDefinedSQLObjectsLoaderFromZooKeeper::reloadObject(UserDefinedSQLObjectType object_type, const String & object_name) +void UserDefinedSQLObjectsZooKeeperStorage::reloadObject(UserDefinedSQLObjectType object_type, const String & object_name) { auto zookeeper = getZooKeeper(); refreshObject(zookeeper, object_type, object_name); } -void UserDefinedSQLObjectsLoaderFromZooKeeper::createRootNodes(const zkutil::ZooKeeperPtr & zookeeper) +void UserDefinedSQLObjectsZooKeeperStorage::createRootNodes(const zkutil::ZooKeeperPtr & zookeeper) { zookeeper->createAncestors(zookeeper_path); zookeeper->createIfNotExists(zookeeper_path, ""); } -bool UserDefinedSQLObjectsLoaderFromZooKeeper::storeObject( +bool UserDefinedSQLObjectsZooKeeperStorage::storeObjectImpl( + const ContextPtr & /*current_context*/, UserDefinedSQLObjectType object_type, const String & object_name, - const IAST & create_object_query, + ASTPtr create_object_query, bool throw_if_exists, bool replace_if_exists, const Settings &) @@ -212,7 +213,7 @@ bool UserDefinedSQLObjectsLoaderFromZooKeeper::storeObject( LOG_DEBUG(log, "Storing user-defined object {} at zk path {}", backQuote(object_name), path); WriteBufferFromOwnString create_statement_buf; - formatAST(create_object_query, create_statement_buf, false); + formatAST(*create_object_query, create_statement_buf, false); writeChar('\n', create_statement_buf); String create_statement = create_statement_buf.str(); @@ -252,8 +253,11 @@ bool UserDefinedSQLObjectsLoaderFromZooKeeper::storeObject( } -bool UserDefinedSQLObjectsLoaderFromZooKeeper::removeObject( - UserDefinedSQLObjectType object_type, 
const String & object_name, bool throw_if_not_exists) +bool UserDefinedSQLObjectsZooKeeperStorage::removeObjectImpl( + const ContextPtr & /*current_context*/, + UserDefinedSQLObjectType object_type, + const String & object_name, + bool throw_if_not_exists) { String path = getNodePath(zookeeper_path, object_type, object_name); LOG_DEBUG(log, "Removing user-defined object {} at zk path {}", backQuote(object_name), path); @@ -276,7 +280,7 @@ bool UserDefinedSQLObjectsLoaderFromZooKeeper::removeObject( return true; } -bool UserDefinedSQLObjectsLoaderFromZooKeeper::getObjectDataAndSetWatch( +bool UserDefinedSQLObjectsZooKeeperStorage::getObjectDataAndSetWatch( const zkutil::ZooKeeperPtr & zookeeper, String & data, const String & path, @@ -298,7 +302,7 @@ bool UserDefinedSQLObjectsLoaderFromZooKeeper::getObjectDataAndSetWatch( return zookeeper->tryGetWatch(path, data, &entity_stat, object_watcher); } -ASTPtr UserDefinedSQLObjectsLoaderFromZooKeeper::parseObjectData(const String & object_data, UserDefinedSQLObjectType object_type) +ASTPtr UserDefinedSQLObjectsZooKeeperStorage::parseObjectData(const String & object_data, UserDefinedSQLObjectType object_type) { switch (object_type) { @@ -317,7 +321,7 @@ ASTPtr UserDefinedSQLObjectsLoaderFromZooKeeper::parseObjectData(const String & UNREACHABLE(); } -ASTPtr UserDefinedSQLObjectsLoaderFromZooKeeper::tryLoadObject( +ASTPtr UserDefinedSQLObjectsZooKeeperStorage::tryLoadObject( const zkutil::ZooKeeperPtr & zookeeper, UserDefinedSQLObjectType object_type, const String & object_name) { String path = getNodePath(zookeeper_path, object_type, object_name); @@ -343,7 +347,7 @@ ASTPtr UserDefinedSQLObjectsLoaderFromZooKeeper::tryLoadObject( } } -Strings UserDefinedSQLObjectsLoaderFromZooKeeper::getObjectNamesAndSetWatch( +Strings UserDefinedSQLObjectsZooKeeperStorage::getObjectNamesAndSetWatch( const zkutil::ZooKeeperPtr & zookeeper, UserDefinedSQLObjectType object_type) { auto object_list_watcher = [my_watch_queue = watch_queue, object_type](const Coordination::WatchResponse &) @@ -371,7 +375,7 @@ Strings UserDefinedSQLObjectsLoaderFromZooKeeper::getObjectNamesAndSetWatch( return object_names; } -void UserDefinedSQLObjectsLoaderFromZooKeeper::refreshAllObjects(const zkutil::ZooKeeperPtr & zookeeper) +void UserDefinedSQLObjectsZooKeeperStorage::refreshAllObjects(const zkutil::ZooKeeperPtr & zookeeper) { /// It doesn't make sense to keep the old watch events because we will reread everything in this function. 
watch_queue->clear(); @@ -380,7 +384,7 @@ void UserDefinedSQLObjectsLoaderFromZooKeeper::refreshAllObjects(const zkutil::Z objects_loaded = true; } -void UserDefinedSQLObjectsLoaderFromZooKeeper::refreshObjects(const zkutil::ZooKeeperPtr & zookeeper, UserDefinedSQLObjectType object_type) +void UserDefinedSQLObjectsZooKeeperStorage::refreshObjects(const zkutil::ZooKeeperPtr & zookeeper, UserDefinedSQLObjectType object_type) { LOG_DEBUG(log, "Refreshing all user-defined {} objects", object_type); Strings object_names = getObjectNamesAndSetWatch(zookeeper, object_type); @@ -393,21 +397,20 @@ void UserDefinedSQLObjectsLoaderFromZooK function_names_and_asts.emplace_back(function_name, ast); } - UserDefinedSQLFunctionFactory::instance().setAllFunctions(function_names_and_asts); + setAllObjects(function_names_and_asts); LOG_DEBUG(log, "All user-defined {} objects refreshed", object_type); } -void UserDefinedSQLObjectsLoaderFromZooKeeper::syncObjects(const zkutil::ZooKeeperPtr & zookeeper, UserDefinedSQLObjectType object_type) +void UserDefinedSQLObjectsZooKeeperStorage::syncObjects(const zkutil::ZooKeeperPtr & zookeeper, UserDefinedSQLObjectType object_type) { LOG_DEBUG(log, "Syncing user-defined {} objects", object_type); Strings object_names = getObjectNamesAndSetWatch(zookeeper, object_type); - auto & factory = UserDefinedSQLFunctionFactory::instance(); - auto lock = factory.getLock(); + auto lock = getLock(); /// Remove stale objects - factory.removeAllFunctionsExcept(object_names); + removeAllObjectsExcept(object_names); /// Read & parse only new SQL objects from ZooKeeper for (const auto & function_name : object_names) { @@ -418,16 +421,15 @@ void UserDefinedSQLObjectsLoaderFromZooKeep LOG_DEBUG(log, "User-defined {} objects synced", object_type); } -void UserDefinedSQLObjectsLoaderFromZooKeeper::refreshObject( +void UserDefinedSQLObjectsZooKeeperStorage::refreshObject( const zkutil::ZooKeeperPtr & zookeeper, UserDefinedSQLObjectType object_type, const String & object_name) { auto ast = tryLoadObject(zookeeper, object_type, object_name); - auto & factory = UserDefinedSQLFunctionFactory::instance(); if (ast) - factory.setFunction(object_name, *ast); + setObject(object_name, *ast); else - factory.removeFunction(object_name); + removeObject(object_name); } } diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromZooKeeper.h b/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.h similarity index 80% rename from src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromZooKeeper.h rename to src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.h index 38e061fd4d9..9f41763c59c 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromZooKeeper.h +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include #include @@ -12,11 +12,11 @@ namespace DB { /// Loads user-defined sql objects from ZooKeeper. 
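+/// Chosen by createUserDefinedSQLObjectsStorage() when the server config defines user_defined_zookeeper_path (otherwise the disk storage under user_defined_path is used).
+/// A background watching thread (processWatchQueue) subscribes to ZooKeeper watches and re-reads objects whenever the corresponding nodes change, so other replicas pick up changes without a restart.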
-class UserDefinedSQLObjectsLoaderFromZooKeeper : public IUserDefinedSQLObjectsLoader +class UserDefinedSQLObjectsZooKeeperStorage : public UserDefinedSQLObjectsStorageBase { public: - UserDefinedSQLObjectsLoaderFromZooKeeper(const ContextPtr & global_context_, const String & zookeeper_path_); - ~UserDefinedSQLObjectsLoaderFromZooKeeper() override; + UserDefinedSQLObjectsZooKeeperStorage(const ContextPtr & global_context_, const String & zookeeper_path_); + ~UserDefinedSQLObjectsZooKeeperStorage() override; bool isReplicated() const override { return true; } String getReplicationID() const override { return zookeeper_path; } @@ -26,16 +26,21 @@ public: void reloadObjects() override; void reloadObject(UserDefinedSQLObjectType object_type, const String & object_name) override; - bool storeObject( +private: + bool storeObjectImpl( + const ContextPtr & current_context, UserDefinedSQLObjectType object_type, const String & object_name, - const IAST & create_object_query, + ASTPtr create_object_query, bool throw_if_exists, bool replace_if_exists, const Settings & settings) override; - bool removeObject(UserDefinedSQLObjectType object_type, const String & object_name, bool throw_if_not_exists) override; + bool removeObjectImpl( + const ContextPtr & current_context, + UserDefinedSQLObjectType object_type, + const String & object_name, + bool throw_if_not_exists) override; -private: void processWatchQueue(); zkutil::ZooKeeperPtr getZooKeeper(); diff --git a/src/Functions/UserDefined/createUserDefinedSQLObjectsLoader.h b/src/Functions/UserDefined/createUserDefinedSQLObjectsLoader.h deleted file mode 100644 index b3a4623dba3..00000000000 --- a/src/Functions/UserDefined/createUserDefinedSQLObjectsLoader.h +++ /dev/null @@ -1,12 +0,0 @@ -#pragma once - -#include - - -namespace DB -{ -class IUserDefinedSQLObjectsLoader; - -std::unique_ptr createUserDefinedSQLObjectsLoader(const ContextMutablePtr & global_context); - -} diff --git a/src/Functions/UserDefined/createUserDefinedSQLObjectsLoader.cpp b/src/Functions/UserDefined/createUserDefinedSQLObjectsStorage.cpp similarity index 61% rename from src/Functions/UserDefined/createUserDefinedSQLObjectsLoader.cpp rename to src/Functions/UserDefined/createUserDefinedSQLObjectsStorage.cpp index b7ebc7abf14..f8847024508 100644 --- a/src/Functions/UserDefined/createUserDefinedSQLObjectsLoader.cpp +++ b/src/Functions/UserDefined/createUserDefinedSQLObjectsStorage.cpp @@ -1,6 +1,6 @@ -#include -#include -#include +#include +#include +#include #include #include #include @@ -17,7 +17,7 @@ namespace ErrorCodes extern const int INVALID_CONFIG_PARAMETER; } -std::unique_ptr createUserDefinedSQLObjectsLoader(const ContextMutablePtr & global_context) +std::unique_ptr createUserDefinedSQLObjectsStorage(const ContextMutablePtr & global_context) { const String zookeeper_path_key = "user_defined_zookeeper_path"; const String disk_path_key = "user_defined_path"; @@ -33,12 +33,12 @@ std::unique_ptr createUserDefinedSQLObjectsLoader( zookeeper_path_key, disk_path_key); } - return std::make_unique(global_context, config.getString(zookeeper_path_key)); + return std::make_unique(global_context, config.getString(zookeeper_path_key)); } String default_path = fs::path{global_context->getPath()} / "user_defined/"; String path = config.getString(disk_path_key, default_path); - return std::make_unique(global_context, path); + return std::make_unique(global_context, path); } } diff --git a/src/Functions/UserDefined/createUserDefinedSQLObjectsStorage.h 
b/src/Functions/UserDefined/createUserDefinedSQLObjectsStorage.h new file mode 100644 index 00000000000..01659372dec --- /dev/null +++ b/src/Functions/UserDefined/createUserDefinedSQLObjectsStorage.h @@ -0,0 +1,12 @@ +#pragma once + +#include + + +namespace DB +{ +class IUserDefinedSQLObjectsStorage; + +std::unique_ptr createUserDefinedSQLObjectsStorage(const ContextMutablePtr & global_context); + +} diff --git a/src/Functions/array/FunctionArrayMapped.h b/src/Functions/array/FunctionArrayMapped.h index a7ab80f697a..9773673c63c 100644 --- a/src/Functions/array/FunctionArrayMapped.h +++ b/src/Functions/array/FunctionArrayMapped.h @@ -74,6 +74,8 @@ public: size_t getNumberOfArguments() const override { return 0; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } + bool useDefaultImplementationForConstants() const override { return true; } + /// Called if at least one function argument is a lambda expression. /// For argument-lambda expressions, it defines the types of arguments of these expressions. void getLambdaArgumentTypes(DataTypes & arguments) const override @@ -370,10 +372,10 @@ public: /// Put all the necessary columns multiplied by the sizes of arrays into the columns. auto replicated_column_function_ptr = IColumn::mutate(column_function->replicate(column_first_array->getOffsets())); - auto * replicated_column_function = typeid_cast(replicated_column_function_ptr.get()); - replicated_column_function->appendArguments(arrays); + auto & replicated_column_function = typeid_cast(*replicated_column_function_ptr); + replicated_column_function.appendArguments(arrays); - auto lambda_result = replicated_column_function->reduce(); + auto lambda_result = replicated_column_function.reduce(); /// Convert LowCardinality(T) -> T and Const(LowCardinality(T)) -> Const(T), /// because we removed LowCardinality from return type of lambda expression. diff --git a/src/Functions/array/arrayFold.cpp b/src/Functions/array/arrayFold.cpp index b5b650e7289..44fe95624a6 100644 --- a/src/Functions/array/arrayFold.cpp +++ b/src/Functions/array/arrayFold.cpp @@ -1,6 +1,11 @@ -#include "FunctionArrayMapped.h" -#include +#include +#include #include +#include +#include +#include +#include +#include namespace DB { @@ -15,13 +20,13 @@ namespace ErrorCodes } /** - * arrayFold(x1,...,xn,accum -> expression, array1,...,arrayn, accum_initial) - apply the expression to each element of the array (or set of arrays). 
+ * arrayFold(acc, a1, ..., aN -> expr, arr1, ..., arrN, acc_initial) */ -class ArrayFold : public IFunction +class FunctionArrayFold : public IFunction { public: static constexpr auto name = "arrayFold"; - static FunctionPtr create(ContextPtr) { return std::make_shared(); } + static FunctionPtr create(ContextPtr) { return std::make_shared(); } bool isVariadic() const override { return true; } size_t getNumberOfArguments() const override { return 0; } @@ -80,143 +85,192 @@ public: if (!lambda_function) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First argument for function {} must be a function", getName()); - ColumnPtr offsets_column; - ColumnPtr column_first_array_ptr; - const ColumnArray * column_first_array = nullptr; - ColumnsWithTypeAndName arrays; - arrays.reserve(arguments.size() - 1); + ColumnPtr first_array_col; + const ColumnArray * first_array_col_concrete = nullptr; + ColumnPtr first_array_col_offsets; - /// Validate input types and get input array columns in convenient form + ColumnsWithTypeAndName arrays_data_with_type_and_name; /// for all arrays, the pointers to the internal data column, type and name + arrays_data_with_type_and_name.reserve(arguments.size() - 1); + + /// Validate array arguments and set pointers so we can access them more conveniently for (size_t i = 1; i < arguments.size() - 1; ++i) { const auto & array_with_type_and_name = arguments[i]; - ColumnPtr column_array_ptr = array_with_type_and_name.column; - const auto * column_array = checkAndGetColumn(column_array_ptr.get()); - if (!column_array) + ColumnPtr array_col = array_with_type_and_name.column; + const auto * array_col_concrete = checkAndGetColumn(array_col.get()); + if (!array_col_concrete) { - const ColumnConst * column_const_array = checkAndGetColumnConst(column_array_ptr.get()); - if (!column_const_array) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Expected array column, found {}", column_array_ptr->getName()); - column_array_ptr = recursiveRemoveLowCardinality(column_const_array->convertToFullColumn()); - column_array = checkAndGetColumn(column_array_ptr.get()); + const ColumnConst * array_col_concrete_const = checkAndGetColumnConst(array_col.get()); + if (!array_col_concrete_const) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Expected array column, found {}", array_col->getName()); + array_col = recursiveRemoveLowCardinality(array_col_concrete_const->convertToFullColumn()); + array_col_concrete = checkAndGetColumn(array_col.get()); } - const DataTypePtr & array_type_ptr = array_with_type_and_name.type; - const auto * array_type = checkAndGetDataType(array_type_ptr.get()); - if (!array_type) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Expected array type, found {}", array_type_ptr->getName()); + const DataTypePtr & array_type = array_with_type_and_name.type; + const auto * array_type_concrete = checkAndGetDataType(array_type.get()); + if (!array_type_concrete) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Expected array type, found {}", array_type->getName()); - if (!offsets_column) - offsets_column = column_array->getOffsetsPtr(); + /// Check that the cardinality of the arrays across a row is the same for all array arguments. + /// This simplifies later calculations which can work only with the offsets of the first column. + if (!first_array_col_offsets) + first_array_col_offsets = array_col_concrete->getOffsetsPtr(); else { - /// The first condition is optimization: do not compare data if the pointers are equal.
- if (column_array->getOffsetsPtr() != offsets_column - && column_array->getOffsets() != typeid_cast(*offsets_column).getData()) - throw Exception(ErrorCodes::SIZES_OF_ARRAYS_DONT_MATCH, "Arrays passed to {} must have equal size", getName()); + /// It suffices to check that the internal offset columns are equal. + /// The first condition is an optimization: skip the comparison if the offset pointers are equal. + if (array_col_concrete->getOffsetsPtr() != first_array_col_offsets + && array_col_concrete->getOffsets() != typeid_cast(*first_array_col_offsets).getData()) + throw Exception(ErrorCodes::SIZES_OF_ARRAYS_DONT_MATCH, "Arrays passed to {} must have equal size", getName()); } + if (i == 1) { - column_first_array_ptr = column_array_ptr; - column_first_array = column_array; + first_array_col = array_col; + first_array_col_concrete = array_col_concrete; } - arrays.emplace_back(ColumnWithTypeAndName(column_array->getDataPtr(), - recursiveRemoveLowCardinality(array_type->getNestedType()), - array_with_type_and_name.name)); + + ColumnWithTypeAndName data_type_name(array_col_concrete->getDataPtr(), recursiveRemoveLowCardinality(array_type_concrete->getNestedType()), array_with_type_and_name.name); + arrays_data_with_type_and_name.push_back(data_type_name); } - ssize_t rows_count = input_rows_count; - ssize_t data_row_count = arrays[0].column->size(); - size_t array_count = arrays.size(); + const ssize_t num_rows = input_rows_count; /// how many rows are processed + const size_t num_array_cols = arrays_data_with_type_and_name.size(); /// number of given array arguments + const ssize_t num_elements_in_array_col = arrays_data_with_type_and_name[0].column->size(); /// total number of array elements in the 1st array argument (the value is the same for other array arguments) - if (rows_count == 0) + if (num_rows == 0) return arguments.back().column->convertToFullColumnIfConst()->cloneEmpty(); - ColumnPtr current_column = arguments.back().column->convertToFullColumnIfConst(); - MutableColumnPtr result_data = arguments.back().column->convertToFullColumnIfConst()->cloneEmpty(); + const auto & offsets = first_array_col_concrete->getOffsets(); /// the internal offsets column of the first array argument (other array arguments have the same offsets) - size_t max_array_size = 0; - const auto & offsets = column_first_array->getOffsets(); + /// Find the first row which contains a non-empty array + ssize_t first_row_with_non_empty_array = 0; + if (num_elements_in_array_col) + while (offsets[first_row_with_non_empty_array] == 0) + ++first_row_with_non_empty_array; - IColumn::Selector selector(data_row_count); - size_t cur_ind = 0; - ssize_t cur_arr = 0; - - /// skip to the first non empty array - if (data_row_count) - while (offsets[cur_arr] == 0) - ++cur_arr; - - /// selector[i] is an index that i_th data element has in an array it corresponds to - for (ssize_t i = 0; i < data_row_count; ++i) + /// Build a selector which stores, for every array element in the first array argument, its horizontal position (0th, 1st, ...) within the array in the current row + /// Better explained by an example: + /// 0 1 <-- horizontal position + /// row0: ['elem1'] + /// row1: ['elem2', 'elem3'] + /// row2: ['elem4'] + /// --> Selector will contain [0, 0, 1, 0].
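To make the selector construction concrete, here is a standalone sketch on plain vectors rather than the column classes; it mirrors the example from the comment (offsets [1, 3, 4] describe the rows ['elem1'], ['elem2', 'elem3'], ['elem4']):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    const std::vector<size_t> offsets = {1, 3, 4};   // cumulative row ends in the flattened element data
    const size_t num_elements = offsets.back();

    std::vector<size_t> selector(num_elements);
    size_t row = 0;
    size_t pos_in_row = 0;
    for (size_t i = 0; i < num_elements; ++i)
    {
        while (i >= offsets[row])   // advance to the row that actually contains element i (skips empty rows)
        {
            ++row;
            pos_in_row = 0;
        }
        selector[i] = pos_in_row++;
    }

    for (size_t s : selector)
        std::cout << s << ' ';      // prints: 0 0 1 0
    std::cout << '\n';
}
```

This is only the idea; the actual implementation additionally tracks the maximum array size while it walks the offsets.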
+ IColumn::Selector selector(num_elements_in_array_col); + size_t max_array_size = 0; /// cardinality of the array with the most elements in the first array argument + size_t cur_element_in_cur_array = 0; + for (ssize_t i = 0; i < num_elements_in_array_col; ++i) { - selector[i] = cur_ind; - cur_ind++; - if (cur_ind > max_array_size) - max_array_size = cur_ind; - while (cur_arr < rows_count && cur_ind >= offsets[cur_arr] - offsets[cur_arr - 1]) + selector[i] = cur_element_in_cur_array; + ++cur_element_in_cur_array; + if (cur_element_in_cur_array > max_array_size) + max_array_size = cur_element_in_cur_array; + while (first_row_with_non_empty_array < num_rows && cur_element_in_cur_array >= offsets[first_row_with_non_empty_array] - offsets[first_row_with_non_empty_array - 1]) { - ++cur_arr; - cur_ind = 0; + ++first_row_with_non_empty_array; + cur_element_in_cur_array = 0; } } - std::vector data_arrays; - data_arrays.resize(array_count); - - /// Split each data column to columns containing elements of only Nth index in array + /// Based on the selector, scatter elements of the arrays on all rows into vertical slices + /// Example: + /// row0: ['elem1'] + /// row1: ['elem2', 'elem3'] + /// row2: ['elem4'] + /// --> create two slices based on selector [0, 0, 1, 0] + /// - slice0: 'elem1', 'elem2', 'elem4'' + /// - slice1: 'elem3' + std::vector vertical_slices; /// contains for every array argument, a vertical slice for the 0th array element, a vertical slice for the 1st array element, ... + vertical_slices.resize(num_array_cols); if (max_array_size > 0) - for (size_t i = 0; i < array_count; ++i) - data_arrays[i] = arrays[i].column->scatter(max_array_size, selector); + for (size_t i = 0; i < num_array_cols; ++i) + vertical_slices[i] = arrays_data_with_type_and_name[i].column->scatter(max_array_size, selector); - size_t prev_size = rows_count; + ColumnPtr accumulator_col = arguments.back().column->convertToFullColumnIfConst(); + MutableColumnPtr result_col = accumulator_col->cloneEmpty(); + ColumnPtr lambda_col = lambda_function->cloneResized(num_rows); - IColumn::Permutation inverse_permutation(rows_count); - size_t inverse_permutation_count = 0; + IColumn::Permutation inverse_permutation(num_rows); + size_t num_inverse_permutations = 0; - /// current_column after each iteration contains value of accumulator after applying values under indexes of arrays. - /// At each iteration only rows of current_column with arrays that still has unapplied elements are kept. - /// Discarded rows which contain finished calculations are added to result_data column and as we insert them we save their original row_number in inverse_permutation vector - for (size_t ind = 0; ind < max_array_size; ++ind) + /// Iterate the slices. The accumulator value of a row is updated iff the array in the row has at least slice_i-many elements. Since + /// slices become incrementally smaller, fewer and fewer accumulator values are updated in each iteration. Once the calculation for + /// a row is finished (i.e. there are no more slices to process), it is added to the result. Since that happens in random order, + /// we also maintain a mapping to reconstruct the right result order at the end. 
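The scatter step can be illustrated the same way, again on plain vectors instead of the real `IColumn::scatter` call: the selector built above distributes the flattened elements into vertical slices.

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

int main()
{
    const std::vector<std::string> data = {"elem1", "elem2", "elem3", "elem4"};   // flattened first array column
    const std::vector<size_t> selector = {0, 0, 1, 0};                            // as built above
    const size_t max_array_size = 2;                                              // the longest array has 2 elements

    std::vector<std::vector<std::string>> slices(max_array_size);
    for (size_t i = 0; i < data.size(); ++i)
        slices[selector[i]].push_back(data[i]);

    for (size_t s = 0; s < slices.size(); ++s)
    {
        std::cout << "slice" << s << ":";
        for (const auto & value : slices[s])
            std::cout << ' ' << value;
        std::cout << '\n';   // slice0: elem1 elem2 elem4, slice1: elem3
    }
}
```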
+ size_t unfinished_rows = num_rows; /// number of rows to consider in the current iteration + for (size_t slice = 0; slice < max_array_size; ++slice) { - IColumn::Selector prev_selector(prev_size); - size_t prev_ind = 0; - for (ssize_t irow = 0; irow < rows_count; ++irow) + IColumn::Selector prev_selector(unfinished_rows); /// 1 for rows which have slice_i-many elements, otherwise 0 + size_t prev_index = 0; + for (ssize_t row = 0; row < num_rows; ++row) { - if (offsets[irow] - offsets[irow - 1] > ind) - prev_selector[prev_ind++] = 1; - else if (offsets[irow] - offsets[irow - 1] == ind) + size_t num_elements = offsets[row] - offsets[row - 1]; /// cardinality of array on the row + if (num_elements > slice) { - inverse_permutation[inverse_permutation_count++] = irow; - prev_selector[prev_ind++] = 0; + prev_selector[prev_index] = 1; + ++prev_index; + } + else if (num_elements == slice) + { + prev_selector[prev_index] = 0; + ++prev_index; + inverse_permutation[num_inverse_permutations] = row; + ++num_inverse_permutations; } } - auto prev = current_column->scatter(2, prev_selector); - result_data->insertRangeFrom(*(prev[0]), 0, prev[0]->size()); + /// Scatter the accumulator into two columns + /// - one column with accumulator values for rows less than slice-many elements, no further calculation is performed on them + /// - one column with accumulator values for rows with slice-many or more elements, these are updated in this or following iteration + std::vector finished_unfinished_accumulator_values = accumulator_col->scatter(2, prev_selector); + IColumn::MutablePtr & finished_accumulator_values = finished_unfinished_accumulator_values[0]; + IColumn::MutablePtr & unfinished_accumulator_values = finished_unfinished_accumulator_values[1]; - auto res_lambda = lambda_function->cloneResized(prev[1]->size()); - auto * res_lambda_ptr = typeid_cast(res_lambda.get()); + /// Copy finished accumulator values into the result + result_col->insertRangeFrom(*finished_accumulator_values, 0, finished_accumulator_values->size()); - res_lambda_ptr->appendArguments(std::vector({ColumnWithTypeAndName(std::move(prev[1]), arguments.back().type, arguments.back().name)})); - for (size_t i = 0; i < array_count; i++) - res_lambda_ptr->appendArguments(std::vector({ColumnWithTypeAndName(std::move(data_arrays[i][ind]), arrays[i].type, arrays[i].name)})); + /// The lambda function can contain statically bound arguments, in particular their row values. We need to filter for the rows + /// we care about. 
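For reference, the per-row semantics that this slice-wise loop reproduces can be written directly: fold the lambda over each row's array, starting from the initial accumulator. The sketch below uses numbers in place of the string elements from the example above and assumes the lambda `acc + x` with `acc_initial = 1`; the vectorized code computes the same per-row values column-wise and restores the row order via the inverse permutation at the end.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    const std::vector<std::vector<int64_t>> rows = {{1}, {2, 3}, {4}};   // the ragged rows from the example
    const int64_t acc_initial = 1;

    for (const auto & row : rows)
    {
        int64_t acc = acc_initial;
        for (int64_t x : row)
            acc = acc + x;            // the lambda, applied once per array element
        std::cout << acc << '\n';     // prints 2, 6, 5
    }
}
```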
+ IColumn::Filter filter(unfinished_rows); + for (size_t i = 0; i < prev_selector.size(); ++i) + filter[i] = prev_selector[i]; + ColumnPtr lambda_col_filtered = lambda_col->filter(filter, lambda_col->size()); + IColumn::MutablePtr lambda_col_filtered_cloned = lambda_col_filtered->cloneResized(lambda_col_filtered->size()); /// clone so we can bind more arguments + auto * lambda = typeid_cast(lambda_col_filtered_cloned.get()); - current_column = IColumn::mutate(res_lambda_ptr->reduce().column); - prev_size = current_column->size(); + /// Bind arguments to lambda function (accumulator + array arguments) + lambda->appendArguments(std::vector({ColumnWithTypeAndName(std::move(unfinished_accumulator_values), arguments.back().type, arguments.back().name)})); + for (size_t array_col = 0; array_col < num_array_cols; ++array_col) + lambda->appendArguments(std::vector({ColumnWithTypeAndName(std::move(vertical_slices[array_col][slice]), arrays_data_with_type_and_name[array_col].type, arrays_data_with_type_and_name[array_col].name)})); + + /// Perform the actual calculation and copy the result into the accumulator + ColumnWithTypeAndName res_with_type_and_name = lambda->reduce(); + accumulator_col = res_with_type_and_name.column->convertToFullColumnIfConst(); + + unfinished_rows = accumulator_col->size(); + lambda_col = lambda_col_filtered; } - result_data->insertRangeFrom(*current_column, 0, current_column->size()); - for (ssize_t irow = 0; irow < rows_count; ++irow) - if (offsets[irow] - offsets[irow - 1] == max_array_size) - inverse_permutation[inverse_permutation_count++] = irow; + /// Copy the accumulator values of the last iteration into the result. + result_col->insertRangeFrom(*accumulator_col, 0, accumulator_col->size()); - /// We have result_data containing result for every row and inverse_permutation which contains indexes of rows in input it corresponds to. - /// Now we need to invert inverse_permuation and apply it to result_data to get rows in right order. - IColumn::Permutation perm(rows_count); - for (ssize_t i = 0; i < rows_count; i++) - perm[inverse_permutation[i]] = i; - return result_data->permute(perm, 0); + for (ssize_t row = 0; row < num_rows; ++row) + { + size_t num_elements = offsets[row] - offsets[row - 1]; /// cardinality of array on the row + if (num_elements == max_array_size) + { + inverse_permutation[num_inverse_permutations] = row; + ++num_inverse_permutations; + } + } + + /// We have result_col containing the result for every row and inverse_permutation which contains, for each entry of result_col, the index of the input row it corresponds to. + /// Now we need to invert inverse_permutation and apply it to result_col to get the rows in the right order. + IColumn::Permutation perm(num_rows); + for (ssize_t row = 0; row < num_rows; ++row) + perm[inverse_permutation[row]] = row; + return result_col->permute(perm, 0); } private: @@ -228,9 +282,9 @@ private: REGISTER_FUNCTION(ArrayFold) { - factory.registerFunction(FunctionDocumentation{.description=R"( - Function arrayFold(x1,...,xn,accum -> expression, array1,...,arrayn, accum_initial) applies lambda function to a number of equally-sized arrays - and collects the result in an accumulator. - )", .examples{{"sum", "SELECT arrayFold(x,acc -> acc+x, [1,2,3,4], toInt64(1));", "11"}}, .categories{"Array"}}); + factory.registerFunction(FunctionDocumentation{.description=R"( + Function arrayFold(acc,a1,...,aN->expr, arr1, ..., arrN, acc_initial) applies a lambda function to each element + in each (equally-sized) array and collects the result in an accumulator.
+ )", .examples{{"sum", "SELECT arrayFold(acc,x->acc+x, [1,2,3,4], toInt64(1));", "11"}}, .categories{"Array"}}); } } diff --git a/src/Functions/concat.cpp b/src/Functions/concat.cpp index 6403c4b8416..b057e7fede5 100644 --- a/src/Functions/concat.cpp +++ b/src/Functions/concat.cpp @@ -7,10 +7,10 @@ #include #include #include +#include #include #include -#include "formatString.h" namespace DB { @@ -145,13 +145,13 @@ private: } write_helper.finalize(); - /// Same as the normal `ColumnString` branch - has_column_string = true; - data[i] = &converted_col_str->getChars(); - offsets[i] = &converted_col_str->getOffsets(); - /// Keep the pointer alive converted_col_ptrs[i] = std::move(converted_col_str); + + /// Same as the normal `ColumnString` branch + has_column_string = true; + data[i] = &converted_col_ptrs[i]->getChars(); + offsets[i] = &converted_col_ptrs[i]->getOffsets(); } } diff --git a/src/Functions/concatWithSeparator.cpp b/src/Functions/concatWithSeparator.cpp index f295d86943f..b4f3732710f 100644 --- a/src/Functions/concatWithSeparator.cpp +++ b/src/Functions/concatWithSeparator.cpp @@ -4,11 +4,11 @@ #include #include #include +#include #include #include #include -#include "formatString.h" namespace DB { diff --git a/src/Functions/dateDiff.cpp b/src/Functions/dateDiff.cpp index c9c9020f068..f75e6eb4fc8 100644 --- a/src/Functions/dateDiff.cpp +++ b/src/Functions/dateDiff.cpp @@ -412,14 +412,14 @@ private: }; -/** TimeDiff(t1, t2) +/** timeDiff(t1, t2) * t1 and t2 can be Date or DateTime */ class FunctionTimeDiff : public IFunction { using ColumnDateTime64 = ColumnDecimal; public: - static constexpr auto name = "TimeDiff"; + static constexpr auto name = "timeDiff"; static FunctionPtr create(ContextPtr) { return std::make_shared(); } String getName() const override diff --git a/src/Functions/formatString.cpp b/src/Functions/format.cpp similarity index 71% rename from src/Functions/formatString.cpp rename to src/Functions/format.cpp index 8e0b3a238cb..036ff9f0c57 100644 --- a/src/Functions/formatString.cpp +++ b/src/Functions/format.cpp @@ -1,35 +1,33 @@ #include #include +#include #include #include #include #include +#include #include -#include #include #include #include -#include "formatString.h" namespace DB { namespace ErrorCodes { extern const int ILLEGAL_COLUMN; - extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; } namespace { -template class FormatFunction : public IFunction { public: - static constexpr auto name = Name::name; + static constexpr auto name = "format"; static FunctionPtr create(ContextPtr) { return std::make_shared(); } @@ -41,6 +39,7 @@ public: size_t getNumberOfArguments() const override { return 0; } + bool useDefaultImplementationForConstants() const override { return true; } ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {0}; } DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override @@ -52,18 +51,6 @@ public: getName(), arguments.size()); - for (const auto arg_idx : collections::range(0, arguments.size())) - { - const auto * arg = arguments[arg_idx].get(); - if (!isStringOrFixedString(arg)) - throw Exception( - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal type {} of argument {} of function {}", - arg->getName(), - arg_idx + 1, - getName()); - } - return std::make_shared(); } @@ -83,6 +70,7 @@ public: std::vector offsets(arguments.size() - 1); std::vector fixed_string_sizes(arguments.size() - 1); std::vector> constant_strings(arguments.size() - 1); + std::vector 
converted_col_ptrs(arguments.size() - 1); bool has_column_string = false; bool has_column_fixed_string = false; @@ -106,8 +94,29 @@ public: constant_strings[i - 1] = const_col->getValue(); } else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of argument of function {}", - column->getName(), getName()); + { + /// A non-String/non-FixedString-type argument: use the default serialization to convert it to String + auto full_column = column->convertToFullIfNeeded(); + auto serialization = arguments[i].type->getDefaultSerialization(); + auto converted_col_str = ColumnString::create(); + ColumnStringHelpers::WriteHelper write_helper(*converted_col_str, column->size()); + auto & write_buffer = write_helper.getWriteBuffer(); + FormatSettings format_settings; + for (size_t row = 0; row < column->size(); ++row) + { + serialization->serializeText(*full_column, row, write_buffer, format_settings); + write_helper.rowWritten(); + } + write_helper.finalize(); + + /// Keep the pointer alive + converted_col_ptrs[i - 1] = std::move(converted_col_str); + + /// Same as the normal `ColumnString` branch + has_column_string = true; + data[i - 1] = &converted_col_ptrs[i - 1]->getChars(); + offsets[i - 1] = &converted_col_ptrs[i - 1]->getOffsets(); + } } FormatStringImpl::formatExecute( @@ -127,11 +136,7 @@ public: }; -struct NameFormat -{ - static constexpr auto name = "format"; -}; -using FunctionFormat = FormatFunction; +using FunctionFormat = FormatFunction; } diff --git a/src/Functions/formatReadableDecimalSize.cpp b/src/Functions/formatReadableDecimalSize.cpp index b6fd0de8f7b..1aa5abc526e 100644 --- a/src/Functions/formatReadableDecimalSize.cpp +++ b/src/Functions/formatReadableDecimalSize.cpp @@ -1,5 +1,6 @@ #include #include +#include namespace DB diff --git a/src/Functions/formatReadableQuantity.cpp b/src/Functions/formatReadableQuantity.cpp index 682fac88969..483e8a77a0b 100644 --- a/src/Functions/formatReadableQuantity.cpp +++ b/src/Functions/formatReadableQuantity.cpp @@ -1,5 +1,6 @@ #include #include +#include namespace DB diff --git a/src/Functions/formatReadableSize.cpp b/src/Functions/formatReadableSize.cpp index 22505907fa7..5c11603e9d7 100644 --- a/src/Functions/formatReadableSize.cpp +++ b/src/Functions/formatReadableSize.cpp @@ -1,5 +1,6 @@ #include #include +#include namespace DB diff --git a/src/Functions/geoToS2.cpp b/src/Functions/geoToS2.cpp index 8d065b01c34..f27cd26fd9d 100644 --- a/src/Functions/geoToS2.cpp +++ b/src/Functions/geoToS2.cpp @@ -101,19 +101,35 @@ public: const Float64 lon = data_col_lon[row]; const Float64 lat = data_col_lat[row]; - if (isNaN(lon) || isNaN(lat)) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Arguments must not be NaN"); + if (isNaN(lon)) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal argument for longitude in function {}. It must not be NaN", getName()); + if (!isFinite(lon)) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal argument for longitude in function {}. It must not be infinite", + getName()); - if (!(isFinite(lon) && isFinite(lat))) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Arguments must not be infinite"); + if (isNaN(lat)) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal argument for latitude in function {}. It must not be NaN", getName()); + if (!isFinite(lat)) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal argument for latitude in function {}. 
It must not be infinite", + getName()); - /// S2 acceptes point as (latitude, longitude) + /// S2 accepts point as (latitude, longitude) S2LatLng lat_lng = S2LatLng::FromDegrees(lat, lon); if (!lat_lng.is_valid()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Point is invalid. For valid point the latitude is between -90 and 90 degrees inclusive" - "and the longitude is between -180 and 180 degrees inclusive."); + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Point ({}, {}) is invalid in function {}. For valid point the latitude is between -90 and 90 degrees inclusive" + "and the longitude is between -180 and 180 degrees inclusive.", + lon, + lat, + getName()); S2CellId id(lat_lng); diff --git a/src/Functions/h3ToString.cpp b/src/Functions/h3ToString.cpp index 897329ed9ec..f8a10d5252b 100644 --- a/src/Functions/h3ToString.cpp +++ b/src/Functions/h3ToString.cpp @@ -84,7 +84,7 @@ public: const UInt64 hindex = data[row]; if (!isValidCell(hindex)) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Invalid H3 index: {}", hindex); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Invalid H3 index: {} in function {}", hindex, getName()); h3ToString(hindex, pos, H3_INDEX_STRING_LENGTH); diff --git a/src/Functions/if.cpp b/src/Functions/if.cpp index eba1733c683..cae3b720d8b 100644 --- a/src/Functions/if.cpp +++ b/src/Functions/if.cpp @@ -23,7 +23,7 @@ #include #include #include - +#include namespace DB { @@ -42,7 +42,8 @@ using namespace GatherUtils; /** Selection function by condition: if(cond, then, else). * cond - UInt8 * then, else - numeric types for which there is a general type, or dates, datetimes, or strings, or arrays of these types. - */ + * For better performance, try to use branch free code for numeric types(i.e. cond ? a : b --> !!cond * a + !cond * b), except floating point types because of Inf or NaN. +*/ template inline void fillVectorVector(const ArrayCond & cond, const ArrayA & a, const ArrayB & b, ArrayResult & res) @@ -55,24 +56,48 @@ inline void fillVectorVector(const ArrayCond & cond, const ArrayA & a, const Arr { size_t a_index = 0, b_index = 0; for (size_t i = 0; i < size; ++i) - res[i] = cond[i] ? static_cast(a[a_index++]) : static_cast(b[b_index++]); + { + if constexpr (std::is_integral_v) + { + res[i] = !!cond[i] * static_cast(a[a_index]) + (!cond[i]) * static_cast(b[b_index]); + a_index += !!cond[i]; + b_index += !cond[i]; + } + else + res[i] = cond[i] ? static_cast(a[a_index++]) : static_cast(b[b_index++]); + } } else if (a_is_short) { size_t a_index = 0; for (size_t i = 0; i < size; ++i) - res[i] = cond[i] ? static_cast(a[a_index++]) : static_cast(b[i]); + if constexpr (std::is_integral_v) + { + res[i] = !!cond[i] * static_cast(a[a_index]) + (!cond[i]) * static_cast(b[i]); + a_index += !!cond[i]; + } + else + res[i] = cond[i] ? static_cast(a[a_index++]) : static_cast(b[i]); } else if (b_is_short) { size_t b_index = 0; for (size_t i = 0; i < size; ++i) - res[i] = cond[i] ? static_cast(a[i]) : static_cast(b[b_index++]); + if constexpr (std::is_integral_v) + { + res[i] = !!cond[i] * static_cast(a[i]) + (!cond[i]) * static_cast(b[b_index]); + b_index += !cond[i]; + } + else + res[i] = cond[i] ? static_cast(a[i]) : static_cast(b[b_index++]); } else { for (size_t i = 0; i < size; ++i) - res[i] = cond[i] ? static_cast(a[i]) : static_cast(b[i]); + if constexpr (std::is_integral_v) + res[i] = !!cond[i] * static_cast(a[i]) + (!cond[i]) * static_cast(b[i]); + else + res[i] = cond[i] ? 
static_cast(a[i]) : static_cast(b[i]); } } @@ -85,12 +110,21 @@ inline void fillVectorConstant(const ArrayCond & cond, const ArrayA & a, B b, Ar { size_t a_index = 0; for (size_t i = 0; i < size; ++i) - res[i] = cond[i] ? static_cast(a[a_index++]) : static_cast(b); + if constexpr (std::is_integral_v) + { + res[i] = !!cond[i] * static_cast(a[a_index]) + (!cond[i]) * static_cast(b); + a_index += !!cond[i]; + } + else + res[i] = cond[i] ? static_cast(a[a_index++]) : static_cast(b); } else { for (size_t i = 0; i < size; ++i) - res[i] = cond[i] ? static_cast(a[i]) : static_cast(b); + if constexpr (std::is_integral_v) + res[i] = !!cond[i] * static_cast(a[i]) + (!cond[i]) * static_cast(b); + else + res[i] = cond[i] ? static_cast(a[i]) : static_cast(b); } } @@ -103,12 +137,21 @@ inline void fillConstantVector(const ArrayCond & cond, A a, const ArrayB & b, Ar { size_t b_index = 0; for (size_t i = 0; i < size; ++i) - res[i] = cond[i] ? static_cast(a) : static_cast(b[b_index++]); + if constexpr (std::is_integral_v) + { + res[i] = !!cond[i] * static_cast(a) + (!cond[i]) * static_cast(b[b_index]); + b_index += !cond[i]; + } + else + res[i] = cond[i] ? static_cast(a) : static_cast(b[b_index++]); } else { for (size_t i = 0; i < size; ++i) - res[i] = cond[i] ? static_cast(a) : static_cast(b[i]); + if constexpr (std::is_integral_v) + res[i] = !!cond[i] * static_cast(a) + (!cond[i]) * static_cast(b[i]); + else + res[i] = cond[i] ? static_cast(a) : static_cast(b[i]); } } diff --git a/src/Functions/normalizeQuery.cpp b/src/Functions/normalizeQuery.cpp index b01dac3909a..ad9a8903733 100644 --- a/src/Functions/normalizeQuery.cpp +++ b/src/Functions/normalizeQuery.cpp @@ -3,8 +3,6 @@ #include #include #include -#include -#include namespace DB @@ -34,10 +32,12 @@ struct Impl for (size_t i = 0; i < size; ++i) { ColumnString::Offset curr_src_offset = offsets[i]; - normalizeQueryToPODArray( + + normalizeQueryToPODArray( reinterpret_cast(&data[prev_src_offset]), reinterpret_cast(&data[curr_src_offset - 1]), - res_data); + res_data, keep_names); + prev_src_offset = offsets[i]; res_offsets[i] = res_data.size(); } @@ -58,4 +58,3 @@ REGISTER_FUNCTION(NormalizeQuery) } } - diff --git a/src/Functions/normalizedQueryHash.cpp b/src/Functions/normalizedQueryHash.cpp index d2ccb1c016d..63218f28af5 100644 --- a/src/Functions/normalizedQueryHash.cpp +++ b/src/Functions/normalizedQueryHash.cpp @@ -4,9 +4,6 @@ #include #include #include -#include -#include -#include /** The function returns 64bit hash value that is identical for similar queries. 
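Coming back to the branch-free rewrite in src/Functions/if.cpp above: for integral types, `cond ? a : b` can be computed as `!!cond * a + !cond * b`, which avoids a data-dependent branch; floating-point types are excluded because multiplying Inf or NaN by zero does not yield zero. A minimal standalone sketch of the technique (not the ClickHouse column code):

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    const std::vector<uint8_t> cond = {1, 0, 1, 0};
    const std::vector<int64_t> a = {10, 20, 30, 40};
    const std::vector<int64_t> b = {-1, -2, -3, -4};

    std::vector<int64_t> res(cond.size());
    for (size_t i = 0; i < cond.size(); ++i)
        res[i] = !!cond[i] * a[i] + !cond[i] * b[i];   // select without a data-dependent branch

    for (int64_t v : res)
        std::cout << v << ' ';   // prints: 10 -2 30 -4
    std::cout << '\n';
}
```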
@@ -39,8 +36,10 @@ struct Impl for (size_t i = 0; i < size; ++i) { ColumnString::Offset curr_src_offset = offsets[i]; - res_data[i] = normalizedQueryHash( - reinterpret_cast(&data[prev_src_offset]), reinterpret_cast(&data[curr_src_offset - 1])); + res_data[i] = normalizedQueryHash( + reinterpret_cast(&data[prev_src_offset]), + reinterpret_cast(&data[curr_src_offset - 1]), + keep_names); prev_src_offset = offsets[i]; } } @@ -104,4 +103,3 @@ REGISTER_FUNCTION(NormalizedQueryHash) } } - diff --git a/src/Functions/now64.cpp b/src/Functions/now64.cpp index 349b8c71145..0f1e8a04236 100644 --- a/src/Functions/now64.cpp +++ b/src/Functions/now64.cpp @@ -30,7 +30,7 @@ Field nowSubsecond(UInt32 scale) timespec spec{}; if (clock_gettime(CLOCK_REALTIME, &spec)) - throwFromErrno("Cannot clock_gettime.", ErrorCodes::CANNOT_CLOCK_GETTIME); + throw ErrnoException(ErrorCodes::CANNOT_CLOCK_GETTIME, "Cannot clock_gettime"); DecimalUtils::DecimalComponents components{spec.tv_sec, spec.tv_nsec}; diff --git a/src/Functions/randDistribution.cpp b/src/Functions/randDistribution.cpp index db101486de8..4e616ada697 100644 --- a/src/Functions/randDistribution.cpp +++ b/src/Functions/randDistribution.cpp @@ -1,7 +1,8 @@ #include #include #include -#include "Common/Exception.h" +#include +#include #include #include #include diff --git a/src/Functions/reverseDNSQuery.cpp b/src/Functions/reverseDNSQuery.cpp deleted file mode 100644 index b4d963a6a15..00000000000 --- a/src/Functions/reverseDNSQuery.cpp +++ /dev/null @@ -1,118 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int BAD_ARGUMENTS; - extern const int FUNCTION_NOT_ALLOWED; -} - -class ReverseDNSQuery : public IFunction -{ -public: - static constexpr auto name = "reverseDNSQuery"; - static constexpr auto allow_function_config_name = "allow_reverse_dns_query_function"; - - static FunctionPtr create(ContextPtr) - { - return std::make_shared(); - } - - String getName() const override - { - return name; - } - - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & data_type, size_t input_rows_count) const override - { - if (!Context::getGlobalContextInstance()->getConfigRef().getBool(allow_function_config_name, false)) - { - throw Exception(ErrorCodes::FUNCTION_NOT_ALLOWED, "Function {} is not allowed because {} is not set", name, allow_function_config_name); - } - - if (arguments.empty()) - { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires at least one argument", name); - } - - auto res_type = getReturnTypeImpl({data_type}); - - if (input_rows_count == 0u) - { - return res_type->createColumnConstWithDefaultValue(input_rows_count); - } - - if (!isString(arguments[0].type)) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function {} requires the input column to be of type String", name); - } - - auto input_column = arguments[0].column; - - auto ip_address = Poco::Net::IPAddress(input_column->getDataAt(0).toString()); - - auto ptr_records = DNSResolver::instance().reverseResolve(ip_address); - - if (ptr_records.empty()) - return res_type->createColumnConstWithDefaultValue(input_rows_count); - - Array res; - - for (const auto & ptr_record : ptr_records) - { - res.push_back(ptr_record); - } - - return res_type->createColumnConst(input_rows_count, res); - } - - bool isSuitableForShortCircuitArgumentsExecution(const 
DataTypesWithConstInfo & /*arguments*/) const override - { - return false; - } - - size_t getNumberOfArguments() const override - { - return 1u; - } - - DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments*/) const override - { - return std::make_shared(std::make_shared()); - } - -}; - - -REGISTER_FUNCTION(ReverseDNSQuery) -{ - factory.registerFunction( - FunctionDocumentation{ - .description = R"(Performs a reverse DNS query to get the PTR records associated with the IP address)", - .syntax = "reverseDNSQuery(address)", - .arguments = {{"address", "An IPv4 or IPv6 address. [String](../../sql-reference/data-types/string.md)"}}, - .returned_value = "Associated domains (PTR records). [String](../../sql-reference/data-types/string.md).", - .examples = {{"", - "SELECT reverseDNSQuery('192.168.0.2');", -R"( -┌─reverseDNSQuery('192.168.0.2')────────────┐ -│ ['test2.example.com','test3.example.com'] │ -└───────────────────────────────────────────┘ -)"}} - } - ); -} - -} diff --git a/src/Functions/s2CapContains.cpp b/src/Functions/s2CapContains.cpp index 9dfbc05a6a0..72e9da69a7d 100644 --- a/src/Functions/s2CapContains.cpp +++ b/src/Functions/s2CapContains.cpp @@ -131,16 +131,16 @@ public: const auto point = S2CellId(data_point[row]); if (isNaN(degrees)) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Radius of the cap must not be nan"); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Radius of the cap must not be nan in function {}", getName()); if (std::isinf(degrees)) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Radius of the cap must not be infinite"); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Radius of the cap must not be infinite in function {}", getName()); if (!center.is_valid()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Center is not valid"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Center (id {}) is not valid in function {}", data_center[row], getName()); if (!point.is_valid()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Point is not valid"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Point (id {}) is not valid in function {}", data_point[row], getName()); S1Angle angle = S1Angle::Degrees(degrees); S2Cap cap(center.ToPoint(), angle); diff --git a/src/Functions/s2CellsIntersect.cpp b/src/Functions/s2CellsIntersect.cpp index 1fac5fd6e60..320f3c964a2 100644 --- a/src/Functions/s2CellsIntersect.cpp +++ b/src/Functions/s2CellsIntersect.cpp @@ -100,10 +100,12 @@ public: const UInt64 id_second = data_id_second[row]; auto first_cell = S2CellId(id_first); - auto second_cell = S2CellId(id_second); + if (!first_cell.is_valid()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "First cell (id {}) is not valid in function {}", id_first, getName()); - if (!first_cell.is_valid() || !second_cell.is_valid()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cell is not valid"); + auto second_cell = S2CellId(id_second); + if (!second_cell.is_valid()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Second cell (id {}) is not valid in function {}", id_second, getName()); dst_data.emplace_back(S2CellId(id_first).intersects(S2CellId(id_second))); } diff --git a/src/Functions/s2GetNeighbors.cpp b/src/Functions/s2GetNeighbors.cpp index b200f61315b..a6371b9ff68 100644 --- a/src/Functions/s2GetNeighbors.cpp +++ b/src/Functions/s2GetNeighbors.cpp @@ -94,7 +94,7 @@ public: S2CellId cell_id(id); if (!cell_id.is_valid()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cell is not valid"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cell (id {}) is not valid in 
function {}", id, getName()); S2CellId neighbors[4]; cell_id.GetEdgeNeighbors(neighbors); diff --git a/src/Functions/seriesPeriodDetectFFT.cpp b/src/Functions/seriesPeriodDetectFFT.cpp new file mode 100644 index 00000000000..61e3319d810 --- /dev/null +++ b/src/Functions/seriesPeriodDetectFFT.cpp @@ -0,0 +1,227 @@ +#include "config.h" + +#if USE_POCKETFFT +# ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wshadow" +# pragma clang diagnostic ignored "-Wextra-semi-stmt" +# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" +# endif + +# include + +# ifdef __clang__ +# pragma clang diagnostic pop +# endif + +# include +# include +# include +# include +# include +# include +# include +# include + + +namespace DB +{ +namespace ErrorCodes +{ +extern const int ILLEGAL_COLUMN; +} + +/*Detect Period in time series data using FFT. + * FFT - Fast Fourier transform (https://en.wikipedia.org/wiki/Fast_Fourier_transform) + * 1. Convert time series data to frequency domain using FFT. + * 2. Remove the 0th(the Dc component) and n/2th the Nyquist frequency + * 3. Find the peak value (highest) for dominant frequency component. + * 4. Inverse of the dominant frequency component is the period. +*/ + +class FunctionSeriesPeriodDetectFFT : public IFunction +{ +public: + static constexpr auto name = "seriesPeriodDetectFFT"; + + static FunctionPtr create(ContextPtr) { return std::make_shared(); } + + std::string getName() const override { return name; } + + size_t getNumberOfArguments() const override { return 1; } + + bool useDefaultImplementationForConstants() const override { return true; } + + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } + + DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override + { + FunctionArgumentDescriptors args{{"time_series", &isArray, nullptr, "Array"}}; + validateFunctionArgumentTypes(*this, arguments, args); + + return std::make_shared(); + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override + { + ColumnPtr array_ptr = arguments[0].column; + const ColumnArray * array = checkAndGetColumn(array_ptr.get()); + + const IColumn & src_data = array->getData(); + const ColumnArray::Offsets & offsets = array->getOffsets(); + + auto res = ColumnFloat64::create(input_rows_count); + auto & res_data = res->getData(); + + ColumnArray::Offset prev_src_offset = 0; + + Float64 period; + for (size_t i = 0; i < input_rows_count; ++i) + { + ColumnArray::Offset curr_offset = offsets[i]; + if (executeNumbers(src_data, period, prev_src_offset, curr_offset) + || executeNumbers(src_data, period, prev_src_offset, curr_offset) + || executeNumbers(src_data, period, prev_src_offset, curr_offset) + || executeNumbers(src_data, period, prev_src_offset, curr_offset) + || executeNumbers(src_data, period, prev_src_offset, curr_offset) + || executeNumbers(src_data, period, prev_src_offset, curr_offset) + || executeNumbers(src_data, period, prev_src_offset, curr_offset) + || executeNumbers(src_data, period, prev_src_offset, curr_offset) + || executeNumbers(src_data, period, prev_src_offset, curr_offset) + || executeNumbers(src_data, period, prev_src_offset, curr_offset)) + { + res_data[i] = period; + prev_src_offset = curr_offset; + } + else + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of first argument of function {}", + arguments[0].column->getName(), + 
getName()); + } + return res; + } + + template + bool executeNumbers(const IColumn & src_data, Float64 & period, ColumnArray::Offset & start, ColumnArray::Offset & end) const + { + const ColumnVector * src_data_concrete = checkAndGetColumn>(&src_data); + if (!src_data_concrete) + return false; + + const PaddedPODArray & src_vec = src_data_concrete->getData(); + + chassert(start <= end); + size_t len = end - start; + if (len < 4) + { + period = NAN; // At least four data points are required to detect period + return true; + } + + std::vector src((src_vec.begin() + start), (src_vec.begin() + end)); + std::vector> out((len / 2) + 1); + + pocketfft::shape_t shape{len}; + + pocketfft::shape_t axes; + axes.reserve(shape.size()); + for (size_t i = 0; i < shape.size(); ++i) + axes.push_back(i); + + pocketfft::stride_t stride_src{sizeof(double)}; + pocketfft::stride_t stride_out{sizeof(std::complex)}; + + pocketfft::r2c(shape, stride_src, stride_out, axes, pocketfft::FORWARD, src.data(), out.data(), static_cast(1)); + + size_t spec_len = (len - 1) / 2; //removing the nyquist element when len is even + + double max_mag = 0; + size_t idx = 1; + for (size_t i = 1; i < spec_len; ++i) + { + double magnitude = sqrt(out[i].real() * out[i].real() + out[i].imag() * out[i].imag()); + if (magnitude > max_mag) + { + max_mag = magnitude; + idx = i; + } + } + + // In case all FFT values are zero, it means the input signal is flat. + // It implies the period of the series should be 0. + if (max_mag == 0) + { + period = 0; + return true; + } + + std::vector xfreq(spec_len); + double step = 0.5 / (spec_len - 1); + for (size_t i = 0; i < spec_len; ++i) + xfreq[i] = i * step; + + auto freq = xfreq[idx]; + + period = std::round(1 / freq); + return true; + } +}; + +REGISTER_FUNCTION(SeriesPeriodDetectFFT) +{ + factory.registerFunction(FunctionDocumentation{ + .description = R"( +Finds the period of the given time series data using FFT +FFT - Fast Fourier transform (https://en.wikipedia.org/wiki/Fast_Fourier_transform) + +**Syntax** + +``` sql +seriesPeriodDetectFFT(series); +``` + +**Arguments** + +- `series` - An array of numeric values + +**Returned value** + +- A real value equal to the period of time series +- Returns NAN when number of data points are less than four. + +Type: [Float64](../../sql-reference/data-types/float.md). + +**Examples** + +Query: + +``` sql +SELECT seriesPeriodDetectFFT([1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6]) AS print_0; +``` + +Result: + +``` text +┌───────────print_0──────┐ +│ 3 │ +└────────────────────────┘ +``` + +``` sql +SELECT seriesPeriodDetectFFT(arrayMap(x -> abs((x % 6) - 3), range(1000))) AS print_0; +``` + +Result: + +``` text +┌─print_0─┐ +│ 6 │ +└─────────┘ +``` +)", + .categories{"Time series analysis"}}); +} +} +#endif diff --git a/src/Functions/sleep.h b/src/Functions/sleep.h index 160a8a2afe2..73d58ca6b5b 100644 --- a/src/Functions/sleep.h +++ b/src/Functions/sleep.h @@ -10,12 +10,14 @@ #include #include #include +#include namespace ProfileEvents { extern const Event SleepFunctionCalls; extern const Event SleepFunctionMicroseconds; +extern const Event SleepFunctionElapsedMicroseconds; } namespace DB @@ -43,15 +45,20 @@ class FunctionSleep : public IFunction { private: UInt64 max_microseconds; + QueryStatusPtr query_status; + public: static constexpr auto name = variant == FunctionSleepVariant::PerBlock ? 
"sleep" : "sleepEachRow"; static FunctionPtr create(ContextPtr context) { - return std::make_shared>(context->getSettingsRef().function_sleep_max_microseconds_per_block); + return std::make_shared>( + context->getSettingsRef().function_sleep_max_microseconds_per_block, + context->getProcessListElementSafe()); } - FunctionSleep(UInt64 max_microseconds_) + FunctionSleep(UInt64 max_microseconds_, QueryStatusPtr query_status_) : max_microseconds(std::min(max_microseconds_, static_cast(std::numeric_limits::max()))) + , query_status(query_status_) { } @@ -128,9 +135,23 @@ public: "The maximum sleep time is {} microseconds. Requested: {} microseconds per block (of size {})", max_microseconds, microseconds, size); - sleepForMicroseconds(microseconds); + UInt64 elapsed = 0; + while (elapsed < microseconds) + { + UInt64 sleep_time = microseconds - elapsed; + if (query_status) + sleep_time = std::min(sleep_time, /* 1 second */ static_cast(1000000)); + + sleepForMicroseconds(sleep_time); + elapsed += sleep_time; + + if (query_status && !query_status->checkTimeLimit()) + break; + } + ProfileEvents::increment(ProfileEvents::SleepFunctionCalls, count); ProfileEvents::increment(ProfileEvents::SleepFunctionMicroseconds, microseconds); + ProfileEvents::increment(ProfileEvents::SleepFunctionElapsedMicroseconds, elapsed); } } diff --git a/src/Functions/stringToH3.cpp b/src/Functions/stringToH3.cpp index d8728b346d0..94418efdfdf 100644 --- a/src/Functions/stringToH3.cpp +++ b/src/Functions/stringToH3.cpp @@ -88,7 +88,7 @@ private: if (res_data[row_num] == 0) { - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Invalid H3 index: {}", h3index_str); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Invalid H3 index: {} in function {}", h3index_str, name); } h3index_source.next(); diff --git a/src/Functions/substring.cpp b/src/Functions/substring.cpp index 7678692f612..e3dfdf3de5e 100644 --- a/src/Functions/substring.cpp +++ b/src/Functions/substring.cpp @@ -1,15 +1,16 @@ -#include -#include -#include #include +#include +#include +#include +#include #include #include -#include +#include #include -#include #include #include -#include +#include +#include #include @@ -20,101 +21,102 @@ using namespace GatherUtils; namespace ErrorCodes { - extern const int ILLEGAL_COLUMN; - extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int ZERO_ARRAY_OR_TUPLE_INDEX; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +extern const int ILLEGAL_COLUMN; +extern const int ILLEGAL_TYPE_OF_ARGUMENT; +extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +extern const int ZERO_ARRAY_OR_TUPLE_INDEX; } namespace { /// If 'is_utf8' - measure offset and length in code points instead of bytes. -/// UTF8 variant is not available for FixedString arguments. template class FunctionSubstring : public IFunction { public: static constexpr auto name = is_utf8 ? 
"substringUTF8" : "substring"; - static FunctionPtr create(ContextPtr) - { - return std::make_shared(); - } - - String getName() const override - { - return name; - } + static FunctionPtr create(ContextPtr) { return std::make_shared(); } + String getName() const override { return name; } bool isVariadic() const override { return true; } size_t getNumberOfArguments() const override { return 0; } - bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } - bool useDefaultImplementationForConstants() const override { return true; } DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override { - size_t number_of_arguments = arguments.size(); + const size_t number_of_arguments = arguments.size(); if (number_of_arguments < 2 || number_of_arguments > 3) throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Number of arguments for function {} doesn't match: " "passed {}, should be 2 or 3", getName(), number_of_arguments); - if ((is_utf8 && !isString(arguments[0])) || !isStringOrFixedString(arguments[0])) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}", - arguments[0]->getName(), getName()); + if constexpr (is_utf8) + { + /// UTF8 variant is not available for FixedString and Enum arguments. + if (!isString(arguments[0])) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of first argument of function {}, expected String", + arguments[0]->getName(), getName()); + } + else + { + if (!isStringOrFixedString(arguments[0]) && !isEnum(arguments[0])) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of first argument of function {}, expected String, FixedString or Enum", + arguments[0]->getName(), getName()); + } if (!isNativeNumber(arguments[1])) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of second argument of function {}", - arguments[1]->getName(), getName()); + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of second argument of function {}, expected (U)Int*", + arguments[1]->getName(), getName()); if (number_of_arguments == 3 && !isNativeNumber(arguments[2])) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of second argument of function {}", - arguments[2]->getName(), getName()); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of second argument of function {}, expected (U)Int*", + arguments[2]->getName(), getName()); return std::make_shared(); } template - ColumnPtr executeForSource(const ColumnPtr & column_start, const ColumnPtr & column_length, - const ColumnConst * column_start_const, const ColumnConst * column_length_const, - Int64 start_value, Int64 length_value, Source && source, - size_t input_rows_count) const + ColumnPtr executeForSource(const ColumnPtr & column_offset, const ColumnPtr & column_length, + bool column_offset_const, bool column_length_const, + Int64 offset, Int64 length, + Source && source, size_t input_rows_count) const { auto col_res = ColumnString::create(); if (!column_length) { - if (column_start_const) + if (column_offset_const) { - if (start_value > 0) - sliceFromLeftConstantOffsetUnbounded( - source, StringSink(*col_res, input_rows_count), static_cast(start_value - 1)); - else if (start_value < 0) - sliceFromRightConstantOffsetUnbounded( - source, StringSink(*col_res, input_rows_count), -static_cast(start_value)); + if (offset > 0) + 
sliceFromLeftConstantOffsetUnbounded(source, StringSink(*col_res, input_rows_count), static_cast(offset - 1)); + else if (offset < 0) + sliceFromRightConstantOffsetUnbounded(source, StringSink(*col_res, input_rows_count), -static_cast(offset)); else throw Exception(ErrorCodes::ZERO_ARRAY_OR_TUPLE_INDEX, "Indices in strings are 1-based"); } else - sliceDynamicOffsetUnbounded(source, StringSink(*col_res, input_rows_count), *column_start); + sliceDynamicOffsetUnbounded(source, StringSink(*col_res, input_rows_count), *column_offset); } else { - if (column_start_const && column_length_const) + if (column_offset_const && column_length_const) { - if (start_value > 0) - sliceFromLeftConstantOffsetBounded( - source, StringSink(*col_res, input_rows_count), static_cast(start_value - 1), length_value); - else if (start_value < 0) - sliceFromRightConstantOffsetBounded( - source, StringSink(*col_res, input_rows_count), -static_cast(start_value), length_value); + if (offset > 0) + sliceFromLeftConstantOffsetBounded(source, StringSink(*col_res, input_rows_count), static_cast(offset - 1), length); + else if (offset < 0) + sliceFromRightConstantOffsetBounded(source, StringSink(*col_res, input_rows_count), -static_cast(offset), length); else throw Exception(ErrorCodes::ZERO_ARRAY_OR_TUPLE_INDEX, "Indices in strings are 1-based"); } else - sliceDynamicOffsetBounded(source, StringSink(*col_res, input_rows_count), *column_start, *column_length); + sliceDynamicOffsetBounded(source, StringSink(*col_res, input_rows_count), *column_offset, *column_length); } return col_res; @@ -122,58 +124,60 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { - size_t number_of_arguments = arguments.size(); + const size_t number_of_arguments = arguments.size(); ColumnPtr column_string = arguments[0].column; - ColumnPtr column_start = arguments[1].column; + ColumnPtr column_offset = arguments[1].column; ColumnPtr column_length; - if (number_of_arguments == 3) column_length = arguments[2].column; - const ColumnConst * column_start_const = checkAndGetColumn(column_start.get()); + const ColumnConst * column_offset_const = checkAndGetColumn(column_offset.get()); const ColumnConst * column_length_const = nullptr; - if (number_of_arguments == 3) column_length_const = checkAndGetColumn(column_length.get()); - Int64 start_value = 0; - Int64 length_value = 0; + Int64 offset = 0; + Int64 length = 0; - if (column_start_const) - start_value = column_start_const->getInt(0); + if (column_offset_const) + offset = column_offset_const->getInt(0); if (column_length_const) - length_value = column_length_const->getInt(0); + length = column_length_const->getInt(0); if constexpr (is_utf8) { if (const ColumnString * col = checkAndGetColumn(column_string.get())) - return executeForSource(column_start, column_length, column_start_const, column_length_const, start_value, - length_value, UTF8StringSource(*col), input_rows_count); - else if (const ColumnConst * col_const = checkAndGetColumnConst(column_string.get())) - return executeForSource(column_start, column_length, column_start_const, column_length_const, start_value, - length_value, ConstSource(*col_const), input_rows_count); - else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - arguments[0].column->getName(), getName()); + return executeForSource(column_offset, column_length, column_offset_const, column_length_const, offset, length, UTF8StringSource(*col), 
input_rows_count); + if (const ColumnConst * col_const = checkAndGetColumnConst(column_string.get())) + return executeForSource(column_offset, column_length, column_offset_const, column_length_const, offset, length, ConstSource(*col_const), input_rows_count); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", arguments[0].column->getName(), getName()); } else { if (const ColumnString * col = checkAndGetColumn(column_string.get())) - return executeForSource(column_start, column_length, column_start_const, column_length_const, start_value, - length_value, StringSource(*col), input_rows_count); - else if (const ColumnFixedString * col_fixed = checkAndGetColumn(column_string.get())) - return executeForSource(column_start, column_length, column_start_const, column_length_const, start_value, - length_value, FixedStringSource(*col_fixed), input_rows_count); - else if (const ColumnConst * col_const = checkAndGetColumnConst(column_string.get())) - return executeForSource(column_start, column_length, column_start_const, column_length_const, start_value, - length_value, ConstSource(*col_const), input_rows_count); - else if (const ColumnConst * col_const_fixed = checkAndGetColumnConst(column_string.get())) - return executeForSource(column_start, column_length, column_start_const, column_length_const, start_value, - length_value, ConstSource(*col_const_fixed), input_rows_count); - else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", - arguments[0].column->getName(), getName()); + return executeForSource(column_offset, column_length, column_offset_const, column_length_const, offset, length, StringSource(*col), input_rows_count); + if (const ColumnFixedString * col_fixed = checkAndGetColumn(column_string.get())) + return executeForSource(column_offset, column_length, column_offset_const, column_length_const, offset, length, FixedStringSource(*col_fixed), input_rows_count); + if (const ColumnConst * col_const = checkAndGetColumnConst(column_string.get())) + return executeForSource(column_offset, column_length, column_offset_const, column_length_const, offset, length, ConstSource(*col_const), input_rows_count); + if (const ColumnConst * col_const_fixed = checkAndGetColumnConst(column_string.get())) + return executeForSource(column_offset, column_length, column_offset_const, column_length_const, offset, length, ConstSource(*col_const_fixed), input_rows_count); + if (isEnum(arguments[0].type)) + { + if (const typename DataTypeEnum8::ColumnType * col_enum8 = checkAndGetColumn(column_string.get())) + { + const auto * type_enum8 = assert_cast(arguments[0].type.get()); + return executeForSource(column_offset, column_length, column_offset_const, column_length_const, offset, length, EnumSource(*col_enum8, *type_enum8), input_rows_count); + } + if (const typename DataTypeEnum16::ColumnType * col_enum16 = checkAndGetColumn(column_string.get())) + { + const auto * type_enum16 = assert_cast(arguments[0].type.get()); + return executeForSource(column_offset, column_length, column_offset_const, column_length_const, offset, length, EnumSource(*col_enum16, *type_enum16), input_rows_count); + } + } + + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", arguments[0].column->getName(), getName()); } } }; @@ -183,8 +187,8 @@ public: REGISTER_FUNCTION(Substring) { factory.registerFunction>({}, FunctionFactory::CaseInsensitive); - factory.registerAlias("substr", "substring", 
FunctionFactory::CaseInsensitive); - factory.registerAlias("mid", "substring", FunctionFactory::CaseInsensitive); /// from MySQL dialect + factory.registerAlias("substr", "substring", FunctionFactory::CaseInsensitive); // MySQL alias + factory.registerAlias("mid", "substring", FunctionFactory::CaseInsensitive); /// MySQL alias factory.registerFunction>({}, FunctionFactory::CaseSensitive); } diff --git a/src/Functions/toStartOfInterval.cpp b/src/Functions/toStartOfInterval.cpp index 48f60dddb33..ea0ad139481 100644 --- a/src/Functions/toStartOfInterval.cpp +++ b/src/Functions/toStartOfInterval.cpp @@ -1,7 +1,7 @@ -#include -#include #include #include +#include +#include #include #include #include @@ -11,6 +11,7 @@ #include #include #include +#include namespace DB @@ -24,9 +25,6 @@ namespace ErrorCodes } -namespace -{ - class FunctionToStartOfInterval : public IFunction { public: @@ -34,86 +32,90 @@ public: static constexpr auto name = "toStartOfInterval"; String getName() const override { return name; } - bool isVariadic() const override { return true; } size_t getNumberOfArguments() const override { return 0; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } - bool useDefaultImplementationForConstants() const override { return true; } ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2}; } - bool hasInformationAboutMonotonicity() const override { return true; } - Monotonicity getMonotonicityForRange(const IDataType &, const Field &, const Field &) const override - { - return { .is_monotonic = true, .is_always_monotonic = true }; - } + Monotonicity getMonotonicityForRange(const IDataType &, const Field &, const Field &) const override { return { .is_monotonic = true, .is_always_monotonic = true }; } DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { - bool first_argument_is_date = false; + bool value_is_date = false; auto check_first_argument = [&] { - if (!isDate(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type)) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}. " - "Should be a date or a date with time", arguments[0].type->getName(), getName()); - first_argument_is_date = isDate(arguments[0].type); + const DataTypePtr & type_arg1 = arguments[0].type; + if (!isDate(type_arg1) && !isDateTime(type_arg1) && !isDateTime64(type_arg1)) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of 1st argument of function {}, expected a Date, DateTime or DateTime64", + type_arg1->getName(), getName()); + value_is_date = isDate(type_arg1); }; const DataTypeInterval * interval_type = nullptr; - bool result_type_is_date = false; - bool result_type_is_datetime = false; - bool result_type_is_datetime_64 = false; - auto check_interval_argument = [&] + enum class ResultType { - interval_type = checkAndGetDataType(arguments[1].type.get()); + Date, + DateTime, + DateTime64 + }; + ResultType result_type; + auto check_second_argument = [&] + { + const DataTypePtr & type_arg2 = arguments[1].type; + + interval_type = checkAndGetDataType(type_arg2.get()); if (!interval_type) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}. 
" - "Should be an interval of time", arguments[1].type->getName(), getName()); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of 2nd argument of function {}, expected a time interval", + type_arg2->getName(), getName()); + switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case) { case IntervalKind::Nanosecond: case IntervalKind::Microsecond: case IntervalKind::Millisecond: - result_type_is_datetime_64 = true; + result_type = ResultType::DateTime64; break; case IntervalKind::Second: case IntervalKind::Minute: case IntervalKind::Hour: - case IntervalKind::Day: - result_type_is_datetime = true; + case IntervalKind::Day: /// weird why Day leads to DateTime but too afraid to change it + result_type = ResultType::DateTime; break; case IntervalKind::Week: case IntervalKind::Month: case IntervalKind::Quarter: case IntervalKind::Year: - result_type_is_date = true; + result_type = ResultType::Date; break; } }; - auto check_timezone_argument = [&] + auto check_third_argument = [&] { - if (!isString(arguments[2].type)) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}. " - "This argument is optional and must be a constant string with timezone name", - arguments[2].type->getName(), getName()); - if (first_argument_is_date && result_type_is_date) + const DataTypePtr & type_arg3 = arguments[2].type; + if (!isString(type_arg3)) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "The timezone argument of function {} with interval type {} is allowed only when the 1st argument " - "has the type DateTime or DateTime64", - getName(), interval_type->getKind().toString()); + "Illegal type {} of 3rd argument of function {}, expected a constant timezone string", + type_arg3->getName(), getName()); + if (value_is_date && result_type == ResultType::Date) /// weird why this is && instead of || but too afraid to change it + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "The timezone argument of function {} with interval type {} is allowed only when the 1st argument has type DateTime or DateTimt64", + getName(), interval_type->getKind().toString()); }; if (arguments.size() == 2) { check_first_argument(); - check_interval_argument(); + check_second_argument(); } else if (arguments.size() == 3) { check_first_argument(); - check_interval_argument(); - check_timezone_argument(); + check_second_argument(); + check_third_argument(); } else { @@ -122,25 +124,27 @@ public: getName(), arguments.size()); } - if (result_type_is_date) - return std::make_shared(); - else if (result_type_is_datetime) - return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false)); - else if (result_type_is_datetime_64) + switch (result_type) { - auto scale = 0; + case ResultType::Date: + return std::make_shared(); + case ResultType::DateTime: + return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false)); + case ResultType::DateTime64: + { + UInt32 scale = 0; + if (interval_type->getKind() == IntervalKind::Nanosecond) + scale = 9; + else if (interval_type->getKind() == IntervalKind::Microsecond) + scale = 6; + else if (interval_type->getKind() == IntervalKind::Millisecond) + scale = 3; - if (interval_type->getKind() == IntervalKind::Nanosecond) - scale = 9; - else if (interval_type->getKind() == IntervalKind::Microsecond) - scale = 6; - else if (interval_type->getKind() == IntervalKind::Millisecond) - scale = 3; - - return std::make_shared(scale, 
extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false)); + return std::make_shared(scale, extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false)); + } } - UNREACHABLE(); + std::unreachable(); } ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /* input_rows_count */) const override @@ -154,110 +158,106 @@ public: private: ColumnPtr dispatchForTimeColumn( - const ColumnWithTypeAndName & time_column, const ColumnWithTypeAndName & interval_column, const DataTypePtr & result_type, const DateLUTImpl & time_zone) const + const ColumnWithTypeAndName & time_column, const ColumnWithTypeAndName & interval_column, + const DataTypePtr & result_type, const DateLUTImpl & time_zone) const { - const auto & from_datatype = *time_column.type.get(); + const auto & time_column_type = *time_column.type.get(); + const auto & time_column_col = *time_column.column.get(); - if (isDateTime64(from_datatype)) + if (isDateTime64(time_column_type)) { - const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); - auto scale = assert_cast(from_datatype).getScale(); + const auto * time_column_vec = checkAndGetColumn(time_column_col); + auto scale = assert_cast(time_column_type).getScale(); if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(from_datatype), *time_column_vec, interval_column, result_type, time_zone, scale); + return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, result_type, time_zone, scale); } - if (isDateTime(from_datatype)) + else if (isDateTime(time_column_type)) { - const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); + const auto * time_column_vec = checkAndGetColumn(time_column_col); if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(from_datatype), *time_column_vec, interval_column, result_type, time_zone); + return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, result_type, time_zone); } - if (isDate(from_datatype)) + else if (isDate(time_column_type)) { - const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); + const auto * time_column_vec = checkAndGetColumn(time_column_col); if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(from_datatype), *time_column_vec, interval_column, result_type, time_zone); + return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, result_type, time_zone); } - if (isDate32(from_datatype)) - { - const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); - if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(from_datatype), *time_column_vec, interval_column, result_type, time_zone); - } - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal column for first argument of function {}. 
Must contain dates or dates with time", getName()); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal column for 1st argument of function {}, expected a Date, DateTime or DateTime64", getName()); } - template + template ColumnPtr dispatchForIntervalColumn( const TimeDataType & time_data_type, const TimeColumnType & time_column, const ColumnWithTypeAndName & interval_column, - const DataTypePtr & result_type, const DateLUTImpl & time_zone, const UInt16 scale = 1) const + const DataTypePtr & result_type, const DateLUTImpl & time_zone, UInt16 scale = 1) const { const auto * interval_type = checkAndGetDataType(interval_column.type.get()); if (!interval_type) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for second argument of function {}, must be an interval of time.", getName()); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for 2nd argument of function {}, must be a time interval", getName()); const auto * interval_column_const_int64 = checkAndGetColumnConst(interval_column.column.get()); if (!interval_column_const_int64) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for second argument of function {}, must be a const interval of time.", getName()); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for 2nd argument of function {}, must be a const time interval", getName()); - Int64 num_units = interval_column_const_int64->getValue(); + const Int64 num_units = interval_column_const_int64->getValue(); if (num_units <= 0) - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Value for second argument of function {} must be positive.", getName()); + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Value for 2nd argument of function {} must be positive", getName()); switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case) { case IntervalKind::Nanosecond: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Microsecond: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Millisecond: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Second: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Minute: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Hour: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Day: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Week: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Month: - return 
execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Quarter: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Year: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); } - UNREACHABLE(); + std::unreachable(); } - template - ColumnPtr execute(const TimeDataType &, const ColumnType & time_column_type, Int64 num_units, const DataTypePtr & result_type, const DateLUTImpl & time_zone, const UInt16 scale) const + template + ColumnPtr execute( + const TimeDataType &, const TimeColumnType & time_column_type, Int64 num_units, + const DataTypePtr & result_type, const DateLUTImpl & time_zone, UInt16 scale) const { - using ToColumnType = typename ToDataType::ColumnType; - using ToFieldType = typename ToDataType::FieldType; + using ResultColumnType = typename ResultDataType::ColumnType; + using ResultFieldType = typename ResultDataType::FieldType; const auto & time_data = time_column_type.getData(); size_t size = time_data.size(); auto result_col = result_type->createColumn(); - auto *col_to = assert_cast(result_col.get()); + auto * col_to = assert_cast(result_col.get()); auto & result_data = col_to->getData(); result_data.resize(size); Int64 scale_multiplier = DecimalUtils::scaleMultiplier(scale); for (size_t i = 0; i != size; ++i) - result_data[i] = static_cast(ToStartOfInterval::execute(time_data[i], num_units, time_zone, scale_multiplier)); + result_data[i] = static_cast(ToStartOfInterval::execute(time_data[i], num_units, time_zone, scale_multiplier)); return result_col; } }; -} - REGISTER_FUNCTION(ToStartOfInterval) { factory.registerFunction(); diff --git a/src/Functions/transform.cpp b/src/Functions/transform.cpp index f1d2b60f1f4..3c9654740f4 100644 --- a/src/Functions/transform.cpp +++ b/src/Functions/transform.cpp @@ -91,19 +91,6 @@ namespace const auto type_arr_from_nested = type_arr_from->getNestedType(); - auto src = tryGetLeastSupertype(DataTypes{type_x, type_arr_from_nested}); - if (!src - /// Compatibility with previous versions, that allowed even UInt64 with Int64, - /// regardless of ambiguous conversions. - && !isNativeNumber(type_x) && !isNativeNumber(type_arr_from_nested)) - { - throw Exception( - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "First argument and elements of array " - "of the second argument of function {} must have compatible types", - getName()); - } - const DataTypeArray * type_arr_to = checkAndGetDataType(arguments[2].get()); if (!type_arr_to) @@ -766,15 +753,18 @@ namespace } } + WhichDataType which(from_type); + /// Note: Doesn't check the duplicates in the `from` array. 
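+ /// Native numbers, Decimal32/64 and (with this change) Enum values all fit into 64 bits, so they can share the table_num_to_idx fast path below.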
/// Field may be of Float type, but for the purpose of bitwise equality we can treat them as UInt64 - if (WhichDataType which(from_type); isNativeNumber(which) || which.isDecimal32() || which.isDecimal64()) + if (isNativeNumber(which) || which.isDecimal32() || which.isDecimal64() || which.isEnum()) { cache.table_num_to_idx = std::make_unique(); auto & table = *cache.table_num_to_idx; for (size_t i = 0; i < size; ++i) { - if (applyVisitor(FieldVisitorAccurateEquals(), (*cache.from_column)[i], (*from_column_uncasted)[i])) + if (which.isEnum() /// The correctness of strings are already checked by casting them to the Enum type. + || applyVisitor(FieldVisitorAccurateEquals(), (*cache.from_column)[i], (*from_column_uncasted)[i])) { UInt64 key = 0; auto * dst = reinterpret_cast(&key); diff --git a/src/Functions/trap.cpp b/src/Functions/trap.cpp index 6260056ef31..99430f039a4 100644 --- a/src/Functions/trap.cpp +++ b/src/Functions/trap.cpp @@ -165,7 +165,7 @@ public: std::uniform_int_distribution(0x100000000000UL, 0x700000000000UL)(thread_local_rng)); void * map = mmap(hint, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (MAP_FAILED == map) - throwFromErrno("Allocator: Cannot mmap", ErrorCodes::CANNOT_ALLOCATE_MEMORY); + throw ErrnoException(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Allocator: Cannot mmap"); maps.push_back(map); } } diff --git a/src/IO/AIO.cpp b/src/IO/AIO.cpp index 7088be633e5..7a051950f52 100644 --- a/src/IO/AIO.cpp +++ b/src/IO/AIO.cpp @@ -46,7 +46,7 @@ AIOContext::AIOContext(unsigned int nr_events) { ctx = 0; if (io_setup(nr_events, &ctx) < 0) - DB::throwFromErrno("io_setup failed", DB::ErrorCodes::CANNOT_IOSETUP); + throw DB::ErrnoException(DB::ErrorCodes::CANNOT_IOSETUP, "io_setup failed"); } AIOContext::~AIOContext() @@ -137,7 +137,7 @@ AIOContext::AIOContext(unsigned int) { ctx = io_setup(); if (ctx < 0) - DB::throwFromErrno("io_setup failed", DB::ErrorCodes::CANNOT_IOSETUP); + throw DB::ErrnoException(DB::ErrorCodes::CANNOT_IOSETUP, "io_setup failed"); } AIOContext::~AIOContext() diff --git a/src/IO/Archives/IArchiveWriter.h b/src/IO/Archives/IArchiveWriter.h index d7ff038e7bc..cccc6dc953b 100644 --- a/src/IO/Archives/IArchiveWriter.h +++ b/src/IO/Archives/IArchiveWriter.h @@ -13,7 +13,7 @@ class WriteBufferFromFileBase; class IArchiveWriter : public std::enable_shared_from_this, boost::noncopyable { public: - /// Destructors finalizes writing the archive. + /// Call finalize() before destructing IArchiveWriter. virtual ~IArchiveWriter() = default; /// Starts writing a file to the archive. The function returns a write buffer, @@ -26,6 +26,10 @@ public: /// This function should be used mostly for debugging purposes. virtual bool isWritingFile() const = 0; + /// Finalizes writing of the archive. This function must be always called at the end of writing. + /// (Unless an error appeared and the archive is in fact no longer needed.) + virtual void finalize() = 0; + static constexpr const int kDefaultCompressionLevel = -1; /// Sets compression method and level. 
diff --git a/src/IO/Archives/ZipArchiveWriter.cpp b/src/IO/Archives/ZipArchiveWriter.cpp index b9a696ee2e2..785a5005f87 100644 --- a/src/IO/Archives/ZipArchiveWriter.cpp +++ b/src/IO/Archives/ZipArchiveWriter.cpp @@ -6,6 +6,8 @@ #include #include #include +#include +#include namespace DB @@ -15,86 +17,56 @@ namespace ErrorCodes extern const int CANNOT_PACK_ARCHIVE; extern const int SUPPORT_IS_DISABLED; extern const int LOGICAL_ERROR; + extern const int NOT_IMPLEMENTED; } -using RawHandle = zipFile; - -/// Holds a raw handle, calls acquireRawHandle() in the constructor and releaseRawHandle() in the destructor. -class ZipArchiveWriter::HandleHolder +namespace { -public: - HandleHolder() = default; - - explicit HandleHolder(const std::shared_ptr & writer_) : writer(writer_), raw_handle(writer->acquireRawHandle()) { } - - ~HandleHolder() + void checkResultCodeImpl(int code, const String & file_name) { - if (raw_handle) + if (code >= ZIP_OK) + return; + + String message = "Code = "; + switch (code) { - try - { - int err = zipCloseFileInZip(raw_handle); - /// If err == ZIP_PARAMERROR the file is already closed. - if (err != ZIP_PARAMERROR) - checkResult(err); - } - catch (...) - { - tryLogCurrentException("ZipArchiveWriter"); - } - writer->releaseRawHandle(raw_handle); + case ZIP_ERRNO: message += "ERRNO, errno = " + errnoToString(); break; + case ZIP_PARAMERROR: message += "PARAMERROR"; break; + case ZIP_BADZIPFILE: message += "BADZIPFILE"; break; + case ZIP_INTERNALERROR: message += "INTERNALERROR"; break; + default: message += std::to_string(code); break; } + throw Exception(ErrorCodes::CANNOT_PACK_ARCHIVE, "Couldn't pack zip archive: {}, filename={}", message, quoteString(file_name)); } - - HandleHolder(HandleHolder && src) noexcept - { - *this = std::move(src); - } - - HandleHolder & operator=(HandleHolder && src) noexcept - { - writer = std::exchange(src.writer, nullptr); - raw_handle = std::exchange(src.raw_handle, nullptr); - return *this; - } - - RawHandle getRawHandle() const { return raw_handle; } - std::shared_ptr getWriter() const { return writer; } - - void checkResult(int code) const { writer->checkResult(code); } - -private: - std::shared_ptr writer; - RawHandle raw_handle = nullptr; -}; +} /// This class represents a WriteBuffer actually returned by writeFile(). 
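+/// After this change it holds only a weak_ptr to its parent ZipArchiveWriter: finalizeImpl() flushes the buffer, closes the zip entry via closeFile() and releases the single-writer slot via endWritingFile(); the destructor does the same as a best-effort fallback without throwing.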
class ZipArchiveWriter::WriteBufferFromZipArchive : public WriteBufferFromFileBase { public: - WriteBufferFromZipArchive(HandleHolder && handle_, const String & filename_) + WriteBufferFromZipArchive(std::shared_ptr archive_writer_, const String & filename_) : WriteBufferFromFileBase(DBMS_DEFAULT_BUFFER_SIZE, nullptr, 0) - , handle(std::move(handle_)) , filename(filename_) { - auto compress_method = handle.getWriter()->compression_method; - auto compress_level = handle.getWriter()->compression_level; + zip_handle = archive_writer_->startWritingFile(); + archive_writer = archive_writer_; + + auto compress_method = archive_writer_->getCompressionMethod(); + auto compress_level = archive_writer_->getCompressionLevel(); checkCompressionMethodIsEnabled(compress_method); const char * password_cstr = nullptr; - const String & password_str = handle.getWriter()->password; - if (!password_str.empty()) + String current_password = archive_writer_->getPassword(); + if (!current_password.empty()) { checkEncryptionIsEnabled(); - password_cstr = password_str.c_str(); + password_cstr = current_password.c_str(); } - RawHandle raw_handle = handle.getRawHandle(); - - checkResult(zipOpenNewFileInZip3_64( - raw_handle, + int code = zipOpenNewFileInZip3_64( + zip_handle, filename_.c_str(), /* zipfi= */ nullptr, /* extrafield_local= */ nullptr, @@ -110,21 +82,30 @@ public: /* strategy= */ 0, password_cstr, /* crc_for_crypting= */ 0, - /* zip64= */ true)); + /* zip64= */ true); + checkResultCode(code); } ~WriteBufferFromZipArchive() override { try { - finalize(); + closeFile(/* throw_if_error= */ false); + endWritingFile(); } catch (...) { - tryLogCurrentException("ZipArchiveWriter"); + tryLogCurrentException("WriteBufferFromZipArchive"); } } + void finalizeImpl() override + { + next(); + closeFile(/* throw_if_error= */ true); + endWritingFile(); + } + void sync() override { next(); } std::string getFileName() const override { return filename; } @@ -133,110 +114,106 @@ private: { if (!offset()) return; - RawHandle raw_handle = handle.getRawHandle(); - int code = zipWriteInFileInZip(raw_handle, working_buffer.begin(), static_cast(offset())); - checkResult(code); + chassert(zip_handle); + int code = zipWriteInFileInZip(zip_handle, working_buffer.begin(), static_cast(offset())); + checkResultCode(code); } - void checkResult(int code) const { handle.checkResult(code); } + void closeFile(bool throw_if_error) + { + if (zip_handle) + { + int code = zipCloseFileInZip(zip_handle); + zip_handle = nullptr; + if (throw_if_error) + checkResultCode(code); + } + } - HandleHolder handle; - String filename; + void endWritingFile() + { + if (auto archive_writer_ptr = archive_writer.lock()) + { + archive_writer_ptr->endWritingFile(); + archive_writer.reset(); + } + } + + void checkResultCode(int code) const { checkResultCodeImpl(code, filename); } + + std::weak_ptr archive_writer; + const String filename; + ZipHandle zip_handle; }; -namespace +/// Provides a set of functions allowing the minizip library to write its output +/// to a WriteBuffer instead of an ordinary file in the local filesystem. +class ZipArchiveWriter::StreamInfo { - /// Provides a set of functions allowing the minizip library to write its output - /// to a WriteBuffer instead of an ordinary file in the local filesystem. 
- class StreamFromWriteBuffer +public: + explicit StreamInfo(std::unique_ptr write_buffer_) + : write_buffer(std::move(write_buffer_)), start_offset(write_buffer->count()) { - public: - static RawHandle open(std::unique_ptr archive_write_buffer) - { - Opaque opaque{std::move(archive_write_buffer)}; + } - zlib_filefunc64_def func_def; - func_def.zopen64_file = &StreamFromWriteBuffer::openFileFunc; - func_def.zclose_file = &StreamFromWriteBuffer::closeFileFunc; - func_def.zread_file = &StreamFromWriteBuffer::readFileFunc; - func_def.zwrite_file = &StreamFromWriteBuffer::writeFileFunc; - func_def.zseek64_file = &StreamFromWriteBuffer::seekFunc; - func_def.ztell64_file = &StreamFromWriteBuffer::tellFunc; - func_def.zerror_file = &StreamFromWriteBuffer::testErrorFunc; - func_def.opaque = &opaque; + ~StreamInfo() = default; - return zipOpen2_64( - /* path= */ nullptr, - /* append= */ false, - /* globalcomment= */ nullptr, - &func_def); - } + ZipHandle makeZipHandle() + { + zlib_filefunc64_def func_def; + func_def.zopen64_file = &StreamInfo::openFileFunc; + func_def.zclose_file = &StreamInfo::closeFileFunc; + func_def.zread_file = &StreamInfo::readFileFunc; + func_def.zwrite_file = &StreamInfo::writeFileFunc; + func_def.zseek64_file = &StreamInfo::seekFunc; + func_def.ztell64_file = &StreamInfo::tellFunc; + func_def.zerror_file = &StreamInfo::testErrorFunc; + func_def.opaque = this; - private: - std::unique_ptr write_buffer; - UInt64 start_offset = 0; + return zipOpen2_64( + /* path= */ nullptr, + /* append= */ false, + /* globalcomment= */ nullptr, + &func_def); + } - struct Opaque - { - std::unique_ptr write_buffer; - }; + WriteBuffer & getWriteBuffer() { return *write_buffer; } - static void * openFileFunc(void * opaque, const void *, int) - { - Opaque & opq = *reinterpret_cast(opaque); - return new StreamFromWriteBuffer(std::move(opq.write_buffer)); - } +private: + /// We do nothing in openFileFunc() and in closeFileFunc() because we already have `write_buffer` (file is already opened). 
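+    /// The `opaque` pointer handed to every callback is the StreamInfo itself: writeFileFunc() appends to the wrapped WriteBuffer and tellFunc() reports the number of bytes written since `start_offset`.
+    /// seekFunc() and readFileFunc() throw NOT_IMPLEMENTED because the archive is produced strictly forward into a write-only stream.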
+ static void * openFileFunc(void * opaque, const void *, int) { return opaque; } + static int closeFileFunc(void *, void *) { return ZIP_OK; } - explicit StreamFromWriteBuffer(std::unique_ptr write_buffer_) - : write_buffer(std::move(write_buffer_)), start_offset(write_buffer->count()) {} + static unsigned long writeFileFunc(void * opaque, void *, const void * buf, unsigned long size) // NOLINT(google-runtime-int) + { + auto * stream_info = reinterpret_cast(opaque); + stream_info->write_buffer->write(reinterpret_cast(buf), size); + return size; + } - ~StreamFromWriteBuffer() - { - write_buffer->finalize(); - } + static int testErrorFunc(void *, void *) { return ZIP_OK; } - static int closeFileFunc(void *, void * stream) - { - delete reinterpret_cast(stream); - return ZIP_OK; - } + static ZPOS64_T tellFunc(void * opaque, void *) + { + auto * stream_info = reinterpret_cast(opaque); + auto pos = stream_info->write_buffer->count() - stream_info->start_offset; + return pos; + } - static StreamFromWriteBuffer & get(void * ptr) - { - return *reinterpret_cast(ptr); - } + static long seekFunc(void *, void *, ZPOS64_T, int) // NOLINT(google-runtime-int) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "StreamInfo::seek() is not implemented"); + } - static unsigned long writeFileFunc(void *, void * stream, const void * buf, unsigned long size) // NOLINT(google-runtime-int) - { - auto & strm = get(stream); - strm.write_buffer->write(reinterpret_cast(buf), size); - return size; - } + static unsigned long readFileFunc(void *, void *, void *, unsigned long) // NOLINT(google-runtime-int) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "StreamInfo::readFile() is not implemented"); + } - static int testErrorFunc(void *, void *) - { - return ZIP_OK; - } - - static ZPOS64_T tellFunc(void *, void * stream) - { - auto & strm = get(stream); - auto pos = strm.write_buffer->count() - strm.start_offset; - return pos; - } - - static long seekFunc(void *, void *, ZPOS64_T, int) // NOLINT(google-runtime-int) - { - throw Exception(ErrorCodes::LOGICAL_ERROR, "StreamFromWriteBuffer::seek must not be called"); - } - - static unsigned long readFileFunc(void *, void *, void *, unsigned long) // NOLINT(google-runtime-int) - { - throw Exception(ErrorCodes::LOGICAL_ERROR, "StreamFromWriteBuffer::readFile must not be called"); - } - }; -} + std::unique_ptr write_buffer; + UInt64 start_offset; +}; ZipArchiveWriter::ZipArchiveWriter(const String & path_to_archive_) @@ -248,21 +225,42 @@ ZipArchiveWriter::ZipArchiveWriter(const String & path_to_archive_, std::unique_ : path_to_archive(path_to_archive_), compression_method(MZ_COMPRESS_METHOD_DEFLATE) { if (archive_write_buffer_) - handle = StreamFromWriteBuffer::open(std::move(archive_write_buffer_)); + { + stream_info = std::make_unique(std::move(archive_write_buffer_)); + zip_handle = stream_info->makeZipHandle(); + } else - handle = zipOpen64(path_to_archive.c_str(), /* append= */ false); - if (!handle) - throw Exception(ErrorCodes::CANNOT_PACK_ARCHIVE, "Couldn't create zip archive {}", quoteString(path_to_archive)); + { + zip_handle = zipOpen64(path_to_archive.c_str(), /* append= */ false); + } + if (!zip_handle) + throw Exception(ErrorCodes::CANNOT_PACK_ARCHIVE, "Couldn't create zip archive {}", quoteString(path_to_archive)); } ZipArchiveWriter::~ZipArchiveWriter() { - if (handle) + if (!finalized) + { + /// It is totally OK to destroy instance without finalization when an exception occurs. 
+ /// However it is suspicious to destroy instance without finalization at the green path. + if (!std::uncaught_exceptions() && std::current_exception() == nullptr) + { + Poco::Logger * log = &Poco::Logger::get("ZipArchiveWriter"); + LOG_ERROR(log, + "ZipArchiveWriter is not finalized when destructor is called. " + "The zip archive might not be written at all or might be truncated. " + "Stack trace: {}", StackTrace().toString()); + chassert(false && "ZipArchiveWriter is not finalized in destructor."); + } + } + + if (zip_handle) { try { - checkResult(zipClose(handle, /* global_comment= */ nullptr)); + zipCloseFileInZip(zip_handle); + zipClose(zip_handle, /* global_comment= */ nullptr); } catch (...) { @@ -273,13 +271,38 @@ ZipArchiveWriter::~ZipArchiveWriter() std::unique_ptr ZipArchiveWriter::writeFile(const String & filename) { - return std::make_unique(acquireHandle(), filename); + return std::make_unique(std::static_pointer_cast(shared_from_this()), filename); } bool ZipArchiveWriter::isWritingFile() const { std::lock_guard lock{mutex}; - return !handle; + return is_writing_file; +} + +void ZipArchiveWriter::finalize() +{ + std::lock_guard lock{mutex}; + if (finalized) + return; + + if (is_writing_file) + throw Exception(ErrorCodes::LOGICAL_ERROR, "ZipArchiveWriter::finalize() is called in the middle of writing a file into the zip archive. That's not allowed"); + + if (zip_handle) + { + int code = zipClose(zip_handle, /* global_comment= */ nullptr); + zip_handle = nullptr; + checkResultCode(code); + } + + if (stream_info) + { + stream_info->getWriteBuffer().finalize(); + stream_info.reset(); + } + + finalized = true; } void ZipArchiveWriter::setCompression(const String & compression_method_, int compression_level_) @@ -289,12 +312,30 @@ void ZipArchiveWriter::setCompression(const String & compression_method_, int co compression_level = compression_level_; } +int ZipArchiveWriter::getCompressionMethod() const +{ + std::lock_guard lock{mutex}; + return compression_method; +} + +int ZipArchiveWriter::getCompressionLevel() const +{ + std::lock_guard lock{mutex}; + return compression_level; +} + void ZipArchiveWriter::setPassword(const String & password_) { std::lock_guard lock{mutex}; password = password_; } +String ZipArchiveWriter::getPassword() const +{ + std::lock_guard lock{mutex}; + return password; +} + int ZipArchiveWriter::compressionMethodToInt(const String & compression_method_) { if (compression_method_.empty()) @@ -361,45 +402,24 @@ void ZipArchiveWriter::checkEncryptionIsEnabled() #endif } -ZipArchiveWriter::HandleHolder ZipArchiveWriter::acquireHandle() -{ - return HandleHolder{std::static_pointer_cast(shared_from_this())}; -} - -RawHandle ZipArchiveWriter::acquireRawHandle() +ZipArchiveWriter::ZipHandle ZipArchiveWriter::startWritingFile() { std::lock_guard lock{mutex}; - if (!handle) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot have more than one write buffer while writing a zip archive"); - return std::exchange(handle, nullptr); + if (is_writing_file) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot write two files to a zip archive in parallel"); + is_writing_file = true; + return zip_handle; } -void ZipArchiveWriter::releaseRawHandle(RawHandle raw_handle_) +void ZipArchiveWriter::endWritingFile() { std::lock_guard lock{mutex}; - handle = raw_handle_; + is_writing_file = false; } -void ZipArchiveWriter::checkResult(int code) const +void ZipArchiveWriter::checkResultCode(int code) const { - if (code >= ZIP_OK) - return; - - String message = "Code = "; - 
switch (code) - { - case ZIP_ERRNO: message += "ERRNO, errno = " + errnoToString(); break; - case ZIP_PARAMERROR: message += "PARAMERROR"; break; - case ZIP_BADZIPFILE: message += "BADZIPFILE"; break; - case ZIP_INTERNALERROR: message += "INTERNALERROR"; break; - default: message += std::to_string(code); break; - } - showError(message); -} - -void ZipArchiveWriter::showError(const String & message) const -{ - throw Exception(ErrorCodes::CANNOT_PACK_ARCHIVE, "Couldn't pack zip archive {}: {}", quoteString(path_to_archive), message); + checkResultCodeImpl(code, path_to_archive); } } diff --git a/src/IO/Archives/ZipArchiveWriter.h b/src/IO/Archives/ZipArchiveWriter.h index a54130556b3..891da1a2e75 100644 --- a/src/IO/Archives/ZipArchiveWriter.h +++ b/src/IO/Archives/ZipArchiveWriter.h @@ -4,6 +4,7 @@ #if USE_MINIZIP #include +#include #include @@ -22,7 +23,7 @@ public: /// Constructs an archive that will be written by using a specified `archive_write_buffer_`. ZipArchiveWriter(const String & path_to_archive_, std::unique_ptr archive_write_buffer_); - /// Destructors finalizes writing the archive. + /// Call finalize() before destructing IArchiveWriter. ~ZipArchiveWriter() override; /// Starts writing a file to the archive. The function returns a write buffer, @@ -35,6 +36,10 @@ public: /// This function should be used mostly for debugging purposes. bool isWritingFile() const override; + /// Finalizes writing of the archive. This function must be always called at the end of writing. + /// (Unless an error appeared and the archive is in fact no longer needed.) + void finalize() override; + /// Supported compression methods. static constexpr const char kStore[] = "store"; static constexpr const char kDeflate[] = "deflate"; @@ -68,22 +73,27 @@ public: static void checkEncryptionIsEnabled(); private: + class StreamInfo; + using ZipHandle = void *; class WriteBufferFromZipArchive; - class HandleHolder; - using RawHandle = void *; - HandleHolder acquireHandle(); - RawHandle acquireRawHandle(); - void releaseRawHandle(RawHandle raw_handle_); + int getCompressionMethod() const; + int getCompressionLevel() const; + String getPassword() const; - void checkResult(int code) const; - [[noreturn]] void showError(const String & message) const; + ZipHandle startWritingFile(); + void endWritingFile(); + + void checkResultCode(int code) const; const String path_to_archive; - int compression_method; /// By default the compression method is "deflate". - int compression_level = kDefaultCompressionLevel; - String password; - RawHandle handle = nullptr; + std::unique_ptr TSA_GUARDED_BY(mutex) stream_info; + int compression_method TSA_GUARDED_BY(mutex); /// By default the compression method is "deflate". + int compression_level TSA_GUARDED_BY(mutex) = kDefaultCompressionLevel; + String password TSA_GUARDED_BY(mutex); + ZipHandle zip_handle TSA_GUARDED_BY(mutex) = nullptr; + bool is_writing_file TSA_GUARDED_BY(mutex) = false; + bool finalized TSA_GUARDED_BY(mutex) = false; mutable std::mutex mutex; }; diff --git a/src/IO/AsynchronousReadBufferFromFile.cpp b/src/IO/AsynchronousReadBufferFromFile.cpp index 0e6c8090cb5..c6fe16a7f14 100644 --- a/src/IO/AsynchronousReadBufferFromFile.cpp +++ b/src/IO/AsynchronousReadBufferFromFile.cpp @@ -46,13 +46,13 @@ AsynchronousReadBufferFromFile::AsynchronousReadBufferFromFile( fd = ::open(file_name.c_str(), flags == -1 ? O_RDONLY | O_CLOEXEC : flags | O_CLOEXEC); if (-1 == fd) - throwFromErrnoWithPath("Cannot open file " + file_name, file_name, - errno == ENOENT ? 
ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); + ErrnoException::throwFromPath( + errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, file_name, "Cannot open file {}", file_name); #ifdef OS_DARWIN if (o_direct) { if (fcntl(fd, F_NOCACHE, 1) == -1) - throwFromErrnoWithPath("Cannot set F_NOCACHE on file " + file_name, file_name, ErrorCodes::CANNOT_OPEN_FILE); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_OPEN_FILE, file_name, "Cannot set F_NOCACHE on file {}", file_name); } #endif } diff --git a/src/IO/BitHelpers.h b/src/IO/BitHelpers.h index a9c7343f991..45c9b1ba572 100644 --- a/src/IO/BitHelpers.h +++ b/src/IO/BitHelpers.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include diff --git a/src/IO/BrotliWriteBuffer.cpp b/src/IO/BrotliWriteBuffer.cpp index a19c6770dad..a497b78a6c2 100644 --- a/src/IO/BrotliWriteBuffer.cpp +++ b/src/IO/BrotliWriteBuffer.cpp @@ -13,33 +13,14 @@ namespace ErrorCodes } -class BrotliWriteBuffer::BrotliStateWrapper +BrotliWriteBuffer::BrotliStateWrapper::BrotliStateWrapper() +: state(BrotliEncoderCreateInstance(nullptr, nullptr, nullptr)) { -public: - BrotliStateWrapper() - : state(BrotliEncoderCreateInstance(nullptr, nullptr, nullptr)) - { - } +} - ~BrotliStateWrapper() - { - BrotliEncoderDestroyInstance(state); - } - - BrotliEncoderState * state; -}; - -BrotliWriteBuffer::BrotliWriteBuffer(std::unique_ptr out_, int compression_level, size_t buf_size, char * existing_memory, size_t alignment) - : WriteBufferWithOwnMemoryDecorator(std::move(out_), buf_size, existing_memory, alignment) - , brotli(std::make_unique()) - , in_available(0) - , in_data(nullptr) - , out_capacity(0) - , out_data(nullptr) +BrotliWriteBuffer::BrotliStateWrapper::~BrotliStateWrapper() { - BrotliEncoderSetParameter(brotli->state, BROTLI_PARAM_QUALITY, static_cast(compression_level)); - // Set LZ77 window size. According to brotli sources default value is 24 (c/tools/brotli.c:81) - BrotliEncoderSetParameter(brotli->state, BROTLI_PARAM_LGWIN, 24); + BrotliEncoderDestroyInstance(state); } BrotliWriteBuffer::~BrotliWriteBuffer() = default; @@ -58,18 +39,20 @@ void BrotliWriteBuffer::nextImpl() { do { + const auto * in_data_ptr = in_data; out->nextIfAtEnd(); out_data = reinterpret_cast(out->position()); out_capacity = out->buffer().end() - out->position(); int result = BrotliEncoderCompressStream( brotli->state, - in_available ? 
BROTLI_OPERATION_PROCESS : BROTLI_OPERATION_FINISH, + BROTLI_OPERATION_PROCESS, &in_available, &in_data, &out_capacity, &out_data, nullptr); + total_in += in_data - in_data_ptr; out->position() = out->buffer().end() - out_capacity; @@ -92,6 +75,10 @@ void BrotliWriteBuffer::finalizeBefore() { next(); + /// Don't write out if no data was ever compressed + if (!compress_empty && total_in == 0) + return; + while (true) { out->nextIfAtEnd(); diff --git a/src/IO/BrotliWriteBuffer.h b/src/IO/BrotliWriteBuffer.h index 8cbc78bd9e7..d4cda7b270c 100644 --- a/src/IO/BrotliWriteBuffer.h +++ b/src/IO/BrotliWriteBuffer.h @@ -4,18 +4,38 @@ #include #include +#include "config.h" + +#if USE_BROTLI +# include + namespace DB { + class BrotliWriteBuffer : public WriteBufferWithOwnMemoryDecorator { public: + template BrotliWriteBuffer( - std::unique_ptr out_, + WriteBufferT && out_, int compression_level, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, - size_t alignment = 0); + size_t alignment = 0, + bool compress_empty_ = true) + : WriteBufferWithOwnMemoryDecorator(std::move(out_), buf_size, existing_memory, alignment) + , brotli(std::make_unique()) + , in_available(0) + , in_data(nullptr) + , out_capacity(0) + , out_data(nullptr) + , compress_empty(compress_empty_) + { + BrotliEncoderSetParameter(brotli->state, BROTLI_PARAM_QUALITY, static_cast(compression_level)); + // Set LZ77 window size. According to brotli sources default value is 24 (c/tools/brotli.c:81) + BrotliEncoderSetParameter(brotli->state, BROTLI_PARAM_LGWIN, 24); + } ~BrotliWriteBuffer() override; @@ -24,7 +44,15 @@ private: void finalizeBefore() override; - class BrotliStateWrapper; + class BrotliStateWrapper + { + public: + BrotliStateWrapper(); + ~BrotliStateWrapper(); + + BrotliEncoderState * state; + }; + std::unique_ptr brotli; @@ -33,6 +61,12 @@ private: size_t out_capacity; uint8_t * out_data; + +protected: + UInt64 total_in = 0; + bool compress_empty = true; }; } + +#endif diff --git a/src/IO/BufferBase.h b/src/IO/BufferBase.h index 7a59687fa56..4c0a467b155 100644 --- a/src/IO/BufferBase.h +++ b/src/IO/BufferBase.h @@ -2,6 +2,7 @@ #include #include +#include namespace DB diff --git a/src/IO/Bzip2WriteBuffer.cpp b/src/IO/Bzip2WriteBuffer.cpp index b84cbdd1e41..3421b4c3985 100644 --- a/src/IO/Bzip2WriteBuffer.cpp +++ b/src/IO/Bzip2WriteBuffer.cpp @@ -15,34 +15,22 @@ namespace ErrorCodes } -class Bzip2WriteBuffer::Bzip2StateWrapper +Bzip2WriteBuffer::Bzip2StateWrapper::Bzip2StateWrapper(int compression_level) { -public: - explicit Bzip2StateWrapper(int compression_level) - { - memset(&stream, 0, sizeof(stream)); + memset(&stream, 0, sizeof(stream)); - int ret = BZ2_bzCompressInit(&stream, compression_level, 0, 0); + int ret = BZ2_bzCompressInit(&stream, compression_level, 0, 0); - if (ret != BZ_OK) - throw Exception( - ErrorCodes::BZIP2_STREAM_ENCODER_FAILED, - "bzip2 stream encoder init failed: error code: {}", - ret); - } + if (ret != BZ_OK) + throw Exception( + ErrorCodes::BZIP2_STREAM_ENCODER_FAILED, + "bzip2 stream encoder init failed: error code: {}", + ret); +} - ~Bzip2StateWrapper() - { - BZ2_bzCompressEnd(&stream); - } - - bz_stream stream; -}; - -Bzip2WriteBuffer::Bzip2WriteBuffer(std::unique_ptr out_, int compression_level, size_t buf_size, char * existing_memory, size_t alignment) - : WriteBufferWithOwnMemoryDecorator(std::move(out_), buf_size, existing_memory, alignment) - , bz(std::make_unique(compression_level)) +Bzip2WriteBuffer::Bzip2StateWrapper::~Bzip2StateWrapper() { + 
BZ2_bzCompressEnd(&stream); } Bzip2WriteBuffer::~Bzip2WriteBuffer() = default; @@ -77,6 +65,8 @@ void Bzip2WriteBuffer::nextImpl() } while (bz->stream.avail_in > 0); + + total_in += offset(); } catch (...) { @@ -90,6 +80,10 @@ void Bzip2WriteBuffer::finalizeBefore() { next(); + /// Don't write out if no data was ever compressed + if (!compress_empty && total_in == 0) + return; + out->nextIfAtEnd(); bz->stream.next_out = out->position(); bz->stream.avail_out = static_cast(out->buffer().end() - out->position()); diff --git a/src/IO/Bzip2WriteBuffer.h b/src/IO/Bzip2WriteBuffer.h index d0371903487..63c67461c6a 100644 --- a/src/IO/Bzip2WriteBuffer.h +++ b/src/IO/Bzip2WriteBuffer.h @@ -4,18 +4,29 @@ #include #include +#include "config.h" + +#if USE_BZIP2 +# include + namespace DB { class Bzip2WriteBuffer : public WriteBufferWithOwnMemoryDecorator { public: + template Bzip2WriteBuffer( - std::unique_ptr out_, + WriteBufferT && out_, int compression_level, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, - size_t alignment = 0); + size_t alignment = 0, + bool compress_empty_ = true) + : WriteBufferWithOwnMemoryDecorator(std::move(out_), buf_size, existing_memory, alignment), bz(std::make_unique(compression_level)) + , compress_empty(compress_empty_) + { + } ~Bzip2WriteBuffer() override; @@ -24,8 +35,20 @@ private: void finalizeBefore() override; - class Bzip2StateWrapper; + class Bzip2StateWrapper + { + public: + explicit Bzip2StateWrapper(int compression_level); + ~Bzip2StateWrapper(); + + bz_stream stream; + }; + std::unique_ptr bz; + bool compress_empty = true; + UInt64 total_in = 0; }; } + +#endif diff --git a/src/IO/CompressionMethod.cpp b/src/IO/CompressionMethod.cpp index 13e1adbb702..90453e16961 100644 --- a/src/IO/CompressionMethod.cpp +++ b/src/IO/CompressionMethod.cpp @@ -169,37 +169,66 @@ std::unique_ptr wrapReadBufferWithCompressionMethod( return createCompressedWrapper(std::move(nested), method, buf_size, existing_memory, alignment, zstd_window_log_max); } -std::unique_ptr wrapWriteBufferWithCompressionMethod( - std::unique_ptr nested, CompressionMethod method, int level, size_t buf_size, char * existing_memory, size_t alignment) + +template +std::unique_ptr createWriteCompressedWrapper( + WriteBufferT && nested, CompressionMethod method, int level, size_t buf_size, char * existing_memory, size_t alignment, bool compress_empty) { if (method == DB::CompressionMethod::Gzip || method == CompressionMethod::Zlib) - return std::make_unique(std::move(nested), method, level, buf_size, existing_memory, alignment); + return std::make_unique(std::forward(nested), method, level, buf_size, existing_memory, alignment, compress_empty); #if USE_BROTLI if (method == DB::CompressionMethod::Brotli) - return std::make_unique(std::move(nested), level, buf_size, existing_memory, alignment); + return std::make_unique(std::forward(nested), level, buf_size, existing_memory, alignment, compress_empty); #endif if (method == CompressionMethod::Xz) - return std::make_unique(std::move(nested), level, buf_size, existing_memory, alignment); + return std::make_unique(std::forward(nested), level, buf_size, existing_memory, alignment, compress_empty); if (method == CompressionMethod::Zstd) - return std::make_unique(std::move(nested), level, buf_size, existing_memory, alignment); + return std::make_unique(std::forward(nested), level, buf_size, existing_memory, alignment, compress_empty); if (method == CompressionMethod::Lz4) - return std::make_unique(std::move(nested), level, buf_size, 
existing_memory, alignment); + return std::make_unique(std::forward(nested), level, buf_size, existing_memory, alignment, compress_empty); #if USE_BZIP2 if (method == CompressionMethod::Bzip2) - return std::make_unique(std::move(nested), level, buf_size, existing_memory, alignment); + return std::make_unique(std::forward(nested), level, buf_size, existing_memory, alignment, compress_empty); #endif #if USE_SNAPPY if (method == CompressionMethod::Snappy) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported compression method"); #endif - if (method == CompressionMethod::None) - return nested; throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported compression method"); } + +std::unique_ptr wrapWriteBufferWithCompressionMethod( + std::unique_ptr nested, + CompressionMethod method, + int level, + size_t buf_size, + char * existing_memory, + size_t alignment, + bool compress_empty) +{ + if (method == CompressionMethod::None) + return nested; + return createWriteCompressedWrapper(nested, method, level, buf_size, existing_memory, alignment, compress_empty); +} + + +std::unique_ptr wrapWriteBufferWithCompressionMethod( + WriteBuffer * nested, + CompressionMethod method, + int level, + size_t buf_size, + char * existing_memory, + size_t alignment, + bool compress_empty) +{ + assert(method != CompressionMethod::None); + return createWriteCompressedWrapper(nested, method, level, buf_size, existing_memory, alignment, compress_empty); +} + } diff --git a/src/IO/CompressionMethod.h b/src/IO/CompressionMethod.h index c142531cd05..d218e4c5882 100644 --- a/src/IO/CompressionMethod.h +++ b/src/IO/CompressionMethod.h @@ -61,13 +61,22 @@ std::unique_ptr wrapReadBufferWithCompressionMethod( char * existing_memory = nullptr, size_t alignment = 0); - std::unique_ptr wrapWriteBufferWithCompressionMethod( std::unique_ptr nested, CompressionMethod method, int level, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, - size_t alignment = 0); + size_t alignment = 0, + bool compress_empty = true); + +std::unique_ptr wrapWriteBufferWithCompressionMethod( + WriteBuffer * nested, + CompressionMethod method, + int level, + size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, + char * existing_memory = nullptr, + size_t alignment = 0, + bool compress_empty = true); } diff --git a/src/IO/ConnectionTimeouts.cpp b/src/IO/ConnectionTimeouts.cpp index 970afc75ec3..88073a72d78 100644 --- a/src/IO/ConnectionTimeouts.cpp +++ b/src/IO/ConnectionTimeouts.cpp @@ -133,6 +133,22 @@ ConnectionTimeouts ConnectionTimeouts::getHTTPTimeouts(const Settings & settings settings.http_receive_timeout); } +ConnectionTimeouts ConnectionTimeouts::getFetchPartHTTPTimeouts(const ServerSettings & server_settings, const Settings & user_settings) +{ + auto timeouts = getHTTPTimeouts(user_settings, server_settings.keep_alive_timeout); + + if (server_settings.replicated_fetches_http_connection_timeout.changed) + timeouts.connection_timeout = server_settings.replicated_fetches_http_connection_timeout; + + if (server_settings.replicated_fetches_http_send_timeout.changed) + timeouts.send_timeout = server_settings.replicated_fetches_http_send_timeout; + + if (server_settings.replicated_fetches_http_receive_timeout.changed) + timeouts.receive_timeout = server_settings.replicated_fetches_http_receive_timeout; + + return timeouts; +} + class SendReceiveTimeoutsForFirstAttempt { private: diff --git a/src/IO/ConnectionTimeouts.h b/src/IO/ConnectionTimeouts.h index aabebdb836d..42c4312d1d8 100644 --- a/src/IO/ConnectionTimeouts.h +++ 
b/src/IO/ConnectionTimeouts.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include @@ -68,6 +69,8 @@ struct ConnectionTimeouts static ConnectionTimeouts getTCPTimeoutsWithFailover(const Settings & settings); static ConnectionTimeouts getHTTPTimeouts(const Settings & settings, Poco::Timespan http_keep_alive_timeout); + static ConnectionTimeouts getFetchPartHTTPTimeouts(const ServerSettings & server_settings, const Settings & user_settings); + ConnectionTimeouts getAdaptiveTimeouts(const String & method, bool first_attempt, bool first_byte) const; }; diff --git a/src/IO/LZMADeflatingWriteBuffer.cpp b/src/IO/LZMADeflatingWriteBuffer.cpp index a77b2bb7b39..db8f8c95fe6 100644 --- a/src/IO/LZMADeflatingWriteBuffer.cpp +++ b/src/IO/LZMADeflatingWriteBuffer.cpp @@ -7,9 +7,7 @@ namespace ErrorCodes extern const int LZMA_STREAM_ENCODER_FAILED; } -LZMADeflatingWriteBuffer::LZMADeflatingWriteBuffer( - std::unique_ptr out_, int compression_level, size_t buf_size, char * existing_memory, size_t alignment) - : WriteBufferWithOwnMemoryDecorator(std::move(out_), buf_size, existing_memory, alignment) +void LZMADeflatingWriteBuffer::initialize(int compression_level) { lstr = LZMA_STREAM_INIT; @@ -94,6 +92,10 @@ void LZMADeflatingWriteBuffer::finalizeBefore() { next(); + /// Don't write out if no data was ever compressed + if (!compress_empty && lstr.total_out == 0) + return; + do { out->nextIfAtEnd(); diff --git a/src/IO/LZMADeflatingWriteBuffer.h b/src/IO/LZMADeflatingWriteBuffer.h index 2e135455e00..797b85cd400 100644 --- a/src/IO/LZMADeflatingWriteBuffer.h +++ b/src/IO/LZMADeflatingWriteBuffer.h @@ -14,22 +14,32 @@ namespace DB class LZMADeflatingWriteBuffer : public WriteBufferWithOwnMemoryDecorator { public: + template LZMADeflatingWriteBuffer( - std::unique_ptr out_, + WriteBufferT && out_, int compression_level, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, - size_t alignment = 0); + size_t alignment = 0, + bool compress_empty_ = true) + : WriteBufferWithOwnMemoryDecorator(std::move(out_), buf_size, existing_memory, alignment), compress_empty(compress_empty_) + { + initialize(compression_level); + } ~LZMADeflatingWriteBuffer() override; private: + void initialize(int compression_level); + void nextImpl() override; void finalizeBefore() override; void finalizeAfter() override; lzma_stream lstr; + + bool compress_empty = true; }; } diff --git a/src/IO/Lz4DeflatingWriteBuffer.cpp b/src/IO/Lz4DeflatingWriteBuffer.cpp index 8241bfd4f3c..a8cac823b50 100644 --- a/src/IO/Lz4DeflatingWriteBuffer.cpp +++ b/src/IO/Lz4DeflatingWriteBuffer.cpp @@ -63,11 +63,8 @@ namespace ErrorCodes extern const int LZ4_ENCODER_FAILED; } -Lz4DeflatingWriteBuffer::Lz4DeflatingWriteBuffer( - std::unique_ptr out_, int compression_level, size_t buf_size, char * existing_memory, size_t alignment) - : WriteBufferWithOwnMemoryDecorator(std::move(out_), buf_size, existing_memory, alignment) - , tmp_memory(buf_size) +void Lz4DeflatingWriteBuffer::initialize(int compression_level) { kPrefs = { {LZ4F_max256KB, @@ -105,7 +102,7 @@ void Lz4DeflatingWriteBuffer::nextImpl() if (first_time) { - auto sink = SinkToOut(out.get(), tmp_memory, LZ4F_HEADER_SIZE_MAX); + auto sink = SinkToOut(out, tmp_memory, LZ4F_HEADER_SIZE_MAX); chassert(sink.getCapacity() >= LZ4F_HEADER_SIZE_MAX); /// write frame header and check for errors @@ -131,7 +128,7 @@ void Lz4DeflatingWriteBuffer::nextImpl() /// Ensure that there is enough space for compressed block of minimal size size_t min_compressed_block_size = 
LZ4F_compressBound(1, &kPrefs); - auto sink = SinkToOut(out.get(), tmp_memory, min_compressed_block_size); + auto sink = SinkToOut(out, tmp_memory, min_compressed_block_size); chassert(sink.getCapacity() >= min_compressed_block_size); /// LZ4F_compressUpdate compresses whole input buffer at once so we need to shink it manually @@ -163,8 +160,12 @@ void Lz4DeflatingWriteBuffer::finalizeBefore() { next(); + /// Don't write out if no data was ever compressed + if (!compress_empty && first_time) + return; + auto suffix_size = LZ4F_compressBound(0, &kPrefs); - auto sink = SinkToOut(out.get(), tmp_memory, suffix_size); + auto sink = SinkToOut(out, tmp_memory, suffix_size); chassert(sink.getCapacity() >= suffix_size); /// compression end diff --git a/src/IO/Lz4DeflatingWriteBuffer.h b/src/IO/Lz4DeflatingWriteBuffer.h index 7bb8a5e6c0e..b37d61fa732 100644 --- a/src/IO/Lz4DeflatingWriteBuffer.h +++ b/src/IO/Lz4DeflatingWriteBuffer.h @@ -14,16 +14,26 @@ namespace DB class Lz4DeflatingWriteBuffer : public WriteBufferWithOwnMemoryDecorator { public: + template Lz4DeflatingWriteBuffer( - std::unique_ptr out_, + WriteBufferT && out_, int compression_level, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, - size_t alignment = 0); + size_t alignment = 0, + bool compress_empty_ = true) + : WriteBufferWithOwnMemoryDecorator(std::move(out_), buf_size, existing_memory, alignment) + , tmp_memory(buf_size) + , compress_empty(compress_empty_) + { + initialize(compression_level); + } ~Lz4DeflatingWriteBuffer() override; private: + void initialize(int compression_level); + void nextImpl() override; void finalizeBefore() override; @@ -35,5 +45,6 @@ private: Memory<> tmp_memory; bool first_time = true; + bool compress_empty = true; }; } diff --git a/src/IO/MMapReadBufferFromFile.cpp b/src/IO/MMapReadBufferFromFile.cpp index 0596eba565f..d3eb11c920d 100644 --- a/src/IO/MMapReadBufferFromFile.cpp +++ b/src/IO/MMapReadBufferFromFile.cpp @@ -29,8 +29,8 @@ void MMapReadBufferFromFile::open() fd = ::open(file_name.c_str(), O_RDONLY | O_CLOEXEC); if (-1 == fd) - throwFromErrnoWithPath("Cannot open file " + file_name, file_name, - errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); + ErrnoException::throwFromPath( + errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, file_name, "Cannot open file {}", file_name); } diff --git a/src/IO/MMapReadBufferFromFileWithCache.cpp b/src/IO/MMapReadBufferFromFileWithCache.cpp index d13cf5db2f7..d53f3bc325d 100644 --- a/src/IO/MMapReadBufferFromFileWithCache.cpp +++ b/src/IO/MMapReadBufferFromFileWithCache.cpp @@ -1,4 +1,5 @@ #include +#include namespace DB diff --git a/src/IO/MMappedFile.cpp b/src/IO/MMappedFile.cpp index 9e45140d5f9..7249a25decb 100644 --- a/src/IO/MMappedFile.cpp +++ b/src/IO/MMappedFile.cpp @@ -30,8 +30,8 @@ void MMappedFile::open() fd = ::open(file_name.c_str(), O_RDONLY | O_CLOEXEC); if (-1 == fd) - throwFromErrnoWithPath("Cannot open file " + file_name, file_name, - errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); + ErrnoException::throwFromPath( + errno == ENOENT ? 
ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, file_name, "Cannot open file {}", file_name); } diff --git a/src/IO/MMappedFileDescriptor.cpp b/src/IO/MMappedFileDescriptor.cpp index 9cc1aaf656c..ebc4e7a6bbb 100644 --- a/src/IO/MMappedFileDescriptor.cpp +++ b/src/IO/MMappedFileDescriptor.cpp @@ -28,7 +28,7 @@ static size_t getFileSize(int fd) { struct stat stat_res {}; if (0 != fstat(fd, &stat_res)) - throwFromErrno("MMappedFileDescriptor: Cannot fstat.", ErrorCodes::CANNOT_STAT); + throw ErrnoException(ErrorCodes::CANNOT_STAT, "MMappedFileDescriptor: Cannot fstat"); off_t file_size = stat_res.st_size; @@ -63,8 +63,7 @@ void MMappedFileDescriptor::set(int fd_, size_t offset_, size_t length_) void * buf = mmap(nullptr, length, PROT_READ, MAP_PRIVATE, fd, offset); if (MAP_FAILED == buf) - throwFromErrno(fmt::format("MMappedFileDescriptor: Cannot mmap {}.", ReadableSize(length)), - ErrorCodes::CANNOT_ALLOCATE_MEMORY); + throw ErrnoException(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "MMappedFileDescriptor: Cannot mmap {}", ReadableSize(length)); data = static_cast(buf); @@ -88,8 +87,7 @@ void MMappedFileDescriptor::finish() return; if (0 != munmap(data, length)) - throwFromErrno(fmt::format("MMappedFileDescriptor: Cannot munmap {}.", ReadableSize(length)), - ErrorCodes::CANNOT_MUNMAP); + throw ErrnoException(ErrorCodes::CANNOT_MUNMAP, "MMappedFileDescriptor: Cannot munmap {}", ReadableSize(length)); length = 0; diff --git a/src/IO/OpenedFile.cpp b/src/IO/OpenedFile.cpp index b75e087e5c3..4677a8259db 100644 --- a/src/IO/OpenedFile.cpp +++ b/src/IO/OpenedFile.cpp @@ -30,8 +30,8 @@ void OpenedFile::open() const fd = ::open(file_name.c_str(), (flags == -1 ? 0 : flags) | O_RDONLY | O_CLOEXEC); if (-1 == fd) - throwFromErrnoWithPath("Cannot open file " + file_name, file_name, - errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); + DB::ErrnoException::throwFromPath( + errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, file_name, "Cannot open file {}", file_name); } int OpenedFile::getFD() const diff --git a/src/IO/PeekableReadBuffer.cpp b/src/IO/PeekableReadBuffer.cpp index ce9c20e7a53..be650f2f3b4 100644 --- a/src/IO/PeekableReadBuffer.cpp +++ b/src/IO/PeekableReadBuffer.cpp @@ -20,33 +20,6 @@ PeekableReadBuffer::PeekableReadBuffer(ReadBuffer & sub_buf_, size_t start_size_ checkStateCorrect(); } -void PeekableReadBuffer::reset() -{ - checkStateCorrect(); -} - -void PeekableReadBuffer::setSubBuffer(ReadBuffer & sub_buf_) -{ - sub_buf = &sub_buf_; - resetImpl(); -} - -void PeekableReadBuffer::resetImpl() -{ - peeked_size = 0; - checkpoint = std::nullopt; - checkpoint_in_own_memory = false; - use_stack_memory = true; - - if (!currentlyReadFromOwnMemory()) - sub_buf->position() = pos; - - Buffer & sub_working = sub_buf->buffer(); - BufferBase::set(sub_working.begin(), sub_working.size(), sub_buf->offset()); - - checkStateCorrect(); -} - bool PeekableReadBuffer::peekNext() { checkStateCorrect(); diff --git a/src/IO/PeekableReadBuffer.h b/src/IO/PeekableReadBuffer.h index 78cb319327d..2ee209ffd6c 100644 --- a/src/IO/PeekableReadBuffer.h +++ b/src/IO/PeekableReadBuffer.h @@ -74,12 +74,6 @@ public: /// This data will be lost after destruction of peekable buffer. bool hasUnreadData() const; - // for streaming reading (like in Kafka) we need to restore initial state of the buffer - // without recreating the buffer. 
- void reset(); - - void setSubBuffer(ReadBuffer & sub_buf_); - const ReadBuffer & getSubBuffer() const { return *sub_buf; } private: diff --git a/src/IO/ReadBufferFromFile.cpp b/src/IO/ReadBufferFromFile.cpp index 79ac62c6421..cb987171bad 100644 --- a/src/IO/ReadBufferFromFile.cpp +++ b/src/IO/ReadBufferFromFile.cpp @@ -45,13 +45,13 @@ ReadBufferFromFile::ReadBufferFromFile( fd = ::open(file_name.c_str(), flags == -1 ? O_RDONLY | O_CLOEXEC : flags | O_CLOEXEC); if (-1 == fd) - throwFromErrnoWithPath("Cannot open file " + file_name, file_name, - errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); + ErrnoException::throwFromPath( + errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, file_name, "Cannot open file {}", file_name); #ifdef OS_DARWIN if (o_direct) { if (fcntl(fd, F_NOCACHE, 1) == -1) - throwFromErrnoWithPath("Cannot set F_NOCACHE on file " + file_name, file_name, ErrorCodes::CANNOT_OPEN_FILE); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_OPEN_FILE, file_name, "Cannot set F_NOCACHE on file {}", file_name); } #endif } diff --git a/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp index 5a67dc7528c..3211f8eeb35 100644 --- a/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/src/IO/ReadBufferFromFileDescriptor.cpp @@ -80,7 +80,8 @@ size_t ReadBufferFromFileDescriptor::readImpl(char * to, size_t min_bytes, size_ if (-1 == res && errno != EINTR) { ProfileEvents::increment(ProfileEvents::ReadBufferFromFileDescriptorReadFailed); - throwFromErrnoWithPath("Cannot read from file: " + getFileName(), getFileName(), ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR); + ErrnoException::throwFromPath( + ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, getFileName(), "Cannot read from file {}", getFileName()); } if (res > 0) @@ -145,7 +146,7 @@ void ReadBufferFromFileDescriptor::prefetch(Priority) /// Ask OS to prefetch data into page cache. if (0 != posix_fadvise(fd, file_offset_of_buffer_end, internal_buffer.size(), POSIX_FADV_WILLNEED)) - throwFromErrno("Cannot posix_fadvise", ErrorCodes::CANNOT_ADVISE); + throw ErrnoException(ErrorCodes::CANNOT_ADVISE, "Cannot posix_fadvise"); #endif } @@ -208,8 +209,12 @@ off_t ReadBufferFromFileDescriptor::seek(off_t offset, int whence) off_t res = ::lseek(fd, seek_pos, SEEK_SET); if (-1 == res) - throwFromErrnoWithPath(fmt::format("Cannot seek through file {} at offset {}", getFileName(), seek_pos), getFileName(), - ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + ErrnoException::throwFromPath( + ErrorCodes::CANNOT_SEEK_THROUGH_FILE, + getFileName(), + "Cannot seek through file {} at offset {}", + getFileName(), + seek_pos); /// Also note that seeking past the file size is not allowed. if (res != seek_pos) @@ -237,8 +242,8 @@ void ReadBufferFromFileDescriptor::rewind() ProfileEvents::increment(ProfileEvents::Seek); off_t res = ::lseek(fd, 0, SEEK_SET); if (-1 == res) - throwFromErrnoWithPath("Cannot seek through file " + getFileName(), getFileName(), - ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + ErrnoException::throwFromPath( + ErrorCodes::CANNOT_SEEK_THROUGH_FILE, getFileName(), "Cannot seek through file {}", getFileName()); } /// In case of pread, the ProfileEvents::Seek is not accounted, but it's Ok. 
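The hunks above all apply the same mechanical rewrite: throwFromErrno / throwFromErrnoWithPath are replaced by the ErrnoException API, with the error code first, then the path (for the *FromPath variant), then an fmt-style message. A minimal sketch of the new calling convention follows; the openForRead helper is illustrative only, and the Common/Exception.h header path is an assumption, not part of this patch.

#include <cerrno>
#include <string>
#include <fcntl.h>
#include <Common/Exception.h>   /// assumed location of DB::ErrnoException

namespace DB
{

namespace ErrorCodes
{
    extern const int FILE_DOESNT_EXIST;
    extern const int CANNOT_OPEN_FILE;
}

/// Illustrative only: errors that carry a path go through ErrnoException::throwFromPath;
/// errors without a path are thrown directly, e.g.
///     throw ErrnoException(ErrorCodes::CANNOT_STAT, "Cannot fstat");
int openForRead(const std::string & file_name)
{
    int fd = ::open(file_name.c_str(), O_RDONLY | O_CLOEXEC);
    if (-1 == fd)
        ErrnoException::throwFromPath(
            errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE,
            file_name, "Cannot open file {}", file_name);
    return fd;
}

}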
diff --git a/src/IO/ReadBufferFromIStream.cpp b/src/IO/ReadBufferFromIStream.cpp index e0c966fb700..3b3bdb5c564 100644 --- a/src/IO/ReadBufferFromIStream.cpp +++ b/src/IO/ReadBufferFromIStream.cpp @@ -34,6 +34,11 @@ bool ReadBufferFromIStream::nextImpl() ReadBufferFromIStream::ReadBufferFromIStream(std::istream & istr_, size_t size) : BufferWithOwnMemory(size), istr(istr_) { + /// - badbit will be set if some exception will be throw from ios implementation + /// - failbit can be set when for instance read() reads less data, so we + /// cannot set it, since we are requesting to read more data, then the + /// buffer has now. + istr.exceptions(std::ios::badbit); } } diff --git a/src/IO/ReadBufferFromPocoSocket.cpp b/src/IO/ReadBufferFromPocoSocket.cpp index ff72dc5386c..d399721d060 100644 --- a/src/IO/ReadBufferFromPocoSocket.cpp +++ b/src/IO/ReadBufferFromPocoSocket.cpp @@ -99,6 +99,9 @@ bool ReadBufferFromPocoSocket::nextImpl() if (bytes_read < 0) throw NetException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot read from socket ({})", peer_address.toString()); + if (read_event != ProfileEvents::end()) + ProfileEvents::increment(read_event, bytes_read); + if (bytes_read) working_buffer.resize(bytes_read); else @@ -111,10 +114,17 @@ ReadBufferFromPocoSocket::ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, : BufferWithOwnMemory(buf_size) , socket(socket_) , peer_address(socket.peerAddress()) + , read_event(ProfileEvents::end()) , socket_description("socket (" + peer_address.toString() + ")") { } +ReadBufferFromPocoSocket::ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size) + : ReadBufferFromPocoSocket(socket_, buf_size) +{ + read_event = read_event_; +} + bool ReadBufferFromPocoSocket::poll(size_t timeout_microseconds) const { if (available()) diff --git a/src/IO/ReadBufferFromPocoSocket.h b/src/IO/ReadBufferFromPocoSocket.h index dab4ac86295..76156612764 100644 --- a/src/IO/ReadBufferFromPocoSocket.h +++ b/src/IO/ReadBufferFromPocoSocket.h @@ -20,10 +20,13 @@ protected: */ Poco::Net::SocketAddress peer_address; + ProfileEvents::Event read_event; + bool nextImpl() override; public: explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); bool poll(size_t timeout_microseconds) const; diff --git a/src/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp index c9c9319c44c..619fd40edc3 100644 --- a/src/IO/ReadBufferFromS3.cpp +++ b/src/IO/ReadBufferFromS3.cpp @@ -196,7 +196,7 @@ bool ReadBufferFromS3::nextImpl() next_result = impl->next(); break; } - catch (Exception & e) + catch (Poco::Exception & e) { if (!processException(e, getPosition(), attempt) || last_attempt) throw; @@ -515,7 +515,9 @@ Aws::S3::Model::GetObjectResult ReadBufferFromS3::sendRequest(size_t attempt, si // We do not know in advance how many bytes we are going to consume, to avoid blocking estimated it from below constexpr ResourceCost estimated_cost = 1; ResourceGuard rlock(read_settings.resource_link, estimated_cost); + Aws::S3::Model::GetObjectOutcome outcome = client_ptr->GetObject(req); + rlock.unlock(); if (outcome.IsSuccess()) diff --git a/src/IO/ReadHelpers.cpp b/src/IO/ReadHelpers.cpp index 19750906fdb..ff5743a63af 100644 --- a/src/IO/ReadHelpers.cpp +++ b/src/IO/ReadHelpers.cpp @@ -1591,7 +1591,7 @@ void skipToNextRowOrEof(PeekableReadBuffer & buf, 
const String & row_after_delim if (skip_spaces) skipWhitespaceIfAny(buf); - if (checkString(row_between_delimiter, buf)) + if (buf.eof() || checkString(row_between_delimiter, buf)) break; } } diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 17f3d3d4151..bba0b694d23 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -41,6 +41,7 @@ #include #include +#include #include static constexpr auto DEFAULT_MAX_STRING_SIZE = 1_GiB; diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index 4c8a6cb020a..a8a31d82e56 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -120,6 +120,7 @@ struct ReadSettings size_t http_retry_initial_backoff_ms = 100; size_t http_retry_max_backoff_ms = 1600; bool http_skip_not_found_url_for_globs = true; + bool http_make_head_request = true; /// Monitoring bool for_object_storage = false; // to choose which profile events should be incremented diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index 6dd6269e16f..297d73303c0 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -808,6 +808,11 @@ std::optional ReadWriteBufferFromHTTPBase::tryGetLa template HTTPFileInfo ReadWriteBufferFromHTTPBase::getFileInfo() { + /// May be disabled in case the user knows in advance that the server doesn't support HEAD requests. + /// Allows to avoid making unnecessary requests in such cases. + if (!settings.http_make_head_request) + return HTTPFileInfo{}; + Poco::Net::HTTPResponse response; try { @@ -920,13 +925,12 @@ PooledReadWriteBufferFromHTTP::PooledReadWriteBufferFromHTTP( Poco::URI uri_, const std::string & method_, OutStreamCallback out_stream_callback_, - const ConnectionTimeouts & timeouts_, const Poco::Net::HTTPBasicCredentials & credentials_, size_t buffer_size_, const UInt64 max_redirects, - size_t max_connections_per_endpoint) + PooledSessionFactoryPtr session_factory) : Parent( - std::make_shared(uri_, max_redirects, std::make_shared(timeouts_, max_connections_per_endpoint)), + std::make_shared(uri_, max_redirects, session_factory), uri_, credentials_, method_, diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index 8f0e2388e5b..29c0804bb28 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -265,6 +265,8 @@ private: size_t per_endpoint_pool_size; }; +using PooledSessionFactoryPtr = std::shared_ptr; + class PooledReadWriteBufferFromHTTP : public detail::ReadWriteBufferFromHTTPBase>> { using SessionType = UpdatableSession; @@ -273,13 +275,12 @@ class PooledReadWriteBufferFromHTTP : public detail::ReadWriteBufferFromHTTPBase public: explicit PooledReadWriteBufferFromHTTP( Poco::URI uri_, - const std::string & method_ = {}, - OutStreamCallback out_stream_callback_ = {}, - const ConnectionTimeouts & timeouts_ = {}, - const Poco::Net::HTTPBasicCredentials & credentials_ = {}, - size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE, - const UInt64 max_redirects = 0, - size_t max_connections_per_endpoint = DEFAULT_COUNT_OF_HTTP_CONNECTIONS_PER_ENDPOINT); + const std::string & method_, + OutStreamCallback out_stream_callback_, + const Poco::Net::HTTPBasicCredentials & credentials_, + size_t buffer_size_, + const UInt64 max_redirects, + PooledSessionFactoryPtr session_factory); }; diff --git a/src/IO/S3/Client.cpp b/src/IO/S3/Client.cpp index 7658ea5941c..a65a82d9b40 100644 --- a/src/IO/S3/Client.cpp +++ b/src/IO/S3/Client.cpp @@ -125,12 +125,11 @@ std::unique_ptr Client::create( const std::shared_ptr & 
credentials_provider, const PocoHTTPClientConfiguration & client_configuration, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy sign_payloads, - bool use_virtual_addressing, - bool disable_checksum) + const ClientSettings & client_settings) { verifyClientConfiguration(client_configuration); return std::unique_ptr( - new Client(max_redirects_, std::move(sse_kms_config_), credentials_provider, client_configuration, sign_payloads, use_virtual_addressing, disable_checksum)); + new Client(max_redirects_, std::move(sse_kms_config_), credentials_provider, client_configuration, sign_payloads, client_settings)); } std::unique_ptr Client::clone() const @@ -160,14 +159,12 @@ Client::Client( const std::shared_ptr & credentials_provider_, const PocoHTTPClientConfiguration & client_configuration_, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy sign_payloads_, - bool use_virtual_addressing_, - bool disable_checksum_) - : Aws::S3::S3Client(credentials_provider_, client_configuration_, sign_payloads_, use_virtual_addressing_) + const ClientSettings & client_settings_) + : Aws::S3::S3Client(credentials_provider_, client_configuration_, sign_payloads_, client_settings_.use_virtual_addressing) , credentials_provider(credentials_provider_) , client_configuration(client_configuration_) , sign_payloads(sign_payloads_) - , use_virtual_addressing(use_virtual_addressing_) - , disable_checksum(disable_checksum_) + , client_settings(client_settings_) , max_redirects(max_redirects_) , sse_kms_config(std::move(sse_kms_config_)) , log(&Poco::Logger::get("S3Client")) @@ -207,13 +204,12 @@ Client::Client( Client::Client( const Client & other, const PocoHTTPClientConfiguration & client_configuration_) : Aws::S3::S3Client(other.credentials_provider, client_configuration_, other.sign_payloads, - other.use_virtual_addressing) + other.client_settings.use_virtual_addressing) , initial_endpoint(other.initial_endpoint) , credentials_provider(other.credentials_provider) , client_configuration(client_configuration_) , sign_payloads(other.sign_payloads) - , use_virtual_addressing(other.use_virtual_addressing) - , disable_checksum(other.disable_checksum) + , client_settings(other.client_settings) , explicit_region(other.explicit_region) , detect_region(other.detect_region) , provider_type(other.provider_type) @@ -417,7 +413,7 @@ Model::CompleteMultipartUploadOutcome Client::CompleteMultipartUpload(CompleteMu outcome = Aws::S3::Model::CompleteMultipartUploadOutcome(Aws::S3::Model::CompleteMultipartUploadResult()); } - if (outcome.IsSuccess() && provider_type == ProviderType::GCS) + if (outcome.IsSuccess() && provider_type == ProviderType::GCS && client_settings.gcs_issue_compose_request) { /// For GCS we will try to compose object at the end, otherwise we cannot do a native copy /// for the object (e.g. 
for backups) @@ -515,7 +511,7 @@ Client::doRequest(RequestType & request, RequestFn request_fn) const addAdditionalAMZHeadersToCanonicalHeadersList(request, client_configuration.extra_headers); const auto & bucket = request.GetBucket(); request.setApiMode(api_mode); - if (disable_checksum) + if (client_settings.disable_checksum) request.disableChecksum(); if (auto region = getRegionForBucket(bucket); !region.empty()) @@ -574,6 +570,9 @@ Client::doRequest(RequestType & request, RequestFn request_fn) const if (!new_uri) return result; + if (initial_endpoint.substr(11) == "amazonaws.com") // Check if user didn't mention any region + new_uri->addRegionToURI(request.getRegionOverride()); + const auto & current_uri_override = request.getURIOverride(); /// we already tried with this URI if (current_uri_override && current_uri_override->uri == new_uri->uri) @@ -849,8 +848,7 @@ ClientFactory & ClientFactory::instance() std::unique_ptr ClientFactory::create( // NOLINT const PocoHTTPClientConfiguration & cfg_, - bool is_virtual_hosted_style, - bool disable_checksum, + ClientSettings client_settings, const String & access_key_id, const String & secret_access_key, const String & server_side_encryption_customer_key_base64, @@ -889,14 +887,17 @@ std::unique_ptr ClientFactory::create( // NOLINT client_configuration.retryStrategy = std::make_shared(client_configuration.s3_retry_attempts); + /// Use virtual addressing if endpoint is not specified. + if (client_configuration.endpointOverride.empty()) + client_settings.use_virtual_addressing = true; + return Client::create( client_configuration.s3_max_redirects, std::move(sse_kms_config), credentials_provider, client_configuration, // Client configuration. Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, - is_virtual_hosted_style || client_configuration.endpointOverride.empty(), /// Use virtual addressing if endpoint is not specified. - disable_checksum + client_settings ); } diff --git a/src/IO/S3/Client.h b/src/IO/S3/Client.h index d0a21a2dafe..b137f0605dc 100644 --- a/src/IO/S3/Client.h +++ b/src/IO/S3/Client.h @@ -92,6 +92,23 @@ private: std::unordered_map> client_caches; }; +struct ClientSettings +{ + bool use_virtual_addressing; + /// Disable checksum to avoid extra read of the input stream + bool disable_checksum; + /// Should client send ComposeObject request after upload to GCS. + /// + /// Previously ComposeObject request was required to make Copy possible, + /// but not anymore (see [1]). + /// + /// [1]: https://cloud.google.com/storage/docs/release-notes#June_23_2023 + /// + /// Ability to enable it preserved since likely it is required for old + /// files. 
+ bool gcs_issue_compose_request; +}; + /// Client that improves the client from the AWS SDK /// - inject region and URI into requests so they are rerouted to the correct destination if needed /// - automatically detect endpoint and regions for each bucket and cache them @@ -116,8 +133,7 @@ public: const std::shared_ptr & credentials_provider, const PocoHTTPClientConfiguration & client_configuration, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy sign_payloads, - bool use_virtual_addressing, - bool disable_checksum); + const ClientSettings & client_settings); std::unique_ptr clone() const; @@ -195,7 +211,6 @@ public: Model::DeleteObjectsOutcome DeleteObjects(DeleteObjectsRequest & request) const; using ComposeObjectOutcome = Aws::Utils::Outcome; - ComposeObjectOutcome ComposeObject(ComposeObjectRequest & request) const; using Aws::S3::S3Client::EnableRequestProcessing; using Aws::S3::S3Client::DisableRequestProcessing; @@ -212,8 +227,7 @@ private: const std::shared_ptr & credentials_provider_, const PocoHTTPClientConfiguration & client_configuration, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy sign_payloads, - bool use_virtual_addressing, - bool disable_checksum_); + const ClientSettings & client_settings_); Client( const Client & other, const PocoHTTPClientConfiguration & client_configuration); @@ -236,6 +250,8 @@ private: using Aws::S3::S3Client::DeleteObject; using Aws::S3::S3Client::DeleteObjects; + ComposeObjectOutcome ComposeObject(ComposeObjectRequest & request) const; + template std::invoke_result_t doRequest(RequestType & request, RequestFn request_fn) const; @@ -258,8 +274,7 @@ private: std::shared_ptr credentials_provider; PocoHTTPClientConfiguration client_configuration; Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy sign_payloads; - bool use_virtual_addressing; - bool disable_checksum; + ClientSettings client_settings; std::string explicit_region; mutable bool detect_region = true; @@ -289,8 +304,7 @@ public: std::unique_ptr create( const PocoHTTPClientConfiguration & cfg, - bool is_virtual_hosted_style, - bool disable_checksum, + ClientSettings client_settings, const String & access_key_id, const String & secret_access_key, const String & server_side_encryption_customer_key_base64, diff --git a/src/IO/S3/PocoHTTPClientFactory.cpp b/src/IO/S3/PocoHTTPClientFactory.cpp index 9dd52a263b0..ef7af2d01ba 100644 --- a/src/IO/S3/PocoHTTPClientFactory.cpp +++ b/src/IO/S3/PocoHTTPClientFactory.cpp @@ -13,9 +13,9 @@ namespace DB::S3 { std::shared_ptr -PocoHTTPClientFactory::CreateHttpClient(const Aws::Client::ClientConfiguration & clientConfiguration) const +PocoHTTPClientFactory::CreateHttpClient(const Aws::Client::ClientConfiguration & client_configuration) const { - return std::make_shared(static_cast(clientConfiguration)); + return std::make_shared(static_cast(client_configuration)); } std::shared_ptr PocoHTTPClientFactory::CreateHttpRequest( diff --git a/src/IO/S3/PocoHTTPClientFactory.h b/src/IO/S3/PocoHTTPClientFactory.h index 4e555f05502..60704332e7b 100644 --- a/src/IO/S3/PocoHTTPClientFactory.h +++ b/src/IO/S3/PocoHTTPClientFactory.h @@ -15,7 +15,7 @@ class PocoHTTPClientFactory : public Aws::Http::HttpClientFactory public: ~PocoHTTPClientFactory() override = default; [[nodiscard]] std::shared_ptr - CreateHttpClient(const Aws::Client::ClientConfiguration & clientConfiguration) const override; + CreateHttpClient(const Aws::Client::ClientConfiguration & client_configuration) const override; [[nodiscard]] std::shared_ptr CreateHttpRequest(const Aws::String & uri, 
Aws::Http::HttpMethod method, const Aws::IOStreamFactory & streamFactory) const override; [[nodiscard]] std::shared_ptr diff --git a/src/IO/S3/Requests.h b/src/IO/S3/Requests.h index eae45491fe6..bfb94a5a67e 100644 --- a/src/IO/S3/Requests.h +++ b/src/IO/S3/Requests.h @@ -58,6 +58,11 @@ public: return BaseRequest::GetChecksumAlgorithmName(); } + std::string getRegionOverride() const + { + return region_override; + } + void overrideRegion(std::string region) const { region_override = std::move(region); diff --git a/src/IO/S3/URI.cpp b/src/IO/S3/URI.cpp index e05e0882329..e990875dd2f 100644 --- a/src/IO/S3/URI.cpp +++ b/src/IO/S3/URI.cpp @@ -146,6 +146,12 @@ URI::URI(const std::string & uri_) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bucket or key name are invalid in S3 URI."); } +void URI::addRegionToURI(const std::string ®ion) +{ + if (auto pos = endpoint.find("amazonaws.com"); pos != std::string::npos) + endpoint = endpoint.substr(0, pos) + region + "." + endpoint.substr(pos); +} + void URI::validateBucket(const String & bucket, const Poco::URI & uri) { /// S3 specification requires at least 3 and at most 63 characters in bucket name. diff --git a/src/IO/S3/URI.h b/src/IO/S3/URI.h index f8f40cf9108..2873728bc78 100644 --- a/src/IO/S3/URI.h +++ b/src/IO/S3/URI.h @@ -32,6 +32,7 @@ struct URI URI() = default; explicit URI(const std::string & uri_); + void addRegionToURI(const std::string & region); static void validateBucket(const std::string & bucket, const Poco::URI & uri); }; diff --git a/src/IO/S3/copyS3File.cpp b/src/IO/S3/copyS3File.cpp index 819c345938d..25de61360fe 100644 --- a/src/IO/S3/copyS3File.cpp +++ b/src/IO/S3/copyS3File.cpp @@ -655,6 +655,7 @@ namespace void performCopy() { + LOG_TEST(log, "Copy object {} to {} using native copy", src_key, dest_key); if (!supports_multipart_copy || size <= upload_settings.max_single_operation_copy_size) performSingleOperationCopy(); else diff --git a/src/IO/S3/tests/gtest_aws_s3_client.cpp b/src/IO/S3/tests/gtest_aws_s3_client.cpp index 0b44698ac2c..33917314bca 100644 --- a/src/IO/S3/tests/gtest_aws_s3_client.cpp +++ b/src/IO/S3/tests/gtest_aws_s3_client.cpp @@ -94,7 +94,7 @@ void doWriteRequest(std::shared_ptr client, const DB::S3:: client, uri.bucket, uri.key, - DBMS_DEFAULT_BUFFER_SIZE, + DB::DBMS_DEFAULT_BUFFER_SIZE, request_settings, {} ); @@ -140,10 +140,15 @@ void testServerSideEncryption( bool use_environment_credentials = false; bool use_insecure_imds_request = false; + DB::S3::ClientSettings client_settings{ + .use_virtual_addressing = uri.is_virtual_hosted_style, + .disable_checksum = disable_checksum, + .gcs_issue_compose_request = false, + }; + std::shared_ptr client = DB::S3::ClientFactory::instance().create( client_configuration, - uri.is_virtual_hosted_style, - disable_checksum, + client_settings, access_key_id, secret_access_key, server_side_encryption_customer_key_base64, diff --git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp index ffd6b6d711f..96ad6413ef5 100644 --- a/src/IO/S3Common.cpp +++ b/src/IO/S3Common.cpp @@ -109,6 +109,8 @@ AuthSettings AuthSettings::loadFromConfig(const std::string & config_elem, const { auto access_key_id = config.getString(config_elem + ".access_key_id", ""); auto secret_access_key = config.getString(config_elem + ".secret_access_key", ""); + auto session_token = config.getString(config_elem + ".session_token", ""); + auto region = config.getString(config_elem + ".region", ""); auto server_side_encryption_customer_key_base64 = config.getString(config_elem + 
".server_side_encryption_customer_key_base64", ""); @@ -133,7 +135,7 @@ AuthSettings AuthSettings::loadFromConfig(const std::string & config_elem, const return AuthSettings { - std::move(access_key_id), std::move(secret_access_key), + std::move(access_key_id), std::move(secret_access_key), std::move(session_token), std::move(region), std::move(server_side_encryption_customer_key_base64), std::move(sse_kms_config), @@ -155,6 +157,8 @@ void AuthSettings::updateFrom(const AuthSettings & from) access_key_id = from.access_key_id; if (!from.secret_access_key.empty()) secret_access_key = from.secret_access_key; + if (!from.session_token.empty()) + session_token = from.session_token; headers = from.headers; region = from.region; diff --git a/src/IO/S3Common.h b/src/IO/S3Common.h index 71d52c727c7..ebfc07a3976 100644 --- a/src/IO/S3Common.h +++ b/src/IO/S3Common.h @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -79,6 +80,7 @@ struct AuthSettings std::string access_key_id; std::string secret_access_key; + std::string session_token; std::string region; std::string server_side_encryption_customer_key_base64; ServerSideEncryptionKMSConfig server_side_encryption_kms_config; diff --git a/src/IO/SharedThreadPools.cpp b/src/IO/SharedThreadPools.cpp index 6af5aab7a38..c8506663bc8 100644 --- a/src/IO/SharedThreadPools.cpp +++ b/src/IO/SharedThreadPools.cpp @@ -66,6 +66,8 @@ void StaticThreadPool::reloadConfiguration(size_t max_threads, size_t max_free_t if (!instance) throw Exception(ErrorCodes::LOGICAL_ERROR, "The {} is not initialized", name); + std::lock_guard lock(mutex); + instance->setMaxThreads(turbo_mode_enabled > 0 ? max_threads_turbo : max_threads); instance->setMaxFreeThreads(max_free_threads); instance->setQueueSize(queue_size); diff --git a/src/IO/SynchronousReader.cpp b/src/IO/SynchronousReader.cpp index 07cc2fd0778..5061439bfd6 100644 --- a/src/IO/SynchronousReader.cpp +++ b/src/IO/SynchronousReader.cpp @@ -43,7 +43,7 @@ std::future SynchronousReader::submit(Request reque #if defined(POSIX_FADV_WILLNEED) int fd = assert_cast(*request.descriptor).fd; if (0 != posix_fadvise(fd, request.offset, request.size, POSIX_FADV_WILLNEED)) - throwFromErrno("Cannot posix_fadvise", ErrorCodes::CANNOT_ADVISE); + throw ErrnoException(ErrorCodes::CANNOT_ADVISE, "Cannot posix_fadvise"); #endif return std::async(std::launch::deferred, [request, this] @@ -73,7 +73,7 @@ IAsynchronousReader::Result SynchronousReader::execute(Request request) if (-1 == res && errno != EINTR) { ProfileEvents::increment(ProfileEvents::ReadBufferFromFileDescriptorReadFailed); - throwFromErrno(fmt::format("Cannot read from file {}", fd), ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR); + throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR, "Cannot read from file {}", fd); } if (res > 0) diff --git a/src/IO/WriteBufferDecorator.h b/src/IO/WriteBufferDecorator.h index 7c984eeea8d..ee47834b7af 100644 --- a/src/IO/WriteBufferDecorator.h +++ b/src/IO/WriteBufferDecorator.h @@ -12,13 +12,21 @@ class WriteBuffer; /// WriteBuffer that decorates data and delegates it to underlying buffer. /// It's used for writing compressed and encrypted data +/// This class can own or not own underlying buffer - constructor will differentiate +/// std::unique_ptr for owning and WriteBuffer* for not owning. template class WriteBufferDecorator : public Base { public: template explicit WriteBufferDecorator(std::unique_ptr out_, BaseArgs && ... 
args) - : Base(std::forward(args)...), out(std::move(out_)) + : Base(std::forward(args)...), owning_holder(std::move(out_)), out(owning_holder.get()) + { + } + + template + explicit WriteBufferDecorator(WriteBuffer * out_, BaseArgs && ... args) + : Base(std::forward(args)...), out(out_) { } @@ -38,7 +46,7 @@ public: } } - WriteBuffer * getNestedBuffer() { return out.get(); } + WriteBuffer * getNestedBuffer() { return out; } protected: /// Do some finalization before finalization of underlying buffer. @@ -47,7 +55,8 @@ protected: /// Do some finalization after finalization of underlying buffer. virtual void finalizeAfter() {} - std::unique_ptr out; + std::unique_ptr owning_holder; + WriteBuffer * out; }; using WriteBufferWithOwnMemoryDecorator = WriteBufferDecorator>; diff --git a/src/IO/WriteBufferFromEncryptedFile.h b/src/IO/WriteBufferFromEncryptedFile.h index 25dd54ca9d5..f8f864d00a6 100644 --- a/src/IO/WriteBufferFromEncryptedFile.h +++ b/src/IO/WriteBufferFromEncryptedFile.h @@ -28,7 +28,7 @@ public: void sync() override; - std::string getFileName() const override { return assert_cast(out.get())->getFileName(); } + std::string getFileName() const override { return assert_cast(out)->getFileName(); } private: void nextImpl() override; diff --git a/src/IO/WriteBufferFromFile.cpp b/src/IO/WriteBufferFromFile.cpp index 97059ff8f48..0ca6c26f08c 100644 --- a/src/IO/WriteBufferFromFile.cpp +++ b/src/IO/WriteBufferFromFile.cpp @@ -46,14 +46,14 @@ WriteBufferFromFile::WriteBufferFromFile( fd = ::open(file_name.c_str(), flags == -1 ? O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC : flags | O_CLOEXEC, mode); if (-1 == fd) - throwFromErrnoWithPath("Cannot open file " + file_name, file_name, - errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); + ErrnoException::throwFromPath( + errno == ENOENT ? 
ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE, file_name, "Cannot open file {}", file_name); #ifdef OS_DARWIN if (o_direct) { if (fcntl(fd, F_NOCACHE, 1) == -1) - throwFromErrnoWithPath("Cannot set F_NOCACHE on file " + file_name, file_name, ErrorCodes::CANNOT_OPEN_FILE); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_OPEN_FILE, file_name, "Cannot set F_NOCACHE on file {}", file_name); } #endif } diff --git a/src/IO/WriteBufferFromFileDescriptor.cpp b/src/IO/WriteBufferFromFileDescriptor.cpp index 135ff608967..813ef0deab9 100644 --- a/src/IO/WriteBufferFromFileDescriptor.cpp +++ b/src/IO/WriteBufferFromFileDescriptor.cpp @@ -69,8 +69,8 @@ void WriteBufferFromFileDescriptor::nextImpl() String error_file_name = file_name; if (error_file_name.empty()) error_file_name = "(fd = " + toString(fd) + ")"; - throwFromErrnoWithPath("Cannot write to file " + error_file_name, error_file_name, - ErrorCodes::CANNOT_WRITE_TO_FILE_DESCRIPTOR); + ErrnoException::throwFromPath( + ErrorCodes::CANNOT_WRITE_TO_FILE_DESCRIPTOR, error_file_name, "Cannot write to file {}", error_file_name); } if (res > 0) @@ -137,7 +137,7 @@ void WriteBufferFromFileDescriptor::sync() ProfileEvents::increment(ProfileEvents::FileSyncElapsedMicroseconds, watch.elapsedMicroseconds()); if (-1 == res) - throwFromErrnoWithPath("Cannot fsync " + getFileName(), getFileName(), ErrorCodes::CANNOT_FSYNC); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_FSYNC, getFileName(), "Cannot fsync {}", getFileName()); } @@ -145,8 +145,7 @@ off_t WriteBufferFromFileDescriptor::seek(off_t offset, int whence) // NOLINT { off_t res = lseek(fd, offset, whence); if (-1 == res) - throwFromErrnoWithPath("Cannot seek through file " + getFileName(), getFileName(), - ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, getFileName(), "Cannot seek through {}", getFileName()); return res; } @@ -154,7 +153,7 @@ void WriteBufferFromFileDescriptor::truncate(off_t length) // NOLINT { int res = ftruncate(fd, length); if (-1 == res) - throwFromErrnoWithPath("Cannot truncate file " + getFileName(), getFileName(), ErrorCodes::CANNOT_TRUNCATE_FILE); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_TRUNCATE_FILE, getFileName(), "Cannot truncate file {}", getFileName()); } @@ -163,7 +162,7 @@ off_t WriteBufferFromFileDescriptor::size() const struct stat buf; int res = fstat(fd, &buf); if (-1 == res) - throwFromErrnoWithPath("Cannot execute fstat " + getFileName(), getFileName(), ErrorCodes::CANNOT_FSTAT); + ErrnoException::throwFromPath(ErrorCodes::CANNOT_FSTAT, getFileName(), "Cannot execute fstat {}", getFileName()); return buf.st_size; } diff --git a/src/IO/WriteBufferFromPocoSocket.cpp b/src/IO/WriteBufferFromPocoSocket.cpp index 171e7f1ce69..10d9fd131cd 100644 --- a/src/IO/WriteBufferFromPocoSocket.cpp +++ b/src/IO/WriteBufferFromPocoSocket.cpp @@ -34,6 +34,97 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } +ssize_t WriteBufferFromPocoSocket::socketSendBytesImpl(const char * ptr, size_t size) +{ + ssize_t res = 0; + + /// If async_callback is specified, set socket to non-blocking mode + /// and try to write data to it, if socket is not ready for writing, + /// run async_callback and try again later. + /// It is expected that file descriptor may be polled externally. + /// Note that send timeout is not checked here. External code should check it while polling. + if (async_callback) + { + socket.setBlocking(false); + /// Set socket to blocking mode at the end. 
+ SCOPE_EXIT(socket.setBlocking(true)); + bool secure = socket.secure(); + res = socket.impl()->sendBytes(ptr, static_cast(size)); + + /// Check EAGAIN and ERR_SSL_WANT_WRITE/ERR_SSL_WANT_READ for secure socket (writing to secure socket can read too). + while (res < 0 && (errno == EAGAIN || (secure && (checkSSLWantRead(res) || checkSSLWantWrite(res))))) + { + /// In case of ERR_SSL_WANT_READ we should wait for socket to be ready for reading, otherwise - for writing. + if (secure && checkSSLWantRead(res)) + async_callback(socket.impl()->sockfd(), socket.getReceiveTimeout(), AsyncEventTimeoutType::RECEIVE, socket_description, AsyncTaskExecutor::Event::READ | AsyncTaskExecutor::Event::ERROR); + else + async_callback(socket.impl()->sockfd(), socket.getSendTimeout(), AsyncEventTimeoutType::SEND, socket_description, AsyncTaskExecutor::Event::WRITE | AsyncTaskExecutor::Event::ERROR); + + /// Try to write again. + res = socket.impl()->sendBytes(ptr, static_cast(size)); + } + } + else + { + res = socket.impl()->sendBytes(ptr, static_cast(size)); + } + + return res; +} + +void WriteBufferFromPocoSocket::socketSendBytes(const char * ptr, size_t size) +{ + if (!size) + return; + + Stopwatch watch; + size_t bytes_written = 0; + + SCOPE_EXIT({ + ProfileEvents::increment(ProfileEvents::NetworkSendElapsedMicroseconds, watch.elapsedMicroseconds()); + ProfileEvents::increment(ProfileEvents::NetworkSendBytes, bytes_written); + if (write_event != ProfileEvents::end()) + ProfileEvents::increment(write_event, bytes_written); + }); + + while (bytes_written < size) + { + ssize_t res = 0; + + /// Add more details to exceptions. + try + { + CurrentMetrics::Increment metric_increment(CurrentMetrics::NetworkSend); + if (size > INT_MAX) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Buffer overflow"); + + res = socketSendBytesImpl(ptr + bytes_written, size - bytes_written); + } + catch (const Poco::Net::NetException & e) + { + throw NetException(ErrorCodes::NETWORK_ERROR, "{}, while writing to socket ({} -> {})", e.displayText(), + our_address.toString(), peer_address.toString()); + } + catch (const Poco::TimeoutException &) + { + throw NetException(ErrorCodes::SOCKET_TIMEOUT, "Timeout exceeded while writing to socket ({}, {} ms)", + peer_address.toString(), + socket.impl()->getSendTimeout().totalMilliseconds()); + } + catch (const Poco::IOException & e) + { + throw NetException(ErrorCodes::NETWORK_ERROR, "{}, while writing to socket ({} -> {})", e.displayText(), + our_address.toString(), peer_address.toString()); + } + + if (res < 0) + throw NetException(ErrorCodes::CANNOT_WRITE_TO_SOCKET, "Cannot write to socket ({} -> {})", + our_address.toString(), peer_address.toString()); + + bytes_written += res; + } +} + void WriteBufferFromPocoSocket::nextImpl() { if (!offset()) @@ -60,36 +151,7 @@ void WriteBufferFromPocoSocket::nextImpl() if (size > INT_MAX) throw Exception(ErrorCodes::LOGICAL_ERROR, "Buffer overflow"); - /// If async_callback is specified, set socket to non-blocking mode - /// and try to write data to it, if socket is not ready for writing, - /// run async_callback and try again later. - /// It is expected that file descriptor may be polled externally. - /// Note that send timeout is not checked here. External code should check it while polling. - if (async_callback) - { - socket.setBlocking(false); - /// Set socket to blocking mode at the end. 
- SCOPE_EXIT(socket.setBlocking(true)); - bool secure = socket.secure(); - res = socket.impl()->sendBytes(pos, static_cast(size)); - - /// Check EAGAIN and ERR_SSL_WANT_WRITE/ERR_SSL_WANT_READ for secure socket (writing to secure socket can read too). - while (res < 0 && (errno == EAGAIN || (secure && (checkSSLWantRead(res) || checkSSLWantWrite(res))))) - { - /// In case of ERR_SSL_WANT_READ we should wait for socket to be ready for reading, otherwise - for writing. - if (secure && checkSSLWantRead(res)) - async_callback(socket.impl()->sockfd(), socket.getReceiveTimeout(), AsyncEventTimeoutType::RECEIVE, socket_description, AsyncTaskExecutor::Event::READ | AsyncTaskExecutor::Event::ERROR); - else - async_callback(socket.impl()->sockfd(), socket.getSendTimeout(), AsyncEventTimeoutType::SEND, socket_description, AsyncTaskExecutor::Event::WRITE | AsyncTaskExecutor::Event::ERROR); - - /// Try to write again. - res = socket.impl()->sendBytes(pos, static_cast(size)); - } - } - else - { - res = socket.impl()->sendBytes(pos, static_cast(size)); - } + res = socketSendBytesImpl(pos, size); } catch (const Poco::Net::NetException & e) { @@ -125,6 +187,12 @@ WriteBufferFromPocoSocket::WriteBufferFromPocoSocket(Poco::Net::Socket & socket_ { } +WriteBufferFromPocoSocket::WriteBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size) + : WriteBufferFromPocoSocket(socket_, buf_size) +{ + write_event = write_event_; +} + WriteBufferFromPocoSocket::~WriteBufferFromPocoSocket() { try diff --git a/src/IO/WriteBufferFromPocoSocket.h b/src/IO/WriteBufferFromPocoSocket.h index ecb61020357..9c5509aebd1 100644 --- a/src/IO/WriteBufferFromPocoSocket.h +++ b/src/IO/WriteBufferFromPocoSocket.h @@ -17,14 +17,33 @@ class WriteBufferFromPocoSocket : public BufferWithOwnMemory { public: explicit WriteBufferFromPocoSocket(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + explicit WriteBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); ~WriteBufferFromPocoSocket() override; void setAsyncCallback(AsyncCallback async_callback_) { async_callback = std::move(async_callback_); } + using WriteBuffer::write; + void write(const std::string & str) { WriteBuffer::write(str.c_str(), str.size()); } + void write(std::string_view str) { WriteBuffer::write(str.data(), str.size()); } + void write(const char * str) { WriteBuffer::write(str, strlen(str)); } + void writeln(const std::string & str) { write(str); WriteBuffer::write("\n", 1); } + void writeln(std::string_view str) { write(str); WriteBuffer::write("\n", 1); } + void writeln(const char * str) { write(str); WriteBuffer::write("\n", 1); } + protected: void nextImpl() override; + void socketSendBytes(const char * ptr, size_t size); + void socketSendStr(const std::string & str) + { + return socketSendBytes(str.data(), str.size()); + } + void socketSendStr(const char * ptr) + { + return socketSendBytes(ptr, strlen(ptr)); + } + Poco::Net::Socket & socket; /** For error messages. 
It is necessary to receive this address in advance, because, @@ -34,9 +53,13 @@ protected: Poco::Net::SocketAddress peer_address; Poco::Net::SocketAddress our_address; + ProfileEvents::Event write_event; + private: AsyncCallback async_callback; std::string socket_description; + + ssize_t socketSendBytesImpl(const char * ptr, size_t size); }; } diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index 094352638e6..b4f8b476b11 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -63,9 +63,7 @@ namespace ErrorCodes inline void writeChar(char x, WriteBuffer & buf) { - buf.nextIfAtEnd(); - *buf.position() = x; - ++buf.position(); + buf.write(x); } /// Write the same character n times. diff --git a/src/IO/ZlibDeflatingWriteBuffer.cpp b/src/IO/ZlibDeflatingWriteBuffer.cpp index 6e4ab742413..ab6763fe6a6 100644 --- a/src/IO/ZlibDeflatingWriteBuffer.cpp +++ b/src/IO/ZlibDeflatingWriteBuffer.cpp @@ -10,36 +10,6 @@ namespace ErrorCodes extern const int ZLIB_DEFLATE_FAILED; } - -ZlibDeflatingWriteBuffer::ZlibDeflatingWriteBuffer( - std::unique_ptr out_, - CompressionMethod compression_method, - int compression_level, - size_t buf_size, - char * existing_memory, - size_t alignment) - : WriteBufferWithOwnMemoryDecorator(std::move(out_), buf_size, existing_memory, alignment) -{ - zstr.zalloc = nullptr; - zstr.zfree = nullptr; - zstr.opaque = nullptr; - zstr.next_in = nullptr; - zstr.avail_in = 0; - zstr.next_out = nullptr; - zstr.avail_out = 0; - - int window_bits = 15; - if (compression_method == CompressionMethod::Gzip) - { - window_bits += 16; - } - - int rc = deflateInit2(&zstr, compression_level, Z_DEFLATED, window_bits, 8, Z_DEFAULT_STRATEGY); - - if (rc != Z_OK) - throw Exception(ErrorCodes::ZLIB_DEFLATE_FAILED, "deflateInit2 failed: {}; zlib version: {}", zError(rc), ZLIB_VERSION); -} - void ZlibDeflatingWriteBuffer::nextImpl() { if (!offset()) @@ -82,6 +52,10 @@ void ZlibDeflatingWriteBuffer::finalizeBefore() { next(); + /// Don't write out if no data was ever compressed + if (!compress_empty && zstr.total_out == 0) + return; + /// https://github.com/zlib-ng/zlib-ng/issues/494 do { diff --git a/src/IO/ZlibDeflatingWriteBuffer.h b/src/IO/ZlibDeflatingWriteBuffer.h index 58e709b54e6..f01c41c7d13 100644 --- a/src/IO/ZlibDeflatingWriteBuffer.h +++ b/src/IO/ZlibDeflatingWriteBuffer.h @@ -12,17 +12,45 @@ namespace DB { +namespace ErrorCodes +{ + extern const int ZLIB_DEFLATE_FAILED; +} + /// Performs compression using zlib library and writes compressed data to out_ WriteBuffer. 
class ZlibDeflatingWriteBuffer : public WriteBufferWithOwnMemoryDecorator { public: + template ZlibDeflatingWriteBuffer( - std::unique_ptr out_, + WriteBufferT && out_, CompressionMethod compression_method, int compression_level, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, - size_t alignment = 0); + size_t alignment = 0, + bool compress_empty_ = true) + : WriteBufferWithOwnMemoryDecorator(std::move(out_), buf_size, existing_memory, alignment), compress_empty(compress_empty_) + { + zstr.zalloc = nullptr; + zstr.zfree = nullptr; + zstr.opaque = nullptr; + zstr.next_in = nullptr; + zstr.avail_in = 0; + zstr.next_out = nullptr; + zstr.avail_out = 0; + + int window_bits = 15; + if (compression_method == CompressionMethod::Gzip) + { + window_bits += 16; + } + + int rc = deflateInit2(&zstr, compression_level, Z_DEFLATED, window_bits, 8, Z_DEFAULT_STRATEGY); + + if (rc != Z_OK) + throw Exception(ErrorCodes::ZLIB_DEFLATE_FAILED, "deflateInit2 failed: {}; zlib version: {}", zError(rc), ZLIB_VERSION); + } ~ZlibDeflatingWriteBuffer() override; @@ -36,6 +64,7 @@ private: virtual void finalizeAfter() override; z_stream zstr; + bool compress_empty = true; }; } diff --git a/src/IO/ZstdDeflatingWriteBuffer.cpp b/src/IO/ZstdDeflatingWriteBuffer.cpp index 949d65926b3..bad6e733cf1 100644 --- a/src/IO/ZstdDeflatingWriteBuffer.cpp +++ b/src/IO/ZstdDeflatingWriteBuffer.cpp @@ -8,9 +8,7 @@ namespace ErrorCodes extern const int ZSTD_ENCODER_FAILED; } -ZstdDeflatingWriteBuffer::ZstdDeflatingWriteBuffer( - std::unique_ptr out_, int compression_level, size_t buf_size, char * existing_memory, size_t alignment) - : WriteBufferWithOwnMemoryDecorator(std::move(out_), buf_size, existing_memory, alignment) +void ZstdDeflatingWriteBuffer::initialize(int compression_level) { cctx = ZSTD_createCCtx(); if (cctx == nullptr) @@ -44,6 +42,7 @@ void ZstdDeflatingWriteBuffer::flush(ZSTD_EndDirective mode) try { + size_t out_offset = out->offset(); bool ended = false; do { @@ -67,6 +66,8 @@ void ZstdDeflatingWriteBuffer::flush(ZSTD_EndDirective mode) ended = everything_was_compressed && everything_was_flushed; } while (!ended); + + total_out += out->offset() - out_offset; } catch (...) { @@ -84,6 +85,9 @@ void ZstdDeflatingWriteBuffer::nextImpl() void ZstdDeflatingWriteBuffer::finalizeBefore() { + /// Don't write out if no data was ever compressed + if (!compress_empty && total_out == 0) + return; flush(ZSTD_e_end); } diff --git a/src/IO/ZstdDeflatingWriteBuffer.h b/src/IO/ZstdDeflatingWriteBuffer.h index a66d6085a74..d25db515d28 100644 --- a/src/IO/ZstdDeflatingWriteBuffer.h +++ b/src/IO/ZstdDeflatingWriteBuffer.h @@ -14,12 +14,18 @@ namespace DB class ZstdDeflatingWriteBuffer : public WriteBufferWithOwnMemoryDecorator { public: + template ZstdDeflatingWriteBuffer( - std::unique_ptr out_, + WriteBufferT && out_, int compression_level, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, char * existing_memory = nullptr, - size_t alignment = 0); + size_t alignment = 0, + bool compress_empty_ = true) + : WriteBufferWithOwnMemoryDecorator(std::move(out_), buf_size, existing_memory, alignment), compress_empty(compress_empty_) + { + initialize(compression_level); + } ~ZstdDeflatingWriteBuffer() override; @@ -29,6 +35,8 @@ public: } private: + void initialize(int compression_level); + void nextImpl() override; /// Flush all pending data and write zstd footer to the underlying buffer. 
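The deflating write buffers above now accept a compress_empty_ flag and, through the templated constructors, either an owning std::unique_ptr<WriteBuffer> or a non-owning WriteBuffer *. A minimal usage sketch of the owning wrapWriteBufferWithCompressionMethod overload declared earlier in this patch; the file path, Zstd choice and level are illustrative, and the intended effect (an empty payload leaves the sink empty because finalizeBefore() returns early) follows from the changes above rather than from code shown here.

#include <memory>
#include <string>
#include <IO/CompressionMethod.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/WriteHelpers.h>

/// Sketch only: wrap a file sink so that, with compress_empty = false,
/// finalizing without having written anything emits no compression frame.
void writeMaybeEmpty(const std::string & path, const std::string & payload)
{
    auto file = std::make_unique<DB::WriteBufferFromFile>(path);
    auto out = DB::wrapWriteBufferWithCompressionMethod(
        std::move(file), DB::CompressionMethod::Zstd, /*level*/ 3,
        DB::DBMS_DEFAULT_BUFFER_SIZE, /*existing_memory*/ nullptr, /*alignment*/ 0,
        /*compress_empty*/ false);
    if (!payload.empty())
        DB::writeString(payload, *out);
    out->finalize();
}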
@@ -42,6 +50,9 @@ private: ZSTD_CCtx * cctx; ZSTD_inBuffer input; ZSTD_outBuffer output; + + size_t total_out = 0; + bool compress_empty = true; }; } diff --git a/src/IO/examples/lzma_buffers.cpp b/src/IO/examples/lzma_buffers.cpp index 126a192737b..f9e4fc0c5db 100644 --- a/src/IO/examples/lzma_buffers.cpp +++ b/src/IO/examples/lzma_buffers.cpp @@ -19,7 +19,7 @@ try { auto buf - = std::make_unique("test_lzma_buffers.xz", DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_CREAT | O_TRUNC); + = std::make_unique("test_lzma_buffers.xz", DB::DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_CREAT | O_TRUNC); DB::LZMADeflatingWriteBuffer lzma_buf(std::move(buf), /*compression level*/ 3); stopwatch.restart(); diff --git a/src/IO/examples/zlib_buffers.cpp b/src/IO/examples/zlib_buffers.cpp index a36b7a7a41d..1497e2c3f8e 100644 --- a/src/IO/examples/zlib_buffers.cpp +++ b/src/IO/examples/zlib_buffers.cpp @@ -21,7 +21,7 @@ try Stopwatch stopwatch; { - auto buf = std::make_unique("test_zlib_buffers.gz", DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_CREAT | O_TRUNC); + auto buf = std::make_unique("test_zlib_buffers.gz", DB::DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_CREAT | O_TRUNC); DB::ZlibDeflatingWriteBuffer deflating_buf(std::move(buf), DB::CompressionMethod::Gzip, /* compression_level = */ 3); stopwatch.restart(); diff --git a/src/IO/examples/zstd_buffers.cpp b/src/IO/examples/zstd_buffers.cpp index 26c8899605a..dc9913b81a6 100644 --- a/src/IO/examples/zstd_buffers.cpp +++ b/src/IO/examples/zstd_buffers.cpp @@ -21,7 +21,7 @@ try { auto buf - = std::make_unique("test_zstd_buffers.zst", DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_CREAT | O_TRUNC); + = std::make_unique("test_zstd_buffers.zst", DB::DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_CREAT | O_TRUNC); DB::ZstdDeflatingWriteBuffer zstd_buf(std::move(buf), /*compression level*/ 3); stopwatch.restart(); diff --git a/src/IO/tests/gtest_archive_reader_and_writer.cpp b/src/IO/tests/gtest_archive_reader_and_writer.cpp index b48955c25e7..37fbdff901a 100644 --- a/src/IO/tests/gtest_archive_reader_and_writer.cpp +++ b/src/IO/tests/gtest_archive_reader_and_writer.cpp @@ -102,7 +102,8 @@ TEST_P(ArchiveReaderAndWriterTest, EmptyArchive) { /// Make an archive. { - createArchiveWriter(getPathToArchive()); + auto writer = createArchiveWriter(getPathToArchive()); + writer->finalize(); } /// The created archive can be found in the local filesystem. @@ -132,7 +133,9 @@ TEST_P(ArchiveReaderAndWriterTest, SingleFileInArchive) { auto out = writer->writeFile("a.txt"); writeString(contents, *out); + out->finalize(); } + writer->finalize(); } /// Read the archive. @@ -198,11 +201,14 @@ TEST_P(ArchiveReaderAndWriterTest, TwoFilesInArchive) { auto out = writer->writeFile("a.txt"); writeString(a_contents, *out); + out->finalize(); } { auto out = writer->writeFile("b/c.txt"); writeString(c_contents, *out); + out->finalize(); } + writer->finalize(); } /// Read the archive. @@ -281,11 +287,14 @@ TEST_P(ArchiveReaderAndWriterTest, InMemory) { auto out = writer->writeFile("a.txt"); writeString(a_contents, *out); + out->finalize(); } { auto out = writer->writeFile("b.txt"); writeString(b_contents, *out); + out->finalize(); } + writer->finalize(); } /// The created archive is really in memory. @@ -335,7 +344,9 @@ TEST_P(ArchiveReaderAndWriterTest, Password) { auto out = writer->writeFile("a.txt"); writeString(contents, *out); + out->finalize(); } + writer->finalize(); } /// Read the archive. 
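The archive tests above now finalize both the per-file stream and the writer itself. A condensed sketch of that pattern, reusing the names from the test fixture (createArchiveWriter, getPathToArchive, contents), so it is not self-contained:

auto writer = createArchiveWriter(getPathToArchive());
{
    auto out = writer->writeFile("a.txt");
    writeString(contents, *out);
    out->finalize();   /// finalize each written file...
}
writer->finalize();    /// ...and then the archive itself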
diff --git a/src/IO/tests/gtest_writebuffer_s3.cpp b/src/IO/tests/gtest_writebuffer_s3.cpp index 5880b40c408..7210dc6fbbf 100644 --- a/src/IO/tests/gtest_writebuffer_s3.cpp +++ b/src/IO/tests/gtest_writebuffer_s3.cpp @@ -210,10 +210,13 @@ struct Client : DB::S3::Client std::make_shared("", ""), GetClientConfiguration(), Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, - /* use_virtual_addressing = */ true, - /* disable_checksum_= */ false) + DB::S3::ClientSettings{ + .use_virtual_addressing = true, + .disable_checksum= false, + .gcs_issue_compose_request = false, + }) , store(mock_s3_store) - { } + {} static std::shared_ptr CreateClient(String bucket = "mock-s3-bucket") { diff --git a/src/Interpreters/Access/InterpreterGrantQuery.cpp b/src/Interpreters/Access/InterpreterGrantQuery.cpp index 45e8ba9ea0d..259c6b39524 100644 --- a/src/Interpreters/Access/InterpreterGrantQuery.cpp +++ b/src/Interpreters/Access/InterpreterGrantQuery.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -396,7 +397,8 @@ namespace BlockIO InterpreterGrantQuery::execute() { - auto & query = query_ptr->as(); + const auto updated_query = removeOnClusterClauseIfNeeded(query_ptr, getContext()); + auto & query = updated_query->as(); query.replaceCurrentUserTag(getContext()->getUserName()); query.access_rights_elements.eraseNonGrantable(); @@ -430,7 +432,7 @@ BlockIO InterpreterGrantQuery::execute() current_user_access->checkGranteesAreAllowed(grantees); DDLQueryOnClusterParams params; params.access_to_check = std::move(required_access); - return executeDDLQueryOnCluster(query_ptr, getContext(), params); + return executeDDLQueryOnCluster(updated_query, getContext(), params); } /// Check if the current user has corresponding access rights granted with grant option. 
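The S3 client construction in the tests above now groups the per-client booleans into a single S3::ClientSettings aggregate. A short sketch of that call-site shape; `uri` stands for an S3::URI from the surrounding test code, and the remaining ClientFactory::create arguments (credentials, SSE settings, headers) are unchanged by this patch and therefore not repeated here.

#include <IO/S3/Client.h>

/// Illustrative only: the booleans that used to be separate factory arguments
/// are now carried by one struct and passed to ClientFactory::instance().create()
/// in place of is_virtual_hosted_style / disable_checksum.
DB::S3::ClientSettings client_settings{
    .use_virtual_addressing = uri.is_virtual_hosted_style,
    .disable_checksum = false,
    .gcs_issue_compose_request = false,   /// GCS ComposeObject after upload is now opt-in
};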
diff --git a/src/Interpreters/ActionLocksManager.cpp b/src/Interpreters/ActionLocksManager.cpp index fb5ef4b98ae..65f13ebd66c 100644 --- a/src/Interpreters/ActionLocksManager.cpp +++ b/src/Interpreters/ActionLocksManager.cpp @@ -18,6 +18,7 @@ namespace ActionLocks extern const StorageActionBlockType PartsMove = 7; extern const StorageActionBlockType PullReplicationLog = 8; extern const StorageActionBlockType Cleanup = 9; + extern const StorageActionBlockType ViewRefresh = 10; } diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 6be9f6c803f..827914eaefe 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -1414,7 +1414,10 @@ FutureSetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool set_key = right_in_operand->getTreeHash(/*ignore_aliases=*/ true); if (auto set = data.prepared_sets->findSubquery(set_key)) + { + set->markAsINSubquery(); return set; + } FutureSetPtr external_table_set; @@ -1460,7 +1463,8 @@ FutureSetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool interpreter->buildQueryPlan(*source); } - return data.prepared_sets->addFromSubquery(set_key, std::move(source), nullptr, std::move(external_table_set), data.getContext()->getSettingsRef()); + return data.prepared_sets->addFromSubquery( + set_key, std::move(source), nullptr, std::move(external_table_set), data.getContext()->getSettingsRef(), /*in_subquery=*/true); } else { diff --git a/src/Interpreters/AddDefaultDatabaseVisitor.h b/src/Interpreters/AddDefaultDatabaseVisitor.h index 08d159b42ca..b977a73d461 100644 --- a/src/Interpreters/AddDefaultDatabaseVisitor.h +++ b/src/Interpreters/AddDefaultDatabaseVisitor.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -87,6 +88,12 @@ public: visit(child); } + void visit(ASTRefreshStrategy & refresh) const + { + ASTPtr unused; + visit(refresh, unused); + } + private: ContextPtr context; @@ -148,8 +155,6 @@ private: { if (table_expression.database_and_table_name) tryVisit(table_expression.database_and_table_name); - else if (table_expression.subquery) - tryVisit(table_expression.subquery); } void visit(const ASTTableIdentifier & identifier, ASTPtr & ast) const @@ -167,11 +172,6 @@ private: ast = qualified_identifier; } - void visit(ASTSubquery & subquery, ASTPtr &) const - { - tryVisit(subquery.children[0]); - } - void visit(ASTFunction & function, ASTPtr &) const { bool is_operator_in = functionIsInOrGlobalInOperator(function.name); @@ -236,6 +236,13 @@ private: } } + void visit(ASTRefreshStrategy & refresh, ASTPtr &) const + { + if (refresh.dependencies) + for (auto & table : refresh.dependencies->children) + tryVisit(table); + } + void visitChildren(IAST & ast) const { for (auto & child : ast.children) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index b43edfb8d3e..cdc4292a79c 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -976,12 +976,12 @@ void Aggregator::executeOnBlockSmall( initDataVariantsWithSizeHint(result, method_chosen, params); else result.init(method_chosen); + result.keys_size = params.keys_size; result.key_sizes = key_sizes; } executeImpl(result, row_begin, row_end, key_columns, aggregate_instructions); - CurrentMemoryTracker::check(); } @@ -1014,7 +1014,9 @@ void Aggregator::mergeOnBlockSmall( #define M(NAME, IS_TWO_LEVEL) \ else if (result.type == AggregatedDataVariants::Type::NAME) \ mergeStreamsImpl(result.aggregates_pool, *result.NAME, 
result.NAME->data, \ - result.without_key, /* no_more_keys= */ false, \ + result.without_key, \ + result.consecutive_keys_cache_stats, \ + /* no_more_keys= */ false, \ row_begin, row_end, \ aggregate_columns_data, key_columns, result.aggregates_pool); @@ -1038,17 +1040,14 @@ void Aggregator::executeImpl( { #define M(NAME, IS_TWO_LEVEL) \ else if (result.type == AggregatedDataVariants::Type::NAME) \ - executeImpl(*result.NAME, result.aggregates_pool, row_begin, row_end, key_columns, aggregate_instructions, no_more_keys, all_keys_are_const, overflow_row); + executeImpl(*result.NAME, result.aggregates_pool, row_begin, row_end, key_columns, aggregate_instructions, \ + result.consecutive_keys_cache_stats, no_more_keys, all_keys_are_const, overflow_row); if (false) {} // NOLINT APPLY_FOR_AGGREGATED_VARIANTS(M) #undef M } -/** It's interesting - if you remove `noinline`, then gcc for some reason will inline this function, and the performance decreases (~ 10%). - * (Probably because after the inline of this function, more internal functions no longer be inlined.) - * Inline does not make sense, since the inner loop is entirely inside this function. - */ template void NO_INLINE Aggregator::executeImpl( Method & method, @@ -1057,12 +1056,44 @@ void NO_INLINE Aggregator::executeImpl( size_t row_end, ColumnRawPtrs & key_columns, AggregateFunctionInstruction * aggregate_instructions, + LastElementCacheStats & consecutive_keys_cache_stats, bool no_more_keys, bool all_keys_are_const, AggregateDataPtr overflow_row) const { - typename Method::State state(key_columns, key_sizes, aggregation_state_cache); + UInt64 total_rows = consecutive_keys_cache_stats.hits + consecutive_keys_cache_stats.misses; + double cache_hit_rate = total_rows ? static_cast(consecutive_keys_cache_stats.hits) / total_rows : 1.0; + bool use_cache = cache_hit_rate >= params.min_hit_rate_to_use_consecutive_keys_optimization; + if (use_cache) + { + typename Method::State state(key_columns, key_sizes, aggregation_state_cache); + executeImpl(method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, no_more_keys, all_keys_are_const, overflow_row); + consecutive_keys_cache_stats.update(row_end - row_begin, state.getCacheMissesSinceLastReset()); + } + else + { + typename Method::StateNoCache state(key_columns, key_sizes, aggregation_state_cache); + executeImpl(method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, no_more_keys, all_keys_are_const, overflow_row); + } +} + +/** It's interesting - if you remove `noinline`, then gcc for some reason will inline this function, and the performance decreases (~ 10%). + * (Probably because after the inline of this function, more internal functions no longer be inlined.) + * Inline does not make sense, since the inner loop is entirely inside this function. + */ +template +void NO_INLINE Aggregator::executeImpl( + Method & method, + State & state, + Arena * aggregates_pool, + size_t row_begin, + size_t row_end, + AggregateFunctionInstruction * aggregate_instructions, + bool no_more_keys, + bool all_keys_are_const, + AggregateDataPtr overflow_row) const +{ if (!no_more_keys) { /// Prefetching doesn't make sense for small hash tables, because they fit in caches entirely. 
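The executeImpl refactor in the hunk above decides per block whether the consecutive-keys (last element) cache is worth keeping: it computes the observed hit rate from LastElementCacheStats and falls back to Method::StateNoCache once the rate drops below min_hit_rate_to_use_consecutive_keys_optimization. A simplified, self-contained sketch of that gating, with a placeholder stats type rather than the real ColumnsHashing::LastElementCacheStats:

#include <cstdint>
#include <iostream>

// Placeholder for ColumnsHashing::LastElementCacheStats: counts how often the
// previous key matched the current one across processed rows.
struct CacheStatsSketch
{
    uint64_t hits = 0;
    uint64_t misses = 0;

    void update(uint64_t rows, uint64_t new_misses)
    {
        hits += rows - new_misses;
        misses += new_misses;
    }
};

// Mirrors the decision shown in the hunk above: with no data yet the hit rate is
// treated as 1.0, so the cache starts enabled and is dropped only after it has
// demonstrably stopped paying off.
bool shouldUseConsecutiveKeysCache(const CacheStatsSketch & stats, double min_hit_rate)
{
    const uint64_t total = stats.hits + stats.misses;
    const double hit_rate = total ? static_cast<double>(stats.hits) / total : 1.0;
    return hit_rate >= min_hit_rate;
}

int main()
{
    CacheStatsSketch stats;
    stats.update(/*rows=*/1000, /*new_misses=*/900); // mostly distinct keys in the block
    std::cout << std::boolalpha << shouldUseConsecutiveKeysCache(stats, 0.5) << '\n'; // false
}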
@@ -1096,10 +1127,10 @@ void NO_INLINE Aggregator::executeImpl( } } -template +template void NO_INLINE Aggregator::executeImplBatch( Method & method, - typename Method::State & state, + State & state, Arena * aggregates_pool, size_t row_begin, size_t row_end, @@ -1119,8 +1150,6 @@ void NO_INLINE Aggregator::executeImplBatch( if constexpr (no_more_keys) return; - /// For all rows. - /// This pointer is unused, but the logic will compare it for nullptr to check if the cell is set. AggregateDataPtr place = reinterpret_cast(0x1); if (all_keys_are_const) @@ -1129,6 +1158,7 @@ void NO_INLINE Aggregator::executeImplBatch( } else { + /// For all rows. for (size_t i = row_begin; i < row_end; ++i) { if constexpr (prefetch && HasPrefetchMemberFunc) @@ -1191,21 +1221,23 @@ void NO_INLINE Aggregator::executeImplBatch( /// - and plus this will require other changes in the interface. std::unique_ptr places(new AggregateDataPtr[all_keys_are_const ? 1 : row_end]); - /// For all rows. - size_t start, end; + size_t key_start, key_end; /// If all keys are const, key columns contain only 1 row. if (all_keys_are_const) { - start = 0; - end = 1; + key_start = 0; + key_end = 1; } else { - start = row_begin; - end = row_end; + key_start = row_begin; + key_end = row_end; } - for (size_t i = start; i < end; ++i) + state.resetCache(); + + /// For all rows. + for (size_t i = key_start; i < key_end; ++i) { AggregateDataPtr aggregate_data = nullptr; @@ -1213,7 +1245,7 @@ void NO_INLINE Aggregator::executeImplBatch( { if constexpr (prefetch && HasPrefetchMemberFunc) { - if (i == row_begin + prefetching.iterationsToMeasure()) + if (i == key_start + prefetching.iterationsToMeasure()) prefetch_look_ahead = prefetching.calcPrefetchLookAhead(); if (i + prefetch_look_ahead < row_end) @@ -1305,10 +1337,10 @@ void NO_INLINE Aggregator::executeImplBatch( columns_data.emplace_back(getColumnData(inst->batch_arguments[argument_index])); } - if (all_keys_are_const) + if (all_keys_are_const || (!no_more_keys && state.hasOnlyOneValueSinceLastReset())) { auto add_into_aggregate_states_function_single_place = compiled_aggregate_functions_holder->compiled_aggregate_functions.add_into_aggregate_states_function_single_place; - add_into_aggregate_states_function_single_place(row_begin, row_end, columns_data.data(), places[0]); + add_into_aggregate_states_function_single_place(row_begin, row_end, columns_data.data(), places[key_start]); } else { @@ -1329,24 +1361,10 @@ void NO_INLINE Aggregator::executeImplBatch( AggregateFunctionInstruction * inst = aggregate_instructions + i; - if (all_keys_are_const) - { - if (inst->offsets) - inst->batch_that->addBatchSinglePlace(inst->offsets[static_cast(row_begin) - 1], inst->offsets[row_end - 1], places[0] + inst->state_offset, inst->batch_arguments, aggregates_pool); - else if (inst->has_sparse_arguments) - inst->batch_that->addBatchSparseSinglePlace(row_begin, row_end, places[0] + inst->state_offset, inst->batch_arguments, aggregates_pool); - else - inst->batch_that->addBatchSinglePlace(row_begin, row_end, places[0] + inst->state_offset, inst->batch_arguments, aggregates_pool); - } + if (all_keys_are_const || (!no_more_keys && state.hasOnlyOneValueSinceLastReset())) + addBatchSinglePlace(row_begin, row_end, inst, places[key_start] + inst->state_offset, aggregates_pool); else - { - if (inst->offsets) - inst->batch_that->addBatchArray(row_begin, row_end, places.get(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool); - else if (inst->has_sparse_arguments) - 
inst->batch_that->addBatchSparse(row_begin, row_end, places.get(), inst->state_offset, inst->batch_arguments, aggregates_pool); - else - inst->batch_that->addBatch(row_begin, row_end, places.get(), inst->state_offset, inst->batch_arguments, aggregates_pool); - } + addBatch(row_begin, row_end, inst, places.get(), aggregates_pool); } } @@ -1410,28 +1428,63 @@ void NO_INLINE Aggregator::executeWithoutKeyImpl( continue; #endif - if (inst->offsets) - inst->batch_that->addBatchSinglePlace( - inst->offsets[static_cast(row_begin) - 1], - inst->offsets[row_end - 1], - res + inst->state_offset, - inst->batch_arguments, - arena); - else if (inst->has_sparse_arguments) - inst->batch_that->addBatchSparseSinglePlace( - row_begin, row_end, - res + inst->state_offset, - inst->batch_arguments, - arena); - else - inst->batch_that->addBatchSinglePlace( - row_begin, row_end, - res + inst->state_offset, - inst->batch_arguments, - arena); + addBatchSinglePlace(row_begin, row_end, inst, res + inst->state_offset, arena); } } +void Aggregator::addBatch( + size_t row_begin, size_t row_end, + AggregateFunctionInstruction * inst, + AggregateDataPtr * places, + Arena * arena) +{ + if (inst->offsets) + inst->batch_that->addBatchArray( + row_begin, row_end, places, + inst->state_offset, + inst->batch_arguments, + inst->offsets, + arena); + else if (inst->has_sparse_arguments) + inst->batch_that->addBatchSparse( + row_begin, row_end, places, + inst->state_offset, + inst->batch_arguments, + arena); + else + inst->batch_that->addBatch( + row_begin, row_end, places, + inst->state_offset, + inst->batch_arguments, + arena); +} + + +void Aggregator::addBatchSinglePlace( + size_t row_begin, size_t row_end, + AggregateFunctionInstruction * inst, + AggregateDataPtr place, + Arena * arena) +{ + if (inst->offsets) + inst->batch_that->addBatchSinglePlace( + inst->offsets[static_cast(row_begin) - 1], + inst->offsets[row_end - 1], + place, + inst->batch_arguments, + arena); + else if (inst->has_sparse_arguments) + inst->batch_that->addBatchSparseSinglePlace( + row_begin, row_end, place, + inst->batch_arguments, + arena); + else + inst->batch_that->addBatchSinglePlace( + row_begin, row_end, place, + inst->batch_arguments, + arena); +} + void NO_INLINE Aggregator::executeOnIntervalWithoutKey( AggregatedDataVariants & data_variants, size_t row_begin, size_t row_end, AggregateFunctionInstruction * aggregate_instructions) const @@ -2867,20 +2920,17 @@ ManyAggregatedDataVariants Aggregator::prepareVariantsToMerge(ManyAggregatedData return non_empty_data; } -template +template void NO_INLINE Aggregator::mergeStreamsImplCase( Arena * aggregates_pool, - Method & method [[maybe_unused]], + State & state, Table & data, AggregateDataPtr overflow_row, size_t row_begin, size_t row_end, const AggregateColumnsConstData & aggregate_columns_data, - const ColumnRawPtrs & key_columns, Arena * arena_for_keys) const { - typename Method::State state(key_columns, key_sizes, aggregation_state_cache); - std::unique_ptr places(new AggregateDataPtr[row_end]); if (!arena_for_keys) @@ -2890,7 +2940,7 @@ void NO_INLINE Aggregator::mergeStreamsImplCase( { AggregateDataPtr aggregate_data = nullptr; - if (!no_more_keys) + if constexpr (!no_more_keys) { auto emplace_result = state.emplaceKey(data, i, *arena_for_keys); // NOLINT if (emplace_result.isInserted()) @@ -2936,6 +2986,7 @@ void NO_INLINE Aggregator::mergeStreamsImpl( Method & method, Table & data, AggregateDataPtr overflow_row, + LastElementCacheStats & consecutive_keys_cache_stats, bool no_more_keys, Arena * 
arena_for_keys) const { @@ -2943,15 +2994,17 @@ void NO_INLINE Aggregator::mergeStreamsImpl( const ColumnRawPtrs & key_columns = params.makeRawKeyColumns(block); mergeStreamsImpl( - aggregates_pool, method, data, overflow_row, no_more_keys, 0, block.rows(), aggregate_columns_data, key_columns, arena_for_keys); + aggregates_pool, method, data, overflow_row, consecutive_keys_cache_stats, + no_more_keys, 0, block.rows(), aggregate_columns_data, key_columns, arena_for_keys); } template void NO_INLINE Aggregator::mergeStreamsImpl( Arena * aggregates_pool, - Method & method, + Method & method [[maybe_unused]], Table & data, AggregateDataPtr overflow_row, + LastElementCacheStats & consecutive_keys_cache_stats, bool no_more_keys, size_t row_begin, size_t row_end, @@ -2959,12 +3012,30 @@ void NO_INLINE Aggregator::mergeStreamsImpl( const ColumnRawPtrs & key_columns, Arena * arena_for_keys) const { - if (!no_more_keys) - mergeStreamsImplCase( - aggregates_pool, method, data, overflow_row, row_begin, row_end, aggregate_columns_data, key_columns, arena_for_keys); + UInt64 total_rows = consecutive_keys_cache_stats.hits + consecutive_keys_cache_stats.misses; + double cache_hit_rate = total_rows ? static_cast(consecutive_keys_cache_stats.hits) / total_rows : 1.0; + bool use_cache = cache_hit_rate >= params.min_hit_rate_to_use_consecutive_keys_optimization; + + if (use_cache) + { + typename Method::State state(key_columns, key_sizes, aggregation_state_cache); + + if (!no_more_keys) + mergeStreamsImplCase(aggregates_pool, state, data, overflow_row, row_begin, row_end, aggregate_columns_data, arena_for_keys); + else + mergeStreamsImplCase(aggregates_pool, state, data, overflow_row, row_begin, row_end, aggregate_columns_data, arena_for_keys); + + consecutive_keys_cache_stats.update(row_end - row_begin, state.getCacheMissesSinceLastReset()); + } else - mergeStreamsImplCase( - aggregates_pool, method, data, overflow_row, row_begin, row_end, aggregate_columns_data, key_columns, arena_for_keys); + { + typename Method::StateNoCache state(key_columns, key_sizes, aggregation_state_cache); + + if (!no_more_keys) + mergeStreamsImplCase(aggregates_pool, state, data, overflow_row, row_begin, row_end, aggregate_columns_data, arena_for_keys); + else + mergeStreamsImplCase(aggregates_pool, state, data, overflow_row, row_begin, row_end, aggregate_columns_data, arena_for_keys); + } } @@ -3024,7 +3095,7 @@ bool Aggregator::mergeOnBlock(Block block, AggregatedDataVariants & result, bool mergeBlockWithoutKeyStreamsImpl(std::move(block), result); #define M(NAME, IS_TWO_LEVEL) \ else if (result.type == AggregatedDataVariants::Type::NAME) \ - mergeStreamsImpl(std::move(block), result.aggregates_pool, *result.NAME, result.NAME->data, result.without_key, no_more_keys); + mergeStreamsImpl(std::move(block), result.aggregates_pool, *result.NAME, result.NAME->data, result.without_key, result.consecutive_keys_cache_stats, no_more_keys); APPLY_FOR_AGGREGATED_VARIANTS(M) #undef M @@ -3127,9 +3198,11 @@ void Aggregator::mergeBlocks(BucketToBlocks bucket_to_blocks, AggregatedDataVari for (Block & block : bucket_to_blocks[bucket]) { + /// Copy to avoid race. 
+ auto consecutive_keys_cache_stats_copy = result.consecutive_keys_cache_stats; #define M(NAME) \ else if (result.type == AggregatedDataVariants::Type::NAME) \ - mergeStreamsImpl(std::move(block), aggregates_pool, *result.NAME, result.NAME->data.impls[bucket], nullptr, false); + mergeStreamsImpl(std::move(block), aggregates_pool, *result.NAME, result.NAME->data.impls[bucket], nullptr, consecutive_keys_cache_stats_copy, false); if (false) {} // NOLINT APPLY_FOR_VARIANTS_TWO_LEVEL(M) @@ -3184,7 +3257,7 @@ void Aggregator::mergeBlocks(BucketToBlocks bucket_to_blocks, AggregatedDataVari #define M(NAME, IS_TWO_LEVEL) \ else if (result.type == AggregatedDataVariants::Type::NAME) \ - mergeStreamsImpl(std::move(block), result.aggregates_pool, *result.NAME, result.NAME->data, result.without_key, no_more_keys); + mergeStreamsImpl(std::move(block), result.aggregates_pool, *result.NAME, result.NAME->data, result.without_key, result.consecutive_keys_cache_stats, no_more_keys); APPLY_FOR_AGGREGATED_VARIANTS(M) #undef M @@ -3262,7 +3335,7 @@ Block Aggregator::mergeBlocks(BlocksList & blocks, bool final) #define M(NAME, IS_TWO_LEVEL) \ else if (result.type == AggregatedDataVariants::Type::NAME) \ - mergeStreamsImpl(std::move(block), result.aggregates_pool, *result.NAME, result.NAME->data, nullptr, false, arena_for_keys.get()); + mergeStreamsImpl(std::move(block), result.aggregates_pool, *result.NAME, result.NAME->data, nullptr, result.consecutive_keys_cache_stats, false, arena_for_keys.get()); APPLY_FOR_AGGREGATED_VARIANTS(M) #undef M diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index ab53f76d2ce..6fc3ac2f6d6 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -205,8 +205,17 @@ struct AggregationMethodOneNumber } /// To use one `Method` in different threads, use different `State`. - using State = ColumnsHashing::HashMethodOneNumber; + template + using StateImpl = ColumnsHashing::HashMethodOneNumber< + typename Data::value_type, + Mapped, + FieldType, + use_cache && consecutive_keys_optimization, + /*need_offset=*/ false, + nullable>; + + using State = StateImpl; + using StateNoCache = StateImpl; /// Use optimization for low cardinality. 
static const bool low_cardinality_optimization = false; @@ -259,7 +268,11 @@ struct AggregationMethodString explicit AggregationMethodString(size_t size_hint) : data(size_hint) { } - using State = ColumnsHashing::HashMethodString; + template + using StateImpl = ColumnsHashing::HashMethodString; + + using State = StateImpl; + using StateNoCache = StateImpl; static const bool low_cardinality_optimization = false; static const bool one_key_nullable_optimization = false; @@ -292,7 +305,11 @@ struct AggregationMethodStringNoCache { } - using State = ColumnsHashing::HashMethodString; + template + using StateImpl = ColumnsHashing::HashMethodString; + + using State = StateImpl; + using StateNoCache = StateImpl; static const bool low_cardinality_optimization = false; static const bool one_key_nullable_optimization = nullable; @@ -334,7 +351,11 @@ struct AggregationMethodFixedString { } - using State = ColumnsHashing::HashMethodFixedString; + template + using StateImpl = ColumnsHashing::HashMethodFixedString; + + using State = StateImpl; + using StateNoCache = StateImpl; static const bool low_cardinality_optimization = false; static const bool one_key_nullable_optimization = false; @@ -366,7 +387,11 @@ struct AggregationMethodFixedStringNoCache { } - using State = ColumnsHashing::HashMethodFixedString; + template + using StateImpl = ColumnsHashing::HashMethodFixedString; + + using State = StateImpl; + using StateNoCache = StateImpl; static const bool low_cardinality_optimization = false; static const bool one_key_nullable_optimization = nullable; @@ -392,20 +417,24 @@ template struct AggregationMethodSingleLowCardinalityColumn : public SingleColumnMethod { using Base = SingleColumnMethod; - using BaseState = typename Base::State; - using Data = typename Base::Data; using Key = typename Base::Key; using Mapped = typename Base::Mapped; - using Base::data; + template + using BaseStateImpl = typename Base::template StateImpl; + AggregationMethodSingleLowCardinalityColumn() = default; template explicit AggregationMethodSingleLowCardinalityColumn(const Other & other) : Base(other) {} - using State = ColumnsHashing::HashMethodSingleLowCardinalityColumn; + template + using StateImpl = ColumnsHashing::HashMethodSingleLowCardinalityColumn, Mapped, use_cache>; + + using State = StateImpl; + using StateNoCache = StateImpl; static const bool low_cardinality_optimization = true; @@ -429,7 +458,7 @@ struct AggregationMethodSingleLowCardinalityColumn : public SingleColumnMethod /// For the case where all keys are of fixed length, and they fit in N (for example, 128) bits. 
-template +template struct AggregationMethodKeysFixed { using Data = TData; @@ -449,13 +478,17 @@ struct AggregationMethodKeysFixed { } - using State = ColumnsHashing::HashMethodKeysFixed< + template + using StateImpl = ColumnsHashing::HashMethodKeysFixed< typename Data::value_type, Key, Mapped, has_nullable_keys, has_low_cardinality, - use_cache>; + use_cache && consecutive_keys_optimization>; + + using State = StateImpl; + using StateNoCache = StateImpl; static const bool low_cardinality_optimization = false; static const bool one_key_nullable_optimization = false; @@ -546,7 +579,11 @@ struct AggregationMethodSerialized { } - using State = ColumnsHashing::HashMethodSerialized; + template + using StateImpl = ColumnsHashing::HashMethodSerialized; + + using State = StateImpl; + using StateNoCache = StateImpl; static const bool low_cardinality_optimization = false; static const bool one_key_nullable_optimization = false; @@ -566,6 +603,7 @@ class Aggregator; using ColumnsHashing::HashMethodContext; using ColumnsHashing::HashMethodContextPtr; +using ColumnsHashing::LastElementCacheStats; struct AggregatedDataVariants : private boost::noncopyable { @@ -599,6 +637,10 @@ struct AggregatedDataVariants : private boost::noncopyable */ AggregatedDataWithoutKey without_key = nullptr; + /// Stats of a cache for consecutive keys optimization. + /// Stats can be used to disable the cache in case of a lot of misses. + LastElementCacheStats consecutive_keys_cache_stats; + // Disable consecutive key optimization for Uint8/16, because they use a FixedHashMap // and the lookup there is almost free, so we don't need to cache the last lookup result std::unique_ptr> key8; @@ -1025,6 +1067,8 @@ public: bool optimize_group_by_constant_keys; + const double min_hit_rate_to_use_consecutive_keys_optimization; + struct StatsCollectingParams { StatsCollectingParams(); @@ -1042,6 +1086,7 @@ public: const size_t max_entries_for_hash_table_stats = 0; const size_t max_size_to_preallocate_for_aggregation = 0; }; + StatsCollectingParams stats_collecting_params; Params( @@ -1063,7 +1108,8 @@ public: bool enable_prefetch_, bool only_merge_, // true for projections bool optimize_group_by_constant_keys_, - const StatsCollectingParams & stats_collecting_params_ = {}) + double min_hit_rate_to_use_consecutive_keys_optimization_, + const StatsCollectingParams & stats_collecting_params_) : keys(keys_) , aggregates(aggregates_) , keys_size(keys.size()) @@ -1084,14 +1130,15 @@ public: , only_merge(only_merge_) , enable_prefetch(enable_prefetch_) , optimize_group_by_constant_keys(optimize_group_by_constant_keys_) + , min_hit_rate_to_use_consecutive_keys_optimization(min_hit_rate_to_use_consecutive_keys_optimization_) , stats_collecting_params(stats_collecting_params_) { } /// Only parameters that matter during merge. 
- Params(const Names & keys_, const AggregateDescriptions & aggregates_, bool overflow_row_, size_t max_threads_, size_t max_block_size_) + Params(const Names & keys_, const AggregateDescriptions & aggregates_, bool overflow_row_, size_t max_threads_, size_t max_block_size_, double min_hit_rate_to_use_consecutive_keys_optimization_) : Params( - keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, nullptr, max_threads_, 0, false, 0, max_block_size_, false, true, {}) + keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, nullptr, max_threads_, 0, false, 0, max_block_size_, false, true, false, min_hit_rate_to_use_consecutive_keys_optimization_, {}) { } @@ -1295,15 +1342,28 @@ private: size_t row_end, ColumnRawPtrs & key_columns, AggregateFunctionInstruction * aggregate_instructions, + LastElementCacheStats & consecutive_keys_cache_stats, + bool no_more_keys, + bool all_keys_are_const, + AggregateDataPtr overflow_row) const; + + template + void executeImpl( + Method & method, + State & state, + Arena * aggregates_pool, + size_t row_begin, + size_t row_end, + AggregateFunctionInstruction * aggregate_instructions, bool no_more_keys, bool all_keys_are_const, AggregateDataPtr overflow_row) const; /// Specialization for a particular value no_more_keys. - template + template void executeImplBatch( Method & method, - typename Method::State & state, + State & state, Arena * aggregates_pool, size_t row_begin, size_t row_end, @@ -1413,16 +1473,15 @@ private: bool final, ThreadPool * thread_pool) const; - template + template void mergeStreamsImplCase( Arena * aggregates_pool, - Method & method, + State & state, Table & data, AggregateDataPtr overflow_row, size_t row_begin, size_t row_end, const AggregateColumnsConstData & aggregate_columns_data, - const ColumnRawPtrs & key_columns, Arena * arena_for_keys) const; /// `arena_for_keys` used to store serialized aggregation keys (in methods like `serialized`) to save some space. 
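The Aggregator.h hunks above repeat one pattern across the aggregation methods: a single StateImpl alias template parameterized on use_cache, instantiated twice as State and StateNoCache, so the caller can pick the cached or cache-free hashing state at runtime without duplicating each method definition. A compile-time sketch of that pattern, with a placeholder hashing state instead of the ColumnsHashing classes:

#include <cstddef>

// Placeholder for a ColumnsHashing state; the real ones also take key columns,
// key sizes and a hashing context in their constructors.
template <bool use_cache>
struct HashStateSketch
{
    static constexpr bool uses_last_element_cache = use_cache;
};

struct AggregationMethodSketch
{
    template <bool use_cache>
    using StateImpl = HashStateSketch<use_cache>;

    using State = StateImpl<true>;         // consecutive-keys cache enabled
    using StateNoCache = StateImpl<false>;  // cache disabled after too many misses
};

static_assert(AggregationMethodSketch::State::uses_last_element_cache);
static_assert(!AggregationMethodSketch::StateNoCache::uses_last_element_cache);

int main() {}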
@@ -1434,6 +1493,7 @@ private: Method & method, Table & data, AggregateDataPtr overflow_row, + LastElementCacheStats & consecutive_keys_cache_stats, bool no_more_keys, Arena * arena_for_keys = nullptr) const; @@ -1443,6 +1503,7 @@ private: Method & method, Table & data, AggregateDataPtr overflow_row, + LastElementCacheStats & consecutive_keys_cache_stats, bool no_more_keys, size_t row_begin, size_t row_end, @@ -1453,6 +1514,7 @@ private: void mergeBlockWithoutKeyStreamsImpl( Block block, AggregatedDataVariants & result) const; + void mergeWithoutKeyStreamsImpl( AggregatedDataVariants & result, size_t row_begin, @@ -1507,6 +1569,18 @@ private: MutableColumns & final_key_columns) const; static bool hasSparseArguments(AggregateFunctionInstruction * aggregate_instructions); + + static void addBatch( + size_t row_begin, size_t row_end, + AggregateFunctionInstruction * inst, + AggregateDataPtr * places, + Arena * arena); + + static void addBatchSinglePlace( + size_t row_begin, size_t row_end, + AggregateFunctionInstruction * inst, + AggregateDataPtr place, + Arena * arena); }; diff --git a/src/Interpreters/AsynchronousInsertQueue.cpp b/src/Interpreters/AsynchronousInsertQueue.cpp index a0750122a5c..63ee62cdef4 100644 --- a/src/Interpreters/AsynchronousInsertQueue.cpp +++ b/src/Interpreters/AsynchronousInsertQueue.cpp @@ -767,7 +767,6 @@ Chunk AsynchronousInsertQueue::processEntriesWithParsing( }; StreamingFormatExecutor executor(header, format, std::move(on_error), std::move(adding_defaults_transform)); - std::unique_ptr last_buffer; auto chunk_info = std::make_shared(); auto query_for_logging = serializeQuery(*key.query, insert_context->getSettingsRef().log_queries_cut_to_length); @@ -783,11 +782,6 @@ Chunk AsynchronousInsertQueue::processEntriesWithParsing( auto buffer = std::make_unique(*bytes); size_t num_bytes = bytes->size(); size_t num_rows = executor.execute(*buffer); - - /// Keep buffer, because it still can be used - /// in destructor, while resetting buffer at next iteration. 
- last_buffer = std::move(buffer); - total_rows += num_rows; chunk_info->offsets.push_back(total_rows); chunk_info->tokens.push_back(entry->async_dedup_token); @@ -796,8 +790,6 @@ Chunk AsynchronousInsertQueue::processEntriesWithParsing( current_exception.clear(); } - format->addBuffer(std::move(last_buffer)); - Chunk chunk(executor.getResultColumns(), total_rows); chunk.setChunkInfo(std::move(chunk_info)); return chunk; diff --git a/src/Interpreters/BackupLog.cpp b/src/Interpreters/BackupLog.cpp index 4953a2140ea..e49bb28bd45 100644 --- a/src/Interpreters/BackupLog.cpp +++ b/src/Interpreters/BackupLog.cpp @@ -27,6 +27,7 @@ NamesAndTypesList BackupLogElement::getNamesAndTypes() {"event_time_microseconds", std::make_shared(6)}, {"id", std::make_shared()}, {"name", std::make_shared()}, + {"base_backup_name", std::make_shared()}, {"status", std::make_shared(getBackupStatusEnumValues())}, {"error", std::make_shared()}, {"start_time", std::make_shared()}, @@ -49,6 +50,7 @@ void BackupLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(event_time_usec); columns[i++]->insert(info.id); columns[i++]->insert(info.name); + columns[i++]->insert(info.base_backup_name); columns[i++]->insert(static_cast(info.status)); columns[i++]->insert(info.error_message); columns[i++]->insert(static_cast(std::chrono::system_clock::to_time_t(info.start_time))); diff --git a/src/Interpreters/Cache/EvictionCandidates.cpp b/src/Interpreters/Cache/EvictionCandidates.cpp new file mode 100644 index 00000000000..7dceab4f95f --- /dev/null +++ b/src/Interpreters/Cache/EvictionCandidates.cpp @@ -0,0 +1,72 @@ +#include +#include + + +namespace ProfileEvents +{ + extern const Event FilesystemCacheEvictMicroseconds; + extern const Event FilesystemCacheEvictedBytes; + extern const Event FilesystemCacheEvictedFileSegments; +} + +namespace DB +{ + +EvictionCandidates::~EvictionCandidates() +{ + for (const auto & [key, key_candidates] : candidates) + { + for (const auto & candidate : key_candidates.candidates) + candidate->removal_candidate = false; + } +} + +void EvictionCandidates::add(LockedKey & locked_key, const FileSegmentMetadataPtr & candidate) +{ + auto [it, inserted] = candidates.emplace(locked_key.getKey(), KeyCandidates{}); + if (inserted) + it->second.key_metadata = locked_key.getKeyMetadata(); + it->second.candidates.push_back(candidate); + + candidate->removal_candidate = true; + ++candidates_size; +} + +void EvictionCandidates::evict(FileCacheQueryLimit::QueryContext * query_context, const CacheGuard::Lock & lock) +{ + if (candidates.empty()) + return; + + auto timer = DB::CurrentThread::getProfileEvents().timer(ProfileEvents::FilesystemCacheEvictMicroseconds); + + for (auto & [key, key_candidates] : candidates) + { + auto locked_key = key_candidates.key_metadata->tryLock(); + if (!locked_key) + continue; /// key could become invalid after we released the key lock above, just skip it. 
+ + auto & to_evict = key_candidates.candidates; + while (!to_evict.empty()) + { + auto & candidate = to_evict.back(); + chassert(candidate->releasable()); + + const auto segment = candidate->file_segment; + auto queue_it = segment->getQueueIterator(); + chassert(queue_it); + + ProfileEvents::increment(ProfileEvents::FilesystemCacheEvictedFileSegments); + ProfileEvents::increment(ProfileEvents::FilesystemCacheEvictedBytes, segment->range().size()); + + locked_key->removeFileSegment(segment->offset(), segment->lock()); + queue_it->remove(lock); + + if (query_context) + query_context->remove(segment->key(), segment->offset(), lock); + + to_evict.pop_back(); + } + } +} + +} diff --git a/src/Interpreters/Cache/EvictionCandidates.h b/src/Interpreters/Cache/EvictionCandidates.h new file mode 100644 index 00000000000..0557962d97f --- /dev/null +++ b/src/Interpreters/Cache/EvictionCandidates.h @@ -0,0 +1,35 @@ +#pragma once +#include + +namespace DB +{ + +class EvictionCandidates +{ +public: + ~EvictionCandidates(); + + void add(LockedKey & locked_key, const FileSegmentMetadataPtr & candidate); + + void evict(FileCacheQueryLimit::QueryContext * query_context, const CacheGuard::Lock &); + + size_t size() const { return candidates_size; } + + auto begin() const { return candidates.begin(); } + + auto end() const { return candidates.end(); } + +private: + struct KeyCandidates + { + KeyMetadataPtr key_metadata; + std::vector candidates; + }; + + std::unordered_map candidates; + size_t candidates_size = 0; +}; + +using EvictionCandidatesPtr = std::unique_ptr; + +} diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index 2f2d199b8b6..6b627cb07b3 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include #include #include @@ -20,13 +22,8 @@ namespace fs = std::filesystem; namespace ProfileEvents { extern const Event FilesystemCacheLoadMetadataMicroseconds; - extern const Event FilesystemCacheEvictedBytes; - extern const Event FilesystemCacheEvictedFileSegments; - extern const Event FilesystemCacheEvictionSkippedFileSegments; - extern const Event FilesystemCacheEvictionTries; extern const Event FilesystemCacheLockCacheMicroseconds; extern const Event FilesystemCacheReserveMicroseconds; - extern const Event FilesystemCacheEvictMicroseconds; extern const Event FilesystemCacheGetOrSetMicroseconds; extern const Event FilesystemCacheGetMicroseconds; } @@ -51,18 +48,46 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; +} + +void FileCacheReserveStat::update(size_t size, FileSegmentKind kind, bool releasable) +{ + auto & local_stat = stat_by_kind[kind]; + if (releasable) + { + stat.releasable_size += size; + ++stat.releasable_count; + + local_stat.releasable_size += size; + ++local_stat.releasable_count; + } + else + { + stat.non_releasable_size += size; + ++stat.non_releasable_count; + + local_stat.non_releasable_size += size; + ++local_stat.non_releasable_count; + } } FileCache::FileCache(const std::string & cache_name, const FileCacheSettings & settings) : max_file_segment_size(settings.max_file_segment_size) , bypass_cache_threshold(settings.enable_bypass_cache_with_threshold ? 
settings.bypass_cache_threshold : 0) , boundary_alignment(settings.boundary_alignment) - , background_download_threads(settings.background_download_threads) - , metadata_download_threads(settings.load_metadata_threads) + , load_metadata_threads(settings.load_metadata_threads) , log(&Poco::Logger::get("FileCache(" + cache_name + ")")) - , metadata(settings.base_path, settings.background_download_queue_size_limit) + , metadata(settings.base_path, settings.background_download_queue_size_limit, settings.background_download_threads) { - main_priority = std::make_unique(settings.max_size, settings.max_elements); + if (settings.cache_policy == "LRU") + main_priority = std::make_unique(settings.max_size, settings.max_elements); + else if (settings.cache_policy == "SLRU") + main_priority = std::make_unique(settings.max_size, settings.max_elements, settings.slru_size_ratio); + else + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown cache policy: {}", settings.cache_policy); + + LOG_DEBUG(log, "Using {} cache policy", settings.cache_policy); if (settings.cache_hits_threshold) stash = std::make_unique(settings.cache_hits_threshold, settings.max_elements); @@ -133,12 +158,8 @@ void FileCache::initialize() throw; } + metadata.startup(); is_initialized = true; - - for (size_t i = 0; i < background_download_threads; ++i) - download_threads.emplace_back([this] { metadata.downloadThreadFunc(); }); - - cleanup_thread = std::make_unique(std::function{ [this]{ metadata.cleanupThreadFunc(); }}); } CacheGuard::Lock FileCache::lockCache() const @@ -273,7 +294,7 @@ FileSegments FileCache::splitRangeIntoFileSegments( size_t size, FileSegment::State state, size_t file_segments_limit, - const CreateFileSegmentSettings & settings) + const CreateFileSegmentSettings & create_settings) { assert(size > 0); @@ -290,7 +311,7 @@ FileSegments FileCache::splitRangeIntoFileSegments( remaining_size -= current_file_segment_size; auto file_segment_metadata_it = addFileSegment( - locked_key, current_pos, current_file_segment_size, state, settings, nullptr); + locked_key, current_pos, current_file_segment_size, state, create_settings, nullptr); file_segments.push_back(file_segment_metadata_it->second->file_segment); current_pos += current_file_segment_size; @@ -305,7 +326,7 @@ void FileCache::fillHolesWithEmptyFileSegments( const FileSegment::Range & range, size_t file_segments_limit, bool fill_with_detached_file_segments, - const CreateFileSegmentSettings & settings) + const CreateFileSegmentSettings & create_settings) { /// There are segments [segment1, ..., segmentN] /// (non-overlapping, non-empty, ascending-ordered) which (maybe partially) @@ -362,7 +383,7 @@ void FileCache::fillHolesWithEmptyFileSegments( if (fill_with_detached_file_segments) { auto file_segment = std::make_shared( - locked_key.getKey(), current_pos, hole_size, FileSegment::State::DETACHED, settings); + locked_key.getKey(), current_pos, hole_size, FileSegment::State::DETACHED, create_settings); file_segments.insert(it, file_segment); ++processed_count; @@ -373,7 +394,7 @@ void FileCache::fillHolesWithEmptyFileSegments( FileSegments hole; for (const auto & r : ranges) { - auto metadata_it = addFileSegment(locked_key, r.left, r.size(), FileSegment::State::EMPTY, settings, nullptr); + auto metadata_it = addFileSegment(locked_key, r.left, r.size(), FileSegment::State::EMPTY, create_settings, nullptr); hole.push_back(metadata_it->second->file_segment); ++processed_count; @@ -418,7 +439,7 @@ void FileCache::fillHolesWithEmptyFileSegments( if 
(fill_with_detached_file_segments) { auto file_segment = std::make_shared( - locked_key.getKey(), current_pos, hole_size, FileSegment::State::DETACHED, settings); + locked_key.getKey(), current_pos, hole_size, FileSegment::State::DETACHED, create_settings); file_segments.insert(file_segments.end(), file_segment); } @@ -428,7 +449,7 @@ void FileCache::fillHolesWithEmptyFileSegments( FileSegments hole; for (const auto & r : ranges) { - auto metadata_it = addFileSegment(locked_key, r.left, r.size(), FileSegment::State::EMPTY, settings, nullptr); + auto metadata_it = addFileSegment(locked_key, r.left, r.size(), FileSegment::State::EMPTY, create_settings, nullptr); hole.push_back(metadata_it->second->file_segment); ++processed_count; @@ -447,7 +468,7 @@ FileSegmentsHolderPtr FileCache::set( const Key & key, size_t offset, size_t size, - const CreateFileSegmentSettings & settings) + const CreateFileSegmentSettings & create_settings) { assertInitialized(); @@ -458,17 +479,17 @@ FileSegmentsHolderPtr FileCache::set( if (!file_segments.empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Having intersection with already existing cache"); - if (settings.unbounded) + if (create_settings.unbounded) { /// If the file is unbounded, we can create a single file_segment_metadata for it. auto file_segment_metadata_it = addFileSegment( - *locked_key, offset, size, FileSegment::State::EMPTY, settings, nullptr); + *locked_key, offset, size, FileSegment::State::EMPTY, create_settings, nullptr); file_segments = {file_segment_metadata_it->second->file_segment}; } else { file_segments = splitRangeIntoFileSegments( - *locked_key, offset, size, FileSegment::State::EMPTY, /* file_segments_limit */0, settings); + *locked_key, offset, size, FileSegment::State::EMPTY, /* file_segments_limit */0, create_settings); } return std::make_unique(std::move(file_segments)); @@ -480,7 +501,7 @@ FileCache::getOrSet( size_t offset, size_t size, size_t file_size, - const CreateFileSegmentSettings & settings, + const CreateFileSegmentSettings & create_settings, size_t file_segments_limit) { ProfileEventTimeIncrement watch(ProfileEvents::FilesystemCacheGetOrSetMicroseconds); @@ -586,7 +607,7 @@ FileCache::getOrSet( if (file_segments.empty()) { - file_segments = splitRangeIntoFileSegments(*locked_key, range.left, range.size(), FileSegment::State::EMPTY, file_segments_limit, settings); + file_segments = splitRangeIntoFileSegments(*locked_key, range.left, range.size(), FileSegment::State::EMPTY, file_segments_limit, create_settings); } else { @@ -594,7 +615,7 @@ FileCache::getOrSet( chassert(file_segments.back()->range().left <= range.right); fillHolesWithEmptyFileSegments( - *locked_key, file_segments, range, file_segments_limit, /* fill_with_detached */false, settings); + *locked_key, file_segments, range, file_segments_limit, /* fill_with_detached */false, create_settings); if (!file_segments.front()->range().contains(offset)) { @@ -649,7 +670,7 @@ KeyMetadata::iterator FileCache::addFileSegment( size_t offset, size_t size, FileSegment::State state, - const CreateFileSegmentSettings & settings, + const CreateFileSegmentSettings & create_settings, const CacheGuard::Lock * lock) { /// Create a file_segment_metadata and put it in `files` map by [key][offset]. 
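The EvictionCandidates class introduced earlier in this diff (src/Interpreters/Cache/EvictionCandidates.cpp) collects releasable segments, flags them with removal_candidate while eviction is being decided, and relies on its destructor to clear the flag for anything that was collected but never evicted, e.g. when the reservation in tryReserve below fails. A minimal sketch of that RAII idea, assuming placeholder types rather than the real FileSegmentMetadata / LockedKey API:

#include <memory>
#include <vector>

struct SegmentSketch
{
    bool removal_candidate = false;
};
using SegmentSketchPtr = std::shared_ptr<SegmentSketch>;

class EvictionCandidatesSketch
{
public:
    ~EvictionCandidatesSketch()
    {
        // If we bail out before evict() (reservation failed), un-mark everything
        // so the segments become usable again.
        for (const auto & candidate : candidates)
            candidate->removal_candidate = false;
    }

    void add(const SegmentSketchPtr & candidate)
    {
        candidate->removal_candidate = true;
        candidates.push_back(candidate);
    }

    void evict()
    {
        // Remove from the back of the vector for efficiency, as in the original loop.
        while (!candidates.empty())
            candidates.pop_back();
    }

private:
    std::vector<SegmentSketchPtr> candidates;
};

int main()
{
    auto segment = std::make_shared<SegmentSketch>();
    {
        EvictionCandidatesSketch candidates;
        candidates.add(segment);   // segment->removal_candidate == true
        // Suppose the space reservation fails and we return without evicting.
    }                              // destructor clears the flag again
    return segment->removal_candidate ? 1 : 0;
}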
@@ -686,14 +707,14 @@ KeyMetadata::iterator FileCache::addFileSegment( stash_records.emplace( stash_key, stash->queue->add(locked_key.getKeyMetadata(), offset, 0, *lock)); - if (stash->queue->getElementsCount(*lock) > stash->queue->getElementsLimit()) + if (stash->queue->getElementsCount(*lock) > stash->queue->getElementsLimit(*lock)) stash->queue->pop(*lock); result_state = FileSegment::State::DETACHED; } else { - result_state = record_it->second->use(*lock) >= stash->hits_threshold + result_state = record_it->second->increasePriority(*lock) >= stash->hits_threshold ? FileSegment::State::EMPTY : FileSegment::State::DETACHED; } @@ -703,7 +724,7 @@ KeyMetadata::iterator FileCache::addFileSegment( result_state = state; } - auto file_segment = std::make_shared(key, offset, size, result_state, settings, background_download_threads > 0, this, locked_key.getKeyMetadata()); + auto file_segment = std::make_shared(key, offset, size, result_state, create_settings, metadata.isBackgroundDownloadEnabled(), this, locked_key.getKeyMetadata()); auto file_segment_metadata = std::make_shared(std::move(file_segment)); auto [file_segment_metadata_it, inserted] = locked_key.emplace(offset, file_segment_metadata); @@ -727,7 +748,7 @@ bool FileCache::tryReserve(FileSegment & file_segment, const size_t size, FileCa LOG_TEST( log, "Trying to reserve space ({} bytes) for {}:{}, current usage {}/{}", size, file_segment.key(), file_segment.offset(), - main_priority->getSize(cache_lock), main_priority->getSizeLimit()); + main_priority->getSize(cache_lock), main_priority->getSizeLimit(cache_lock)); /// In case of per query cache limit (by default disabled), we add/remove entries from both /// (main_priority and query_priority) priority queues, but iterate entries in order of query_priority, @@ -739,7 +760,7 @@ bool FileCache::tryReserve(FileSegment & file_segment, const size_t size, FileCa { query_priority = &query_context->getPriority(); - const bool query_limit_exceeded = query_priority->getSize(cache_lock) + size > query_priority->getSizeLimit(); + const bool query_limit_exceeded = query_priority->getSize(cache_lock) + size > query_priority->getSizeLimit(cache_lock); if (query_limit_exceeded && !query_context->recacheOnFileCacheQueryLimitExceeded()) { LOG_TEST(log, "Query limit exceeded, space reservation failed, " @@ -750,181 +771,38 @@ bool FileCache::tryReserve(FileSegment & file_segment, const size_t size, FileCa LOG_TEST( log, "Using query limit, current usage: {}/{} (while reserving for {}:{})", - query_priority->getSize(cache_lock), query_priority->getSizeLimit(), + query_priority->getSize(cache_lock), query_priority->getSizeLimit(cache_lock), file_segment.key(), file_segment.offset()); } - struct EvictionCandidates - { - explicit EvictionCandidates(KeyMetadataPtr key_metadata_) : key_metadata(std::move(key_metadata_)) {} - - void add(const FileSegmentMetadataPtr & candidate) - { - candidate->removal_candidate = true; - candidates.push_back(candidate); - } - - ~EvictionCandidates() - { - /// If failed to reserve space, we don't delete the candidates but drop the flag instead - /// so the segments can be used again - for (const auto & candidate : candidates) - candidate->removal_candidate = false; - } - - KeyMetadataPtr key_metadata; - std::vector candidates; - }; - - std::unordered_map to_delete; - size_t freeable_space = 0, freeable_count = 0; - - auto iterate_func = [&](LockedKey & locked_key, const FileSegmentMetadataPtr & segment_metadata) - { - 
chassert(segment_metadata->file_segment->assertCorrectness()); - - auto & stat_by_kind = reserve_stat.stat_by_kind[segment_metadata->file_segment->getKind()]; - if (segment_metadata->releasable()) - { - const auto & key = segment_metadata->file_segment->key(); - auto it = to_delete.find(key); - if (it == to_delete.end()) - it = to_delete.emplace(key, locked_key.getKeyMetadata()).first; - it->second.add(segment_metadata); - - stat_by_kind.releasable_size += segment_metadata->size(); - ++stat_by_kind.releasable_count; - - freeable_space += segment_metadata->size(); - ++freeable_count; - } - else - { - stat_by_kind.non_releasable_size += segment_metadata->size(); - ++stat_by_kind.non_releasable_count; - - ProfileEvents::increment(ProfileEvents::FilesystemCacheEvictionSkippedFileSegments); - } - - return PriorityIterationResult::CONTINUE; - }; + EvictionCandidates eviction_candidates; + IFileCachePriority::FinalizeEvictionFunc finalize_eviction_func; if (query_priority) { - auto is_query_priority_overflow = [&] - { - const size_t new_size = query_priority->getSize(cache_lock) + size - freeable_space; - return new_size > query_priority->getSizeLimit(); - }; - - if (is_query_priority_overflow()) - { - ProfileEvents::increment(ProfileEvents::FilesystemCacheEvictionTries); - - query_priority->iterate( - [&](LockedKey & locked_key, const FileSegmentMetadataPtr & segment_metadata) - { return is_query_priority_overflow() ? iterate_func(locked_key, segment_metadata) : PriorityIterationResult::BREAK; }, - cache_lock); - - if (is_query_priority_overflow()) - return false; - } - - LOG_TEST( - log, "Query limits satisfied (while reserving for {}:{})", - file_segment.key(), file_segment.offset()); - } - - auto is_main_priority_overflow = [main_priority_size_limit = main_priority->getSizeLimit(), - main_priority_elements_limit = main_priority->getElementsLimit(), - size, - &freeable_space, - &freeable_count, - &file_segment, - &cache_lock, - my_main_priority = this->main_priority.get(), - my_log = this->log] - { - const bool is_overflow = - /// size_limit == 0 means unlimited cache size - (main_priority_size_limit != 0 && (my_main_priority->getSize(cache_lock) + size - freeable_space > main_priority_size_limit)) - /// elements_limit == 0 means unlimited number of cache elements - || (main_priority_elements_limit != 0 && freeable_count == 0 - && my_main_priority->getElementsCount(cache_lock) == main_priority_elements_limit); - - LOG_TEST( - my_log, "Overflow: {}, size: {}, ready to remove: {} ({} in number), current cache size: {}/{}, elements: {}/{}, while reserving for {}:{}", - is_overflow, size, freeable_space, freeable_count, - my_main_priority->getSize(cache_lock), my_main_priority->getSizeLimit(), - my_main_priority->getElementsCount(cache_lock), my_main_priority->getElementsLimit(), - file_segment.key(), file_segment.offset()); - - return is_overflow; - }; - - /// If we have enough space in query_priority, we are not interested about stat there anymore. - /// Clean the stat before iterating main_priority to avoid calculating any segment stat twice. - reserve_stat.stat_by_kind.clear(); - - if (is_main_priority_overflow()) - { - ProfileEvents::increment(ProfileEvents::FilesystemCacheEvictionTries); - - main_priority->iterate( - [&](LockedKey & locked_key, const FileSegmentMetadataPtr & segment_metadata) - { return is_main_priority_overflow() ? 
iterate_func(locked_key, segment_metadata) : PriorityIterationResult::BREAK; }, - cache_lock); - - if (is_main_priority_overflow()) + if (!query_priority->collectCandidatesForEviction(size, reserve_stat, eviction_candidates, {}, finalize_eviction_func, cache_lock)) return false; + + LOG_TEST(log, "Query limits satisfied (while reserving for {}:{})", file_segment.key(), file_segment.offset()); + /// If we have enough space in query_priority, we are not interested about stat there anymore. + /// Clean the stat before iterating main_priority to avoid calculating any segment stat twice. + reserve_stat.stat_by_kind.clear(); } + /// A file_segment_metadata acquires a priority iterator on first successful space reservation attempt, + auto queue_iterator = file_segment.getQueueIterator(); + chassert(!queue_iterator || file_segment.getReservedSize() > 0); + + if (!main_priority->collectCandidatesForEviction(size, reserve_stat, eviction_candidates, queue_iterator, finalize_eviction_func, cache_lock)) + return false; + if (!file_segment.getKeyMetadata()->createBaseDirectory()) return false; - if (!to_delete.empty()) - { - LOG_DEBUG( - log, "Will evict {} file segments (while reserving {} bytes for {}:{})", - to_delete.size(), size, file_segment.key(), file_segment.offset()); + eviction_candidates.evict(query_context.get(), cache_lock); - ProfileEventTimeIncrement evict_watch(ProfileEvents::FilesystemCacheEvictMicroseconds); - - for (auto & [current_key, deletion_info] : to_delete) - { - auto locked_key = deletion_info.key_metadata->tryLock(); - if (!locked_key) - continue; /// key could become invalid after we released the key lock above, just skip it. - - /// delete from vector in reverse order just for efficiency - auto & candidates = deletion_info.candidates; - while (!candidates.empty()) - { - auto & candidate = candidates.back(); - chassert(candidate->releasable()); - - const auto * segment = candidate->file_segment.get(); - auto queue_it = segment->getQueueIterator(); - chassert(queue_it); - - ProfileEvents::increment(ProfileEvents::FilesystemCacheEvictedFileSegments); - ProfileEvents::increment(ProfileEvents::FilesystemCacheEvictedBytes, segment->range().size()); - - locked_key->removeFileSegment(segment->offset(), segment->lock()); - queue_it->remove(cache_lock); - - if (query_context) - query_context->remove(current_key, segment->offset(), cache_lock); - - candidates.pop_back(); - } - } - } - - /// A file_segment_metadata acquires a LRUQueue iterator on first successful space reservation attempt, - /// e.g. queue_iteratir is std::nullopt here if no space has been reserved yet. - auto queue_iterator = file_segment.getQueueIterator(); - chassert(!queue_iterator || file_segment.getReservedSize() > 0); + if (finalize_eviction_func) + finalize_eviction_func(cache_lock); if (queue_iterator) { @@ -956,6 +834,15 @@ bool FileCache::tryReserve(FileSegment & file_segment, const size_t size, FileCa return true; } +void FileCache::iterate(IterateFunc && func) +{ + return metadata.iterate([&](const LockedKey & locked_key) + { + for (const auto & file_segment_metadata : locked_key) + func(FileSegment::getInfo(file_segment_metadata.second->file_segment)); + }); +} + void FileCache::removeKey(const Key & key) { assertInitialized(); @@ -994,8 +881,7 @@ void FileCache::removeAllReleasable() { /// Remove all access information. 
auto lock = lockCache(); - stash->records.clear(); - stash->queue->removeAll(lock); + stash->clear(); } } @@ -1051,9 +937,9 @@ void FileCache::loadMetadataImpl() std::mutex set_exception_mutex; std::atomic stop_loading = false; - LOG_INFO(log, "Loading filesystem cache with {} threads", metadata_download_threads); + LOG_INFO(log, "Loading filesystem cache with {} threads", load_metadata_threads); - for (size_t i = 0; i < metadata_download_threads; ++i) + for (size_t i = 0; i < load_metadata_threads; ++i) { try { @@ -1140,9 +1026,6 @@ void FileCache::loadMetadataForKeys(const fs::path & keys_dir) const auto key = Key::fromKeyString(key_directory.filename().string()); auto key_metadata = metadata.getKeyMetadata(key, CacheMetadata::KeyNotFoundPolicy::CREATE_EMPTY, /* is_initial_load */true); - const size_t size_limit = main_priority->getSizeLimit(); - const size_t elements_limit = main_priority->getElementsLimit(); - for (fs::directory_iterator offset_it{key_directory}; offset_it != fs::directory_iterator(); ++offset_it) { auto offset_with_suffix = offset_it->path().filename().string(); @@ -1182,14 +1065,16 @@ void FileCache::loadMetadataForKeys(const fs::path & keys_dir) } bool limits_satisfied; - IFileCachePriority::Iterator cache_it; + IFileCachePriority::IteratorPtr cache_it; + size_t size_limit = 0; + { auto lock = lockCache(); - limits_satisfied = (size_limit == 0 || main_priority->getSize(lock) + size <= size_limit) - && (elements_limit == 0 || main_priority->getElementsCount(lock) + 1 <= elements_limit); + size_limit = main_priority->getSizeLimit(lock); + limits_satisfied = main_priority->canFit(size, lock); if (limits_satisfied) - cache_it = main_priority->add(key_metadata, offset, size, lock); + cache_it = main_priority->add(key_metadata, offset, size, lock, /* is_startup */true); /// TODO: we can get rid of this lockCache() if we first load everything in parallel /// without any mutual lock between loading threads, and only after do removeOverflow(). 
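FileCache::applySettingsIfPossible, added a little further below, follows a careful pattern for dynamic reconfiguration: try to push the new limit to the underlying component, and if that throws, re-read the authoritative value so the cached copy of the settings never drifts, then rethrow; the cached copy and the log message are updated only when the change actually took effect. A rough, self-contained sketch of that pattern under assumed placeholder names (the real code talks to IFileCachePriority under the cache lock and keeps FileCacheSettings in sync):

#include <iostream>
#include <mutex>
#include <stdexcept>

struct ReconfigurableCacheSketch
{
    std::mutex mutex;
    size_t backend_limit = 100; // source of truth (e.g. the priority queue's limit)
    size_t actual_limit = 100;  // cached copy of the applied settings

    bool modifyBackendLimit(size_t new_limit)
    {
        if (new_limit == 0)
            throw std::invalid_argument("limit must be non-zero");
        if (new_limit == backend_limit)
            return false;
        backend_limit = new_limit;
        return true;
    }

    void applySettingsIfPossible(size_t new_limit)
    {
        if (new_limit == actual_limit)
            return;

        std::lock_guard lock(mutex);

        bool updated = false;
        try
        {
            updated = modifyBackendLimit(new_limit);
        }
        catch (...)
        {
            actual_limit = backend_limit; // resync from the source of truth, then rethrow
            throw;
        }

        if (updated)
        {
            std::cout << "Changed limit from " << actual_limit << " to " << new_limit << '\n';
            actual_limit = backend_limit;
        }
    }
};

int main()
{
    ReconfigurableCacheSketch cache;
    cache.applySettingsIfPossible(200);
}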
@@ -1235,7 +1120,7 @@ void FileCache::loadMetadataForKeys(const fs::path & keys_dir) log, "Cache capacity changed (max size: {}), " "cached file `{}` does not fit in cache anymore (size: {})", - main_priority->getSizeLimit(), offset_it->path().string(), size); + size_limit, offset_it->path().string(), size); fs::remove(offset_it->path()); } @@ -1258,15 +1143,8 @@ FileCache::~FileCache() void FileCache::deactivateBackgroundOperations() { - metadata.cancelDownload(); - metadata.cancelCleanup(); - - for (auto & thread : download_threads) - if (thread.joinable()) - thread.join(); - - if (cleanup_thread && cleanup_thread->joinable()) - cleanup_thread->join(); + shutdown.store(true); + metadata.shutdown(); } std::vector FileCache::getFileSegmentInfos() @@ -1280,7 +1158,7 @@ std::vector FileCache::getFileSegmentInfos() metadata.iterate([&](const LockedKey & locked_key) { for (const auto & [_, file_segment_metadata] : locked_key) - file_segments.push_back(FileSegment::getInfo(file_segment_metadata->file_segment, *this)); + file_segments.push_back(FileSegment::getInfo(file_segment_metadata->file_segment)); }); return file_segments; } @@ -1290,22 +1168,14 @@ std::vector FileCache::getFileSegmentInfos(const Key & key) std::vector file_segments; auto locked_key = metadata.lockKeyMetadata(key, CacheMetadata::KeyNotFoundPolicy::THROW_LOGICAL); for (const auto & [_, file_segment_metadata] : *locked_key) - file_segments.push_back(FileSegment::getInfo(file_segment_metadata->file_segment, *this)); + file_segments.push_back(FileSegment::getInfo(file_segment_metadata->file_segment)); return file_segments; } std::vector FileCache::dumpQueue() { assertInitialized(); - - std::vector file_segments; - main_priority->iterate([&](LockedKey &, const FileSegmentMetadataPtr & segment_metadata) - { - file_segments.push_back(FileSegment::getInfo(segment_metadata->file_segment, *this)); - return PriorityIterationResult::CONTINUE; - }, lockCache()); - - return file_segments; + return main_priority->dump(lockCache()); } std::vector FileCache::tryGetCachePaths(const Key & key) @@ -1342,13 +1212,82 @@ void FileCache::assertCacheCorrectness() { for (const auto & [_, file_segment_metadata] : locked_key) { - const auto & file_segment = *file_segment_metadata->file_segment; - UNUSED(file_segment); - chassert(file_segment.assertCorrectness()); + chassert(file_segment_metadata->file_segment->assertCorrectness()); } }); } +void FileCache::applySettingsIfPossible(const FileCacheSettings & new_settings, FileCacheSettings & actual_settings) +{ + if (!is_initialized || shutdown || new_settings == actual_settings) + return; + + std::lock_guard lock(apply_settings_mutex); + + if (new_settings.background_download_queue_size_limit != actual_settings.background_download_queue_size_limit + && metadata.setBackgroundDownloadQueueSizeLimit(new_settings.background_download_queue_size_limit)) + { + LOG_INFO(log, "Changed background_download_queue_size from {} to {}", + actual_settings.background_download_queue_size_limit, + new_settings.background_download_queue_size_limit); + + actual_settings.background_download_queue_size_limit = new_settings.background_download_queue_size_limit; + } + + if (new_settings.background_download_threads != actual_settings.background_download_threads) + { + bool updated = false; + try + { + updated = metadata.setBackgroundDownloadThreads(new_settings.background_download_threads); + } + catch (...) 
+ { + actual_settings.background_download_threads = metadata.getBackgroundDownloadThreads(); + throw; + } + + if (updated) + { + LOG_INFO(log, "Changed background_download_threads from {} to {}", + actual_settings.background_download_threads, + new_settings.background_download_threads); + + actual_settings.background_download_threads = new_settings.background_download_threads; + } + } + + + if (new_settings.max_size != actual_settings.max_size + || new_settings.max_elements != actual_settings.max_elements) + { + auto cache_lock = lockCache(); + + bool updated = false; + try + { + updated = main_priority->modifySizeLimits( + new_settings.max_size, new_settings.max_elements, new_settings.slru_size_ratio, cache_lock); + } + catch (...) + { + actual_settings.max_size = main_priority->getSizeLimit(cache_lock); + actual_settings.max_elements = main_priority->getElementsLimit(cache_lock); + throw; + } + + if (updated) + { + LOG_INFO(log, "Changed max_size from {} to {}, max_elements from {} to {}", + actual_settings.max_size, new_settings.max_size, + actual_settings.max_elements, new_settings.max_elements); + + actual_settings.max_size = main_priority->getSizeLimit(cache_lock); + actual_settings.max_elements = main_priority->getElementsLimit(cache_lock); + } + } +} + FileCache::QueryContextHolder::QueryContextHolder( const String & query_id_, FileCache * cache_, @@ -1371,13 +1310,13 @@ FileCache::QueryContextHolder::~QueryContextHolder() } FileCache::QueryContextHolderPtr FileCache::getQueryContextHolder( - const String & query_id, const ReadSettings & settings) + const String & query_id, const ReadSettings & read_settings) { - if (!query_limit || settings.filesystem_cache_max_download_size == 0) + if (!query_limit || read_settings.filesystem_cache_max_download_size == 0) return {}; auto lock = lockCache(); - auto context = query_limit->getOrSetQueryContext(query_id, settings, lock); + auto context = query_limit->getOrSetQueryContext(query_id, read_settings, lock); return std::make_unique(query_id, this, std::move(context)); } @@ -1386,10 +1325,23 @@ std::vector FileCache::sync() std::vector file_segments; metadata.iterate([&](LockedKey & locked_key) { - auto broken = locked_key.sync(*this); + auto broken = locked_key.sync(); file_segments.insert(file_segments.end(), broken.begin(), broken.end()); }); return file_segments; } +FileCache::HitsCountStash::HitsCountStash(size_t hits_threashold_, size_t queue_size_) + : hits_threshold(hits_threashold_), queue_size(queue_size_), queue(std::make_unique(0, queue_size_)) +{ + if (!queue_size_) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Queue size for hits queue must be non-zero"); +} + +void FileCache::HitsCountStash::clear() +{ + records.clear(); + queue = std::make_unique(0, queue_size); +} + } diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index eb457ad31e8..11d35535e9e 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -16,31 +16,30 @@ #include #include #include +#include #include namespace DB { -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; -} - /// Track acquired space in cache during reservation /// to make error messages when no space left more informative. 
struct FileCacheReserveStat { struct Stat { - size_t releasable_size; - size_t releasable_count; + size_t releasable_size = 0; + size_t releasable_count = 0; - size_t non_releasable_size; - size_t non_releasable_count; + size_t non_releasable_size = 0; + size_t non_releasable_count = 0; }; + Stat stat; std::unordered_map stat_by_kind; + + void update(size_t size, FileSegmentKind kind, bool releasable); }; /// Local cache for remote filesystem files, represented as a set of non-overlapping non-empty file segments. @@ -52,8 +51,6 @@ public: using QueryLimit = DB::FileCacheQueryLimit; using Priority = IFileCachePriority; using PriorityEntry = IFileCachePriority::Entry; - using PriorityIterator = IFileCachePriority::Iterator; - using PriorityIterationResult = IFileCachePriority::IterationResult; FileCache(const std::string & cache_name, const FileCacheSettings & settings); @@ -154,14 +151,18 @@ public: std::vector sync(); + using IterateFunc = std::function; + void iterate(IterateFunc && func); + + void applySettingsIfPossible(const FileCacheSettings & new_settings, FileCacheSettings & actual_settings); + private: using KeyAndOffset = FileCacheKeyAndOffset; const size_t max_file_segment_size; - const size_t bypass_cache_threshold = 0; + const size_t bypass_cache_threshold; const size_t boundary_alignment; - const size_t background_download_threads; /// 0 means background download is disabled. - const size_t metadata_download_threads; + size_t load_metadata_threads; Poco::Logger * log; @@ -169,6 +170,9 @@ private: std::atomic is_initialized = false; mutable std::mutex init_mutex; std::unique_ptr status_file; + std::atomic shutdown = false; + + std::mutex apply_settings_mutex; CacheMetadata metadata; @@ -177,16 +181,14 @@ private: struct HitsCountStash { - HitsCountStash(size_t hits_threashold_, size_t queue_size_) - : hits_threshold(hits_threashold_), queue(std::make_unique(0, queue_size_)) - { - if (!queue_size_) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Queue size for hits queue must be non-zero"); - } + HitsCountStash(size_t hits_threashold_, size_t queue_size_); + void clear(); const size_t hits_threshold; - FileCachePriorityPtr queue; - using Records = std::unordered_map; + const size_t queue_size; + + std::unique_ptr queue; + using Records = std::unordered_map; Records records; }; @@ -201,12 +203,6 @@ private: * then allowed loaded cache size is std::min(n - k, max_query_cache_size). */ FileCacheQueryLimitPtr query_limit; - /** - * A background cleanup task. - * Clears removed cache entries from metadata. 
- */ - std::vector download_threads; - std::unique_ptr cleanup_thread; void assertInitialized() const; void assertCacheCorrectness(); diff --git a/src/Interpreters/Cache/FileCacheFactory.cpp b/src/Interpreters/Cache/FileCacheFactory.cpp index 56436f27b0f..3e857d8a8e3 100644 --- a/src/Interpreters/Cache/FileCacheFactory.cpp +++ b/src/Interpreters/Cache/FileCacheFactory.cpp @@ -9,6 +9,28 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; } +FileCacheFactory::FileCacheData::FileCacheData( + FileCachePtr cache_, + const FileCacheSettings & settings_, + const std::string & config_path_) + : cache(cache_) + , config_path(config_path_) + , settings(settings_) +{ +} + +FileCacheSettings FileCacheFactory::FileCacheData::getSettings() const +{ + std::lock_guard lock(settings_mutex); + return settings; +} + +void FileCacheFactory::FileCacheData::setSettings(const FileCacheSettings & new_settings) +{ + std::lock_guard lock(settings_mutex); + settings = new_settings; +} + FileCacheFactory & FileCacheFactory::instance() { static FileCacheFactory ret; @@ -22,37 +44,86 @@ FileCacheFactory::CacheByName FileCacheFactory::getAll() } FileCachePtr FileCacheFactory::getOrCreate( - const std::string & cache_name, const FileCacheSettings & file_cache_settings) + const std::string & cache_name, + const FileCacheSettings & file_cache_settings, + const std::string & config_path) { std::lock_guard lock(mutex); - auto it = caches_by_name.find(cache_name); + auto it = std::find_if(caches_by_name.begin(), caches_by_name.end(), [&](const auto & cache_by_name) + { + return cache_by_name.second->getSettings().base_path == file_cache_settings.base_path; + }); + if (it == caches_by_name.end()) { auto cache = std::make_shared(cache_name, file_cache_settings); - it = caches_by_name.emplace( - cache_name, std::make_unique(cache, file_cache_settings)).first; + + bool inserted; + std::tie(it, inserted) = caches_by_name.emplace( + cache_name, std::make_unique(cache, file_cache_settings, config_path)); + + if (!inserted) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Cache with name {} exists, but it has a different path", cache_name); + } + } + else if (it->second->getSettings() != file_cache_settings) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Found more than one cache configuration with the same path, " + "but with different cache settings ({} and {})", + it->first, cache_name); + } + else if (it->first != cache_name) + { + caches_by_name.emplace(cache_name, it->second); } return it->second->cache; } -FileCachePtr FileCacheFactory::create(const std::string & cache_name, const FileCacheSettings & file_cache_settings) +FileCachePtr FileCacheFactory::create( + const std::string & cache_name, + const FileCacheSettings & file_cache_settings, + const std::string & config_path) { std::lock_guard lock(mutex); auto it = caches_by_name.find(cache_name); + if (it != caches_by_name.end()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cache with name {} already exists", cache_name); - auto cache = std::make_shared(cache_name, file_cache_settings); - it = caches_by_name.emplace( - cache_name, std::make_unique(cache, file_cache_settings)).first; + it = std::find_if(caches_by_name.begin(), caches_by_name.end(), [&](const auto & cache_by_name) + { + return cache_by_name.second->getSettings().base_path == file_cache_settings.base_path; + }); + + if (it == caches_by_name.end()) + { + auto cache = std::make_shared(cache_name, file_cache_settings); + it = caches_by_name.emplace( + cache_name, std::make_unique(cache, 
file_cache_settings, config_path)).first; + } + else if (it->second->getSettings() != file_cache_settings) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Found more than one cache configuration with the same path, " + "but with different cache settings ({} and {})", + it->first, cache_name); + } + else + { + [[maybe_unused]] bool inserted = caches_by_name.emplace(cache_name, it->second).second; + chassert(inserted); + } return it->second->cache; } -FileCacheFactory::FileCacheData FileCacheFactory::getByName(const std::string & cache_name) +FileCacheFactory::FileCacheDataPtr FileCacheFactory::getByName(const std::string & cache_name) { std::lock_guard lock(mutex); @@ -60,7 +131,46 @@ FileCacheFactory::FileCacheData FileCacheFactory::getByName(const std::string & if (it == caches_by_name.end()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no cache by name: {}", cache_name); - return *it->second; + return it->second; +} + +void FileCacheFactory::updateSettingsFromConfig(const Poco::Util::AbstractConfiguration & config) +{ + CacheByName caches_by_name_copy; + { + std::lock_guard lock(mutex); + caches_by_name_copy = caches_by_name; + } + + std::unordered_set checked_paths; + for (const auto & [_, cache_info] : caches_by_name_copy) + { + if (cache_info->config_path.empty() || checked_paths.contains(cache_info->config_path)) + continue; + + checked_paths.emplace(cache_info->config_path); + + FileCacheSettings new_settings; + new_settings.loadFromConfig(config, cache_info->config_path); + + FileCacheSettings old_settings = cache_info->getSettings(); + if (old_settings == new_settings) + continue; + + try + { + cache_info->cache->applySettingsIfPossible(new_settings, old_settings); + } + catch (...) + { + /// Settings changes could be partially applied in case of exception, + /// make sure cache_info->settings show correct state of applied settings. 
+ cache_info->setSettings(old_settings); + throw; + } + + cache_info->setSettings(old_settings); + } } } diff --git a/src/Interpreters/Cache/FileCacheFactory.h b/src/Interpreters/Cache/FileCacheFactory.h index da037ac9bcb..c60b247005b 100644 --- a/src/Interpreters/Cache/FileCacheFactory.h +++ b/src/Interpreters/Cache/FileCacheFactory.h @@ -6,7 +6,6 @@ #include #include #include -#include namespace DB { @@ -17,26 +16,44 @@ namespace DB class FileCacheFactory final : private boost::noncopyable { public: - struct FileCacheData + class FileCacheData { - FileCachePtr cache; - FileCacheSettings settings; + friend class FileCacheFactory; + public: + FileCacheData(FileCachePtr cache_, const FileCacheSettings & settings_, const std::string & config_path_); - FileCacheData() = default; - FileCacheData(FileCachePtr cache_, const FileCacheSettings & settings_) : cache(cache_), settings(settings_) {} + FileCacheSettings getSettings() const; + + void setSettings(const FileCacheSettings & new_settings); + + const FileCachePtr cache; + const std::string config_path; + + private: + FileCacheSettings settings; + mutable std::mutex settings_mutex; }; + using FileCacheDataPtr = std::shared_ptr; using CacheByName = std::unordered_map; static FileCacheFactory & instance(); - FileCachePtr getOrCreate(const std::string & cache_name, const FileCacheSettings & file_cache_settings); + FileCachePtr getOrCreate( + const std::string & cache_name, + const FileCacheSettings & file_cache_settings, + const std::string & config_path); - FileCachePtr create(const std::string & cache_name, const FileCacheSettings & file_cache_settings); + FileCachePtr create( + const std::string & cache_name, + const FileCacheSettings & file_cache_settings, + const std::string & config_path); CacheByName getAll(); - FileCacheData getByName(const std::string & cache_name); + FileCacheDataPtr getByName(const std::string & cache_name); + + void updateSettingsFromConfig(const Poco::Util::AbstractConfiguration & config); private: std::mutex mutex; diff --git a/src/Interpreters/Cache/FileCacheSettings.cpp b/src/Interpreters/Cache/FileCacheSettings.cpp index a4eefbd7115..692d48c6ce6 100644 --- a/src/Interpreters/Cache/FileCacheSettings.cpp +++ b/src/Interpreters/Cache/FileCacheSettings.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include namespace DB @@ -13,7 +14,7 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; } -void FileCacheSettings::loadImpl(FuncHas has, FuncGetUInt get_uint, FuncGetString get_string) +void FileCacheSettings::loadImpl(FuncHas has, FuncGetUInt get_uint, FuncGetString get_string, FuncGetDouble get_double) { auto config_parse_size = [&](std::string_view key) { return parseWithSizeSuffix(get_string(key)); }; @@ -64,6 +65,15 @@ void FileCacheSettings::loadImpl(FuncHas has, FuncGetUInt get_uint, FuncGetStrin if (boundary_alignment > max_file_segment_size) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Setting `boundary_alignment` cannot exceed `max_file_segment_size`"); + + if (has("cache_policy")) + { + cache_policy = get_string("cache_policy"); + boost::to_upper(cache_policy); + } + + if (has("slru_size_ratio")) + slru_size_ratio = get_double("slru_size_ratio"); } void FileCacheSettings::loadFromConfig(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix) @@ -71,15 +81,17 @@ void FileCacheSettings::loadFromConfig(const Poco::Util::AbstractConfiguration & auto config_has = [&](std::string_view key) { return config.has(fmt::format("{}.{}", config_prefix, key)); }; auto config_get_uint 
= [&](std::string_view key) { return config.getUInt(fmt::format("{}.{}", config_prefix, key)); }; auto config_get_string = [&](std::string_view key) { return config.getString(fmt::format("{}.{}", config_prefix, key)); }; - loadImpl(std::move(config_has), std::move(config_get_uint), std::move(config_get_string)); + auto config_get_double = [&](std::string_view key) { return config.getDouble(fmt::format("{}.{}", config_prefix, key)); }; + loadImpl(std::move(config_has), std::move(config_get_uint), std::move(config_get_string), std::move(config_get_double)); } void FileCacheSettings::loadFromCollection(const NamedCollection & collection) { - auto config_has = [&](std::string_view key) { return collection.has(std::string(key)); }; - auto config_get_uint = [&](std::string_view key) { return collection.get(std::string(key)); }; - auto config_get_string = [&](std::string_view key) { return collection.get(std::string(key)); }; - loadImpl(std::move(config_has), std::move(config_get_uint), std::move(config_get_string)); + auto collection_has = [&](std::string_view key) { return collection.has(std::string(key)); }; + auto collection_get_uint = [&](std::string_view key) { return collection.get(std::string(key)); }; + auto collection_get_string = [&](std::string_view key) { return collection.get(std::string(key)); }; + auto collection_get_double = [&](std::string_view key) { return collection.get(std::string(key)); }; + loadImpl(std::move(collection_has), std::move(collection_get_uint), std::move(collection_get_string), std::move(collection_get_double)); } } diff --git a/src/Interpreters/Cache/FileCacheSettings.h b/src/Interpreters/Cache/FileCacheSettings.h index bcc15f7b204..eafd7aafb29 100644 --- a/src/Interpreters/Cache/FileCacheSettings.h +++ b/src/Interpreters/Cache/FileCacheSettings.h @@ -32,14 +32,20 @@ struct FileCacheSettings size_t load_metadata_threads = FILECACHE_DEFAULT_LOAD_METADATA_THREADS; + std::string cache_policy = "LRU"; + double slru_size_ratio = 0.5; + void loadFromConfig(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix); void loadFromCollection(const NamedCollection & collection); + bool operator ==(const FileCacheSettings &) const = default; + private: using FuncHas = std::function; using FuncGetUInt = std::function; using FuncGetString = std::function; - void loadImpl(FuncHas has, FuncGetUInt get_uint, FuncGetString get_string); + using FuncGetDouble = std::function; + void loadImpl(FuncHas has, FuncGetUInt get_uint, FuncGetString get_string, FuncGetDouble get_double); }; } diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index 3fa3da19e5a..170d1e1092c 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -54,7 +54,7 @@ FileSegment::FileSegment( bool background_download_enabled_, FileCache * cache_, std::weak_ptr key_metadata_, - Priority::Iterator queue_iterator_) + Priority::IteratorPtr queue_iterator_) : file_key(key_) , segment_range(offset_, offset_ + size_ - 1) , segment_kind(settings.kind) @@ -120,6 +120,14 @@ String FileSegment::getPathInLocalCache() const return getKeyMetadata()->getFileSegmentPath(*this); } +String FileSegment::tryGetPathInLocalCache() const +{ + auto metadata = tryGetKeyMetadata(); + if (!metadata) + return ""; + return metadata->getFileSegmentPath(*this); +} + FileSegmentGuard::Lock FileSegment::lockFileSegment() const { ProfileEventTimeIncrement watch(ProfileEvents::FileSegmentLockMicroseconds); @@ -146,13 +154,13 @@ size_t 
FileSegment::getReservedSize() const return reserved_size; } -FileSegment::Priority::Iterator FileSegment::getQueueIterator() const +FileSegment::Priority::IteratorPtr FileSegment::getQueueIterator() const { auto lock = lockFileSegment(); return queue_iterator; } -void FileSegment::setQueueIterator(Priority::Iterator iterator) +void FileSegment::setQueueIterator(Priority::IteratorPtr iterator) { auto lock = lockFileSegment(); if (queue_iterator) @@ -480,7 +488,7 @@ bool FileSegment::reserve(size_t size_to_reserve, FileCacheReserveStat * reserve bool is_file_segment_size_exceeded; { - auto lock = segment_guard.lock(); + auto lock = lockFileSegment(); assertNotDetachedUnlocked(lock); assertIsDownloaderUnlocked("reserve", lock); @@ -773,7 +781,7 @@ bool FileSegment::assertCorrectness() const bool FileSegment::assertCorrectnessUnlocked(const FileSegmentGuard::Lock &) const { - auto check_iterator = [this](const Priority::Iterator & it) + auto check_iterator = [this](const Priority::IteratorPtr & it) { UNUSED(this); if (!it) @@ -833,13 +841,13 @@ void FileSegment::assertNotDetachedUnlocked(const FileSegmentGuard::Lock & lock) } } -FileSegment::Info FileSegment::getInfo(const FileSegmentPtr & file_segment, FileCache & cache) +FileSegment::Info FileSegment::getInfo(const FileSegmentPtr & file_segment) { auto lock = file_segment->lockFileSegment(); return Info{ .key = file_segment->key(), .offset = file_segment->offset(), - .path = cache.getPathInLocalCache(file_segment->key(), file_segment->offset(), file_segment->segment_kind), + .path = file_segment->tryGetPathInLocalCache(), .range_left = file_segment->range().left, .range_right = file_segment->range().right, .kind = file_segment->segment_kind, @@ -849,6 +857,7 @@ FileSegment::Info FileSegment::getInfo(const FileSegmentPtr & file_segment, File .cache_hits = file_segment->hits_count, .references = static_cast(file_segment.use_count()), .is_unbound = file_segment->is_unbound, + .queue_entry_type = file_segment->queue_iterator ? file_segment->queue_iterator->getType() : QueueEntryType::None, }; } @@ -904,7 +913,7 @@ void FileSegment::detach(const FileSegmentGuard::Lock & lock, const LockedKey &) setDetachedState(lock); } -void FileSegment::use() +void FileSegment::increasePriority() { ProfileEventTimeIncrement watch(ProfileEvents::FileSegmentUseMicroseconds); @@ -918,7 +927,7 @@ void FileSegment::use() if (it) { auto cache_lock = cache->lockCache(); - hits_count = it->use(cache_lock); + hits_count = it->increasePriority(cache_lock); } } diff --git a/src/Interpreters/Cache/FileSegment.h b/src/Interpreters/Cache/FileSegment.h index ff5c03c18f3..d2c7ac9f90c 100644 --- a/src/Interpreters/Cache/FileSegment.h +++ b/src/Interpreters/Cache/FileSegment.h @@ -11,8 +11,8 @@ #include #include #include +#include #include -#include namespace Poco { class Logger; } @@ -28,23 +28,6 @@ namespace DB class ReadBufferFromFileBase; struct FileCacheReserveStat; -/* - * FileSegmentKind is used to specify the eviction policy for file segments. - */ -enum class FileSegmentKind -{ - /* `Regular` file segment is still in cache after usage, and can be evicted - * (unless there're some holders). - */ - Regular, - - /* `Temporary` file segment is removed right after releasing. - * Also corresponding files are removed during cache loading (if any). 
- */ - Temporary, -}; - -String toString(FileSegmentKind kind); struct CreateFileSegmentSettings { @@ -69,40 +52,9 @@ public: using Downloader = std::string; using DownloaderId = std::string; using Priority = IFileCachePriority; - - enum class State - { - DOWNLOADED, - /** - * When file segment is first created and returned to user, it has state EMPTY. - * EMPTY state can become DOWNLOADING when getOrSetDownaloder is called successfully - * by any owner of EMPTY state file segment. - */ - EMPTY, - /** - * A newly created file segment never has DOWNLOADING state until call to getOrSetDownloader - * because each cache user might acquire multiple file segments and read them one by one, - * so only user which actually needs to read this segment earlier than others - becomes a downloader. - */ - DOWNLOADING, - /** - * Space reservation for a file segment is incremental, i.e. downloader reads buffer_size bytes - * from remote fs -> tries to reserve buffer_size bytes to put them to cache -> writes to cache - * on successful reservation and stops cache write otherwise. Those, who waited for the same file - * segment, will read downloaded part from cache and remaining part directly from remote fs. - */ - PARTIALLY_DOWNLOADED_NO_CONTINUATION, - /** - * If downloader did not finish download of current file segment for any reason apart from running - * out of cache space, then download can be continued by other owners of this file segment. - */ - PARTIALLY_DOWNLOADED, - /** - * If file segment cannot possibly be downloaded (first space reservation attempt failed), mark - * this file segment as out of cache scope. - */ - DETACHED, - }; + using State = FileSegmentState; + using Info = FileSegmentInfo; + using QueueEntryType = FileCacheQueueEntryType; FileSegment( const Key & key_, @@ -113,7 +65,7 @@ public: bool background_download_enabled_ = false, FileCache * cache_ = nullptr, std::weak_ptr key_metadata_ = std::weak_ptr(), - Priority::Iterator queue_iterator_ = Priority::Iterator{}); + Priority::IteratorPtr queue_iterator_ = nullptr); ~FileSegment() = default; @@ -205,22 +157,7 @@ public: /// exception. void detach(const FileSegmentGuard::Lock &, const LockedKey &); - struct Info - { - FileSegment::Key key; - size_t offset; - std::string path; - uint64_t range_left; - uint64_t range_right; - FileSegmentKind kind; - State state; - uint64_t size; - uint64_t downloaded_size; - uint64_t cache_hits; - uint64_t references; - bool is_unbound; - }; - static Info getInfo(const FileSegmentPtr & file_segment, FileCache & cache); + static FileSegmentInfo getInfo(const FileSegmentPtr & file_segment); bool isDetached() const; @@ -228,7 +165,7 @@ public: /// is not going to be changed. Completed states: DOWNALODED, DETACHED. 
bool isCompleted(bool sync = false) const; - void use(); + void increasePriority(); /** * ========== Methods used by `cache` ======================== @@ -236,9 +173,9 @@ public: FileSegmentGuard::Lock lock() const { return segment_guard.lock(); } - Priority::Iterator getQueueIterator() const; + Priority::IteratorPtr getQueueIterator() const; - void setQueueIterator(Priority::Iterator iterator); + void setQueueIterator(Priority::IteratorPtr iterator); KeyMetadataPtr tryGetKeyMetadata() const; @@ -306,6 +243,8 @@ private: LockedKeyPtr lockKeyMetadata(bool assert_exists = true) const; FileSegmentGuard::Lock lockFileSegment() const; + String tryGetPathInLocalCache() const; + Key file_key; Range segment_range; const FileSegmentKind segment_kind; @@ -326,7 +265,7 @@ private: mutable FileSegmentGuard segment_guard; std::weak_ptr key_metadata; - mutable Priority::Iterator queue_iterator; /// Iterator is put here on first reservation attempt, if successful. + mutable Priority::IteratorPtr queue_iterator; /// Iterator is put here on first reservation attempt, if successful. FileCache * cache; std::condition_variable cv; diff --git a/src/Interpreters/Cache/FileSegmentInfo.h b/src/Interpreters/Cache/FileSegmentInfo.h new file mode 100644 index 00000000000..bb87cbbc15d --- /dev/null +++ b/src/Interpreters/Cache/FileSegmentInfo.h @@ -0,0 +1,82 @@ +#pragma once +#include +#include + +namespace DB +{ + enum class FileSegmentState + { + DOWNLOADED, + /** + * When file segment is first created and returned to user, it has state EMPTY. + * EMPTY state can become DOWNLOADING when getOrSetDownaloder is called successfully + * by any owner of EMPTY state file segment. + */ + EMPTY, + /** + * A newly created file segment never has DOWNLOADING state until call to getOrSetDownloader + * because each cache user might acquire multiple file segments and read them one by one, + * so only user which actually needs to read this segment earlier than others - becomes a downloader. + */ + DOWNLOADING, + /** + * Space reservation for a file segment is incremental, i.e. downloader reads buffer_size bytes + * from remote fs -> tries to reserve buffer_size bytes to put them to cache -> writes to cache + * on successful reservation and stops cache write otherwise. Those, who waited for the same file + * segment, will read downloaded part from cache and remaining part directly from remote fs. + */ + PARTIALLY_DOWNLOADED_NO_CONTINUATION, + /** + * If downloader did not finish download of current file segment for any reason apart from running + * out of cache space, then download can be continued by other owners of this file segment. + */ + PARTIALLY_DOWNLOADED, + /** + * If file segment cannot possibly be downloaded (first space reservation attempt failed), mark + * this file segment as out of cache scope. + */ + DETACHED, + }; + + enum class FileSegmentKind + { + /** + * `Regular` file segment is still in cache after usage, and can be evicted + * (unless there're some holders). + */ + Regular, + + /** + * Temporary` file segment is removed right after releasing. + * Also corresponding files are removed during cache loading (if any). 
+ */ + Temporary, + }; + + enum class FileCacheQueueEntryType + { + None, + LRU, + SLRU_Protected, + SLRU_Probationary, + }; + + std::string toString(FileSegmentKind kind); + + struct FileSegmentInfo + { + FileCacheKey key; + size_t offset; + std::string path; + uint64_t range_left; + uint64_t range_right; + FileSegmentKind kind; + FileSegmentState state; + uint64_t size; + uint64_t downloaded_size; + uint64_t cache_hits; + uint64_t references; + bool is_unbound; + FileCacheQueueEntryType queue_entry_type; + }; +} diff --git a/src/Interpreters/Cache/IFileCachePriority.cpp b/src/Interpreters/Cache/IFileCachePriority.cpp new file mode 100644 index 00000000000..eb396a1e323 --- /dev/null +++ b/src/Interpreters/Cache/IFileCachePriority.cpp @@ -0,0 +1,40 @@ +#include +#include + + +namespace CurrentMetrics +{ + extern const Metric FilesystemCacheSizeLimit; +} + +namespace DB +{ + +IFileCachePriority::IFileCachePriority(size_t max_size_, size_t max_elements_) + : max_size(max_size_), max_elements(max_elements_) +{ + CurrentMetrics::add(CurrentMetrics::FilesystemCacheSizeLimit, max_size_); +} + +IFileCachePriority::Entry::Entry( + const Key & key_, + size_t offset_, + size_t size_, + KeyMetadataPtr key_metadata_) + : key(key_) + , offset(offset_) + , key_metadata(key_metadata_) + , size(size_) +{ +} + +IFileCachePriority::Entry::Entry(const Entry & other) + : key(other.key) + , offset(other.offset) + , key_metadata(other.key_metadata) + , size(other.size.load()) + , hits(other.hits) +{ +} + +} diff --git a/src/Interpreters/Cache/IFileCachePriority.h b/src/Interpreters/Cache/IFileCachePriority.h index 7de380c163b..c07f6fb9fb4 100644 --- a/src/Interpreters/Cache/IFileCachePriority.h +++ b/src/Interpreters/Cache/IFileCachePriority.h @@ -1,98 +1,96 @@ #pragma once #include -#include #include #include -#include +#include #include +#include #include namespace DB { +struct FileCacheReserveStat; +class EvictionCandidates; -/// IFileCachePriority is used to maintain the priority of cached data. class IFileCachePriority : private boost::noncopyable { public: using Key = FileCacheKey; - using KeyAndOffset = FileCacheKeyAndOffset; + using QueueEntryType = FileCacheQueueEntryType; struct Entry { - Entry(const Key & key_, size_t offset_, size_t size_, KeyMetadataPtr key_metadata_) - : key(key_), offset(offset_), size(size_), key_metadata(key_metadata_) {} - - Entry(const Entry & other) - : key(other.key), offset(other.offset), size(other.size.load()), hits(other.hits), key_metadata(other.key_metadata) {} + Entry(const Key & key_, size_t offset_, size_t size_, KeyMetadataPtr key_metadata_); + Entry(const Entry & other); const Key key; const size_t offset; + const KeyMetadataPtr key_metadata; + std::atomic size; size_t hits = 0; - const KeyMetadataPtr key_metadata; }; - /// Provides an iterator to traverse the cache priority. Under normal circumstances, - /// the iterator can only return the records that have been directly swapped out. - /// For example, in the LRU algorithm, it can traverse all records, but in the LRU-K, it - /// can only traverse the records in the low priority queue. 
- class IIterator + class Iterator { public: - virtual ~IIterator() = default; - - virtual size_t use(const CacheGuard::Lock &) = 0; - - virtual void remove(const CacheGuard::Lock &) = 0; + virtual ~Iterator() = default; virtual const Entry & getEntry() const = 0; - virtual Entry & getEntry() = 0; + virtual size_t increasePriority(const CacheGuard::Lock &) = 0; + + virtual void updateSize(int64_t size) = 0; + + virtual void remove(const CacheGuard::Lock &) = 0; virtual void invalidate() = 0; - virtual void updateSize(int64_t size) = 0; + virtual QueueEntryType getType() const = 0; }; + using IteratorPtr = std::shared_ptr; - using Iterator = std::shared_ptr; - using ConstIterator = std::shared_ptr; - - enum class IterationResult - { - BREAK, - CONTINUE, - REMOVE_AND_CONTINUE, - }; - using IterateFunc = std::function; - - IFileCachePriority(size_t max_size_, size_t max_elements_) : max_size(max_size_), max_elements(max_elements_) {} + IFileCachePriority(size_t max_size_, size_t max_elements_); virtual ~IFileCachePriority() = default; - size_t getElementsLimit() const { return max_elements; } + size_t getElementsLimit(const CacheGuard::Lock &) const { return max_elements; } - size_t getSizeLimit() const { return max_size; } + size_t getSizeLimit(const CacheGuard::Lock &) const { return max_size; } virtual size_t getSize(const CacheGuard::Lock &) const = 0; virtual size_t getElementsCount(const CacheGuard::Lock &) const = 0; - virtual Iterator add( - KeyMetadataPtr key_metadata, size_t offset, size_t size, const CacheGuard::Lock &) = 0; + /// Throws exception if there is not enough size to fit it. + virtual IteratorPtr add( /// NOLINT + KeyMetadataPtr key_metadata, + size_t offset, + size_t size, + const CacheGuard::Lock &, + bool is_startup = false) = 0; - virtual void pop(const CacheGuard::Lock &) = 0; - - virtual void removeAll(const CacheGuard::Lock &) = 0; - - /// From lowest to highest priority. 
- virtual void iterate(IterateFunc && func, const CacheGuard::Lock &) = 0; + virtual bool canFit(size_t size, const CacheGuard::Lock &) const = 0; virtual void shuffle(const CacheGuard::Lock &) = 0; -private: - const size_t max_size = 0; - const size_t max_elements = 0; + virtual std::vector dump(const CacheGuard::Lock &) = 0; + + using FinalizeEvictionFunc = std::function; + virtual bool collectCandidatesForEviction( + size_t size, + FileCacheReserveStat & stat, + EvictionCandidates & res, + IFileCachePriority::IteratorPtr reservee, + FinalizeEvictionFunc & finalize_eviction_func, + const CacheGuard::Lock &) = 0; + + virtual bool modifySizeLimits(size_t max_size_, size_t max_elements_, double size_ratio_, const CacheGuard::Lock &) = 0; + +protected: + size_t max_size = 0; + size_t max_elements = 0; }; } diff --git a/src/Interpreters/Cache/LRUFileCachePriority.cpp b/src/Interpreters/Cache/LRUFileCachePriority.cpp index 5ecea95b1db..2155d2e1f8b 100644 --- a/src/Interpreters/Cache/LRUFileCachePriority.cpp +++ b/src/Interpreters/Cache/LRUFileCachePriority.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -11,6 +12,15 @@ namespace CurrentMetrics extern const Metric FilesystemCacheElements; } +namespace ProfileEvents +{ + extern const Event FilesystemCacheEvictionSkippedFileSegments; + extern const Event FilesystemCacheEvictionTries; + extern const Event FilesystemCacheEvictMicroseconds; + extern const Event FilesystemCacheEvictedBytes; + extern const Event FilesystemCacheEvictedFileSegments; +} + namespace DB { @@ -19,26 +29,31 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -IFileCachePriority::Iterator LRUFileCachePriority::add( +IFileCachePriority::IteratorPtr LRUFileCachePriority::add( /// NOLINT KeyMetadataPtr key_metadata, size_t offset, size_t size, - const CacheGuard::Lock &) + const CacheGuard::Lock & lock, + bool /* is_startup */) { - const auto & key = key_metadata->key; - if (size == 0) + return std::make_shared(add(Entry(key_metadata->key, offset, size, key_metadata), lock)); +} + +LRUFileCachePriority::LRUIterator LRUFileCachePriority::add(Entry && entry, const CacheGuard::Lock & lock) +{ + if (entry.size == 0) { throw Exception( ErrorCodes::LOGICAL_ERROR, "Adding zero size entries to LRU queue is not allowed " - "(key: {}, offset: {})", key, offset); + "(key: {}, offset: {})", entry.key, entry.offset); } #ifndef NDEBUG - for (const auto & entry : queue) + for (const auto & queue_entry : queue) { /// entry.size == 0 means entry was invalidated. - if (entry.size != 0 && entry.key == key && entry.offset == offset) + if (queue_entry.size != 0 && queue_entry.key == entry.key && queue_entry.offset == entry.offset) throw Exception( ErrorCodes::LOGICAL_ERROR, "Attempt to add duplicate queue entry to queue. 
" @@ -47,42 +62,28 @@ IFileCachePriority::Iterator LRUFileCachePriority::add( } #endif - const auto & size_limit = getSizeLimit(); - if (size_limit && current_size + size > size_limit) + const auto & size_limit = getSizeLimit(lock); + if (size_limit && current_size + entry.size > size_limit) { throw Exception( ErrorCodes::LOGICAL_ERROR, "Not enough space to add {}:{} with size {}: current size: {}/{}", - key, offset, size, current_size, size_limit); + entry.key, entry.offset, entry.size, current_size, size_limit); } - auto iter = queue.insert(queue.end(), Entry(key, offset, size, key_metadata)); + auto iterator = queue.insert(queue.end(), entry); - updateSize(size); + updateSize(entry.size); updateElementsCount(1); LOG_TEST( log, "Added entry into LRU queue, key: {}, offset: {}, size: {}", - key, offset, size); + entry.key, entry.offset, entry.size); - return std::make_shared(this, iter); + return LRUIterator(this, iterator); } -void LRUFileCachePriority::removeAll(const CacheGuard::Lock &) -{ - LOG_TEST(log, "Removed all entries from LRU queue"); - - updateSize(-current_size); - updateElementsCount(-current_elements_num); - queue.clear(); -} - -void LRUFileCachePriority::pop(const CacheGuard::Lock &) -{ - remove(queue.begin()); -} - -LRUFileCachePriority::LRUQueueIterator LRUFileCachePriority::remove(LRUQueueIterator it) +LRUFileCachePriority::LRUQueue::iterator LRUFileCachePriority::remove(LRUQueue::iterator it, const CacheGuard::Lock &) { /// If size is 0, entry is invalidated, current_elements_num was already updated. if (it->size) @@ -110,30 +111,49 @@ void LRUFileCachePriority::updateElementsCount(int64_t num) CurrentMetrics::add(CurrentMetrics::FilesystemCacheElements, num); } - -LRUFileCachePriority::LRUFileCacheIterator::LRUFileCacheIterator( +LRUFileCachePriority::LRUIterator::LRUIterator( LRUFileCachePriority * cache_priority_, - LRUFileCachePriority::LRUQueueIterator queue_iter_) + LRUQueue::iterator iterator_) : cache_priority(cache_priority_) - , queue_iter(queue_iter_) + , iterator(iterator_) { } -void LRUFileCachePriority::iterate(IterateFunc && func, const CacheGuard::Lock &) +LRUFileCachePriority::LRUIterator::LRUIterator(const LRUIterator & other) +{ + *this = other; +} + +LRUFileCachePriority::LRUIterator & LRUFileCachePriority::LRUIterator::operator =(const LRUIterator & other) +{ + if (this == &other) + return *this; + + cache_priority = other.cache_priority; + iterator = other.iterator; + return *this; +} + +bool LRUFileCachePriority::LRUIterator::operator ==(const LRUIterator & other) const +{ + return cache_priority == other.cache_priority && iterator == other.iterator; +} + +void LRUFileCachePriority::iterate(IterateFunc && func, const CacheGuard::Lock & lock) { for (auto it = queue.begin(); it != queue.end();) { auto locked_key = it->key_metadata->tryLock(); if (!locked_key || it->size == 0) { - it = remove(it); + it = remove(it, lock); continue; } auto metadata = locked_key->tryGetByOffset(it->offset); if (!metadata) { - it = remove(it); + it = remove(it, lock); continue; } @@ -160,63 +180,212 @@ void LRUFileCachePriority::iterate(IterateFunc && func, const CacheGuard::Lock & } case IterationResult::REMOVE_AND_CONTINUE: { - it = remove(it); + it = remove(it, lock); break; } } } } -void LRUFileCachePriority::LRUFileCacheIterator::remove(const CacheGuard::Lock &) +bool LRUFileCachePriority::canFit(size_t size, const CacheGuard::Lock & lock) const { - checkUsable(); - cache_priority->remove(queue_iter); - queue_iter = LRUQueueIterator{}; + return canFit(size, 0, 0, 
lock); } -void LRUFileCachePriority::LRUFileCacheIterator::invalidate() +bool LRUFileCachePriority::canFit( + size_t size, + size_t released_size_assumption, + size_t released_elements_assumption, + const CacheGuard::Lock &) const { - checkUsable(); + return (max_size == 0 || (current_size + size - released_size_assumption <= max_size)) + && (max_elements == 0 || current_elements_num + 1 - released_elements_assumption <= max_elements); +} + +bool LRUFileCachePriority::collectCandidatesForEviction( + size_t size, + FileCacheReserveStat & stat, + EvictionCandidates & res, + IFileCachePriority::IteratorPtr, + FinalizeEvictionFunc &, + const CacheGuard::Lock & lock) +{ + if (canFit(size, lock)) + return true; + + ProfileEvents::increment(ProfileEvents::FilesystemCacheEvictionTries); + + IterateFunc iterate_func = [&](LockedKey & locked_key, const FileSegmentMetadataPtr & segment_metadata) + { + const auto & file_segment = segment_metadata->file_segment; + chassert(file_segment->assertCorrectness()); + + if (segment_metadata->releasable()) + { + res.add(locked_key, segment_metadata); + stat.update(segment_metadata->size(), file_segment->getKind(), true); + } + else + { + stat.update(segment_metadata->size(), file_segment->getKind(), false); + ProfileEvents::increment(ProfileEvents::FilesystemCacheEvictionSkippedFileSegments); + } + + return IterationResult::CONTINUE; + }; + + auto can_fit = [&] + { + return canFit(size, stat.stat.releasable_size, stat.stat.releasable_count, lock); + }; + + iterate([&](LockedKey & locked_key, const FileSegmentMetadataPtr & segment_metadata) + { + return can_fit() ? IterationResult::BREAK : iterate_func(locked_key, segment_metadata); + }, lock); + + return can_fit(); +} + +LRUFileCachePriority::LRUIterator LRUFileCachePriority::move(LRUIterator & it, LRUFileCachePriority & other, const CacheGuard::Lock &) +{ + const auto & entry = it.getEntry(); + if (entry.size == 0) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Adding zero size entries to LRU queue is not allowed " + "(key: {}, offset: {})", entry.key, entry.offset); + } +#ifndef NDEBUG + for (const auto & queue_entry : queue) + { + /// entry.size == 0 means entry was invalidated. + if (queue_entry.size != 0 && queue_entry.key == entry.key && queue_entry.offset == entry.offset) + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Attempt to add duplicate queue entry to queue. " + "(Key: {}, offset: {}, size: {})", + entry.key, entry.offset, entry.size); + } +#endif + + queue.splice(queue.end(), other.queue, it.iterator); + + updateSize(entry.size); + updateElementsCount(1); + + other.updateSize(-entry.size); + other.updateElementsCount(-1); + return LRUIterator(this, it.iterator); +} + +std::vector LRUFileCachePriority::dump(const CacheGuard::Lock & lock) +{ + std::vector res; + iterate([&](LockedKey &, const FileSegmentMetadataPtr & segment_metadata) + { + res.emplace_back(FileSegment::getInfo(segment_metadata->file_segment)); + return IterationResult::CONTINUE; + }, lock); + return res; +} + +bool LRUFileCachePriority::modifySizeLimits( + size_t max_size_, size_t max_elements_, double /* size_ratio_ */, const CacheGuard::Lock & lock) +{ + if (max_size == max_size_ && max_elements == max_elements_) + return false; /// Nothing to change. 
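As an editorial aside (not part of this patch), the shrink-to-fit idea behind the eviction loop that follows can be summarized in a minimal, self-contained C++ sketch: when FileCache::applySettingsIfPossible lowers max_size or max_elements at runtime, the queue is walked from its least-recently-used end and releasable entries are dropped until the new limits hold, with 0 treated as "unlimited". The SimpleEntry type and shrinkToFit function below are hypothetical stand-ins, not the real classes.

#include <cstddef>
#include <list>

struct SimpleEntry { size_t size = 0; bool releasable = true; };

/// Drop releasable entries from the least-recently-used end (the list head)
/// until the new limits are satisfied; 0 means "unlimited".
bool shrinkToFit(std::list<SimpleEntry> & lru, size_t & current_size,
                 size_t new_max_size, size_t new_max_elements)
{
    auto satisfied = [&]
    {
        return (new_max_size == 0 || current_size <= new_max_size)
            && (new_max_elements == 0 || lru.size() <= new_max_elements);
    };

    for (auto it = lru.begin(); it != lru.end() && !satisfied(); )
    {
        if (!it->releasable)
        {
            ++it; /// entries still held by readers are skipped, as in the real loop
            continue;
        }
        current_size -= it->size;
        it = lru.erase(it);
    }
    return satisfied();
}

Unlike this sketch, the real modifySizeLimits commits the new limits even when not every entry could be evicted, and it accounts the evictions in the FilesystemCacheEvicted* profile events.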
+ + auto check_limits_satisfied = [&]() + { + return (max_size_ == 0 || current_size <= max_size_) + && (max_elements_ == 0 || current_elements_num <= max_elements_); + }; + + if (check_limits_satisfied()) + { + max_size = max_size_; + max_elements = max_elements_; + return true; + } + + auto iterate_func = [&](LockedKey & locked_key, const FileSegmentMetadataPtr & segment_metadata) + { + chassert(segment_metadata->file_segment->assertCorrectness()); + + if (!segment_metadata->releasable()) + return IterationResult::CONTINUE; + + auto segment = segment_metadata->file_segment; + locked_key.removeFileSegment(segment->offset(), segment->lock()); + + ProfileEvents::increment(ProfileEvents::FilesystemCacheEvictedFileSegments); + ProfileEvents::increment(ProfileEvents::FilesystemCacheEvictedBytes, segment->getDownloadedSize()); + return IterationResult::REMOVE_AND_CONTINUE; + }; + + auto timer = DB::CurrentThread::getProfileEvents().timer(ProfileEvents::FilesystemCacheEvictMicroseconds); + iterate( + [&](LockedKey & locked_key, const FileSegmentMetadataPtr & segment_metadata) + { return check_limits_satisfied() ? IterationResult::BREAK : iterate_func(locked_key, segment_metadata); }, + lock); + + max_size = max_size_; + max_elements = max_elements_; + return true; +} + +void LRUFileCachePriority::LRUIterator::remove(const CacheGuard::Lock & lock) +{ + assertValid(); + cache_priority->remove(iterator, lock); + iterator = LRUQueue::iterator{}; +} + +void LRUFileCachePriority::LRUIterator::invalidate() +{ + assertValid(); LOG_TEST( cache_priority->log, "Invalidating entry in LRU queue. Key: {}, offset: {}, previous size: {}", - queue_iter->key, queue_iter->offset, queue_iter->size); + iterator->key, iterator->offset, iterator->size); - cache_priority->updateSize(-queue_iter->size); + cache_priority->updateSize(-iterator->size); cache_priority->updateElementsCount(-1); - queue_iter->size = 0; + iterator->size = 0; } -void LRUFileCachePriority::LRUFileCacheIterator::updateSize(int64_t size) +void LRUFileCachePriority::LRUIterator::updateSize(int64_t size) { - checkUsable(); + assertValid(); LOG_TEST( cache_priority->log, "Update size with {} in LRU queue for key: {}, offset: {}, previous size: {}", - size, queue_iter->key, queue_iter->offset, queue_iter->size); + size, iterator->key, iterator->offset, iterator->size); cache_priority->updateSize(size); - queue_iter->size += size; + iterator->size += size; } -size_t LRUFileCachePriority::LRUFileCacheIterator::use(const CacheGuard::Lock &) +size_t LRUFileCachePriority::LRUIterator::increasePriority(const CacheGuard::Lock &) { - checkUsable(); - cache_priority->queue.splice(cache_priority->queue.end(), cache_priority->queue, queue_iter); - return ++queue_iter->hits; + assertValid(); + cache_priority->queue.splice(cache_priority->queue.end(), cache_priority->queue, iterator); + return ++iterator->hits; } -void LRUFileCachePriority::LRUFileCacheIterator::checkUsable() const +void LRUFileCachePriority::LRUIterator::assertValid() const { - if (queue_iter == LRUQueueIterator{}) + if (iterator == LRUQueue::iterator{}) throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to use invalid iterator"); } void LRUFileCachePriority::shuffle(const CacheGuard::Lock &) { - std::vector its; + std::vector its; its.reserve(queue.size()); for (auto it = queue.begin(); it != queue.end(); ++it) its.push_back(it); diff --git a/src/Interpreters/Cache/LRUFileCachePriority.h b/src/Interpreters/Cache/LRUFileCachePriority.h index 89f86961811..ed6ec405395 100644 --- 
a/src/Interpreters/Cache/LRUFileCachePriority.h +++ b/src/Interpreters/Cache/LRUFileCachePriority.h @@ -6,43 +6,50 @@ #include #include "Interpreters/Cache/Guards.h" -namespace CurrentMetrics -{ - extern const Metric FilesystemCacheSizeLimit; -} - namespace DB { /// Based on the LRU algorithm implementation, the record with the lowest priority is stored at /// the head of the queue, and the record with the highest priority is stored at the tail. -class LRUFileCachePriority : public IFileCachePriority +class LRUFileCachePriority final : public IFileCachePriority { private: - class LRUFileCacheIterator; + class LRUIterator; using LRUQueue = std::list; - using LRUQueueIterator = typename LRUQueue::iterator; + friend class SLRUFileCachePriority; public: - LRUFileCachePriority(size_t max_size_, size_t max_elements_) : IFileCachePriority(max_size_, max_elements_) - { - CurrentMetrics::set(CurrentMetrics::FilesystemCacheSizeLimit, max_size_); - } + LRUFileCachePriority(size_t max_size_, size_t max_elements_) : IFileCachePriority(max_size_, max_elements_) {} size_t getSize(const CacheGuard::Lock &) const override { return current_size; } size_t getElementsCount(const CacheGuard::Lock &) const override { return current_elements_num; } - Iterator add(KeyMetadataPtr key_metadata, size_t offset, size_t size, const CacheGuard::Lock &) override; + bool canFit(size_t size, const CacheGuard::Lock &) const override; - void pop(const CacheGuard::Lock &) override; + IteratorPtr add( /// NOLINT + KeyMetadataPtr key_metadata, + size_t offset, + size_t size, + const CacheGuard::Lock &, + bool is_startup = false) override; - void removeAll(const CacheGuard::Lock &) override; - - void iterate(IterateFunc && func, const CacheGuard::Lock &) override; + bool collectCandidatesForEviction( + size_t size, + FileCacheReserveStat & stat, + EvictionCandidates & res, + IFileCachePriority::IteratorPtr reservee, + FinalizeEvictionFunc & finalize_eviction_func, + const CacheGuard::Lock &) override; void shuffle(const CacheGuard::Lock &) override; + std::vector dump(const CacheGuard::Lock &) override; + + void pop(const CacheGuard::Lock & lock) { remove(queue.begin(), lock); } + + bool modifySizeLimits(size_t max_size_, size_t max_elements_, double size_ratio_, const CacheGuard::Lock &) override; + private: void updateElementsCount(int64_t num); void updateSize(int64_t size); @@ -55,21 +62,38 @@ private: /// because of invalidated entries. 
std::atomic current_elements_num = 0; - LRUQueueIterator remove(LRUQueueIterator it); + bool canFit(size_t size, size_t released_size_assumption, size_t released_elements_assumption, const CacheGuard::Lock &) const; + + LRUQueue::iterator remove(LRUQueue::iterator it, const CacheGuard::Lock &); + + enum class IterationResult + { + BREAK, + CONTINUE, + REMOVE_AND_CONTINUE, + }; + using IterateFunc = std::function; + void iterate(IterateFunc && func, const CacheGuard::Lock &); + + LRUIterator move(LRUIterator & it, LRUFileCachePriority & other, const CacheGuard::Lock &); + LRUIterator add(Entry && entry, const CacheGuard::Lock &); }; -class LRUFileCachePriority::LRUFileCacheIterator : public IFileCachePriority::IIterator +class LRUFileCachePriority::LRUIterator : public IFileCachePriority::Iterator { + friend class LRUFileCachePriority; + friend class SLRUFileCachePriority; + public: - LRUFileCacheIterator( - LRUFileCachePriority * cache_priority_, - LRUFileCachePriority::LRUQueueIterator queue_iter_); + LRUIterator(LRUFileCachePriority * cache_priority_, LRUQueue::iterator iterator_); - const Entry & getEntry() const override { return *queue_iter; } + LRUIterator(const LRUIterator & other); + LRUIterator & operator =(const LRUIterator & other); + bool operator ==(const LRUIterator & other) const; - Entry & getEntry() override { return *queue_iter; } + const Entry & getEntry() const override { return *iterator; } - size_t use(const CacheGuard::Lock &) override; + size_t increasePriority(const CacheGuard::Lock &) override; void remove(const CacheGuard::Lock &) override; @@ -77,11 +101,13 @@ public: void updateSize(int64_t size) override; + QueueEntryType getType() const override { return QueueEntryType::LRU; } + private: - void checkUsable() const; + void assertValid() const; LRUFileCachePriority * cache_priority; - mutable LRUFileCachePriority::LRUQueueIterator queue_iter; + mutable LRUQueue::iterator iterator; }; } diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 6d3927c3f36..231c3d4a8d6 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -134,11 +134,12 @@ std::string KeyMetadata::getFileSegmentPath(const FileSegment & file_segment) co / CacheMetadata::getFileNameForFileSegment(file_segment.offset(), file_segment.getKind()); } -CacheMetadata::CacheMetadata(const std::string & path_, size_t background_download_queue_size_limit_) +CacheMetadata::CacheMetadata(const std::string & path_, size_t background_download_queue_size_limit_, size_t background_download_threads_) : path(path_) , cleanup_queue(std::make_shared()) , download_queue(std::make_shared(background_download_queue_size_limit_)) , log(&Poco::Logger::get("CacheMetadata")) + , download_threads_num(background_download_threads_) { } @@ -351,7 +352,10 @@ CacheMetadata::removeEmptyKey( try { if (fs::exists(key_directory)) + { fs::remove_all(key_directory); + LOG_TEST(log, "Directory ({}) for key {} removed", key_directory.string(), key); + } } catch (...) { @@ -364,7 +368,10 @@ CacheMetadata::removeEmptyKey( { std::unique_lock mutex(key_prefix_directory_mutex); if (fs::exists(key_prefix_directory) && fs::is_empty(key_prefix_directory)) + { fs::remove(key_prefix_directory); + LOG_TEST(log, "Prefix directory ({}) for key {} removed", key_prefix_directory.string(), key); + } } catch (...) 
{ @@ -458,11 +465,6 @@ void CacheMetadata::cleanupThreadFunc() } } -void CacheMetadata::cancelCleanup() -{ - cleanup_queue->cancel(); -} - class DownloadQueue { friend struct CacheMetadata; @@ -473,7 +475,7 @@ public: { { std::lock_guard lock(mutex); - if (cancelled || (queue_size_limit && queue.size() == queue_size_limit)) + if (cancelled || (queue_size_limit && queue.size() >= queue_size_limit)) return false; queue.push(DownloadInfo{file_segment->key(), file_segment->offset(), file_segment}); } @@ -483,6 +485,8 @@ public: return true; } + bool setQueueLimit(size_t size) { return queue_size_limit.exchange(size) != size; } + private: void cancel() { @@ -493,8 +497,8 @@ private: cv.notify_all(); } - const size_t queue_size_limit; - std::mutex mutex; + std::atomic queue_size_limit; + mutable std::mutex mutex; std::condition_variable cv; bool cancelled = false; @@ -515,7 +519,7 @@ private: std::queue queue; }; -void CacheMetadata::downloadThreadFunc() +void CacheMetadata::downloadThreadFunc(const bool & stop_flag) { std::optional> memory; while (true) @@ -526,13 +530,13 @@ void CacheMetadata::downloadThreadFunc() { std::unique_lock lock(download_queue->mutex); - if (download_queue->cancelled) + if (download_queue->cancelled || stop_flag) return; if (download_queue->queue.empty()) { - download_queue->cv.wait(lock, [&](){ return download_queue->cancelled || !download_queue->queue.empty(); }); - if (download_queue->cancelled) + download_queue->cv.wait(lock, [&](){ return download_queue->cancelled || !download_queue->queue.empty() || stop_flag; }); + if (download_queue->cancelled || stop_flag) return; } @@ -607,6 +611,11 @@ void CacheMetadata::downloadThreadFunc() } } +bool CacheMetadata::setBackgroundDownloadQueueSizeLimit(size_t size) +{ + return download_queue->setQueueLimit(size); +} + void CacheMetadata::downloadImpl(FileSegment & file_segment, std::optional> & memory) { LOG_TEST( @@ -670,9 +679,85 @@ void CacheMetadata::downloadImpl(FileSegment & file_segment, std::optional()); + download_threads.back()->thread = std::make_unique([this, thread = download_threads.back()] { downloadThreadFunc(thread->stop_flag); }); + } + cleanup_thread = std::make_unique([this]{ cleanupThreadFunc(); }); +} + +void CacheMetadata::shutdown() { download_queue->cancel(); + cleanup_queue->cancel(); + + for (auto & download_thread : download_threads) + { + if (download_thread->thread && download_thread->thread->joinable()) + download_thread->thread->join(); + } + if (cleanup_thread && cleanup_thread->joinable()) + cleanup_thread->join(); +} + +bool CacheMetadata::isBackgroundDownloadEnabled() +{ + return download_threads_num; +} + +bool CacheMetadata::setBackgroundDownloadThreads(size_t threads_num) +{ + if (threads_num == download_threads_num) + return false; + + SCOPE_EXIT({ download_threads_num = download_threads.size(); }); + + if (threads_num > download_threads_num) + { + size_t add_threads = threads_num - download_threads_num; + for (size_t i = 0; i < add_threads; ++i) + { + download_threads.emplace_back(std::make_shared()); + try + { + download_threads.back()->thread = std::make_unique( + [this, thread = download_threads.back()] { downloadThreadFunc(thread->stop_flag); }); + } + catch (...) 
+ { + download_threads.pop_back(); + throw; + } + } + } + else if (threads_num < download_threads_num) + { + size_t remove_threads = download_threads_num - threads_num; + + { + std::lock_guard lock(download_queue->mutex); + for (size_t i = 0; i < remove_threads; ++i) + download_threads[download_threads.size() - 1 - i]->stop_flag = true; + } + + download_queue->cv.notify_all(); + + for (size_t i = 0; i < remove_threads; ++i) + { + chassert(download_threads.back()->stop_flag); + + auto & thread = download_threads.back()->thread; + if (thread && thread->joinable()) + thread->join(); + + download_threads.pop_back(); + } + } + return true; } LockedKey::LockedKey(std::shared_ptr key_metadata_) @@ -928,7 +1013,7 @@ std::string LockedKey::toString() const } -std::vector LockedKey::sync(FileCache & cache) +std::vector LockedKey::sync() { std::vector broken; for (auto it = key_metadata->begin(); it != key_metadata->end();) @@ -961,7 +1046,7 @@ std::vector LockedKey::sync(FileCache & cache) "File segment has DOWNLOADED state, but file does not exist ({})", file_segment->getInfoForLog()); - broken.push_back(FileSegment::getInfo(file_segment, cache)); + broken.push_back(FileSegment::getInfo(file_segment)); it = removeFileSegment(file_segment->offset(), file_segment->lock(), /* can_be_broken */true); continue; } @@ -980,7 +1065,7 @@ std::vector LockedKey::sync(FileCache & cache) "File segment has unexpected size. Having {}, expected {} ({})", actual_size, expected_size, file_segment->getInfoForLog()); - broken.push_back(FileSegment::getInfo(file_segment, cache)); + broken.push_back(FileSegment::getInfo(file_segment)); it = removeFileSegment(file_segment->offset(), file_segment->lock(), /* can_be_broken */false); } return broken; diff --git a/src/Interpreters/Cache/Metadata.h b/src/Interpreters/Cache/Metadata.h index 1d7040c91c3..0770a165ffa 100644 --- a/src/Interpreters/Cache/Metadata.h +++ b/src/Interpreters/Cache/Metadata.h @@ -5,6 +5,7 @@ #include #include #include +#include #include namespace DB @@ -29,7 +30,7 @@ struct FileSegmentMetadata : private boost::noncopyable bool evicting() const { return removal_candidate.load(); } - Priority::Iterator getQueueIterator() const { return file_segment->getQueueIterator(); } + Priority::IteratorPtr getQueueIterator() const { return file_segment->getQueueIterator(); } FileSegmentPtr file_segment; std::atomic removal_candidate{false}; @@ -102,7 +103,9 @@ public: using Key = FileCacheKey; using IterateFunc = std::function; - explicit CacheMetadata(const std::string & path_, size_t background_download_queue_size_limit_); + explicit CacheMetadata(const std::string & path_, size_t background_download_queue_size_limit_, size_t background_download_threads_); + + void startup(); const String & getBaseDirectory() const { return path; } @@ -115,6 +118,7 @@ public: static String getFileNameForFileSegment(size_t offset, FileSegmentKind segment_kind); void iterate(IterateFunc && func); + bool isEmpty() const; enum class KeyNotFoundPolicy @@ -138,21 +142,13 @@ public: void removeKey(const Key & key, bool if_exists, bool if_releasable); void removeAllKeys(bool if_releasable); - void cancelCleanup(); + void shutdown(); - /// Firstly, this cleanup does not delete cache files, - /// but only empty keys from cache_metadata_map and key (prefix) directories from fs. - /// Secondly, it deletes those only if arose as a result of - /// (1) eviction in FileCache::tryReserve(); - /// (2) removal of cancelled non-downloaded file segments after FileSegment::complete(). 
- /// which does not include removal of cache files because of FileCache::removeKey/removeAllKeys, - /// triggered by removal of source files from objects storage. - /// E.g. number of elements submitted to background cleanup should remain low. - void cleanupThreadFunc(); + bool setBackgroundDownloadThreads(size_t threads_num); + size_t getBackgroundDownloadThreads() const { return download_threads.size(); } + bool setBackgroundDownloadQueueSizeLimit(size_t size); - void downloadThreadFunc(); - - void cancelDownload(); + bool isBackgroundDownloadEnabled(); private: const std::string path; /// Cache base path @@ -172,6 +168,16 @@ private: static constexpr size_t buckets_num = 1024; std::vector metadata_buckets{buckets_num}; + struct DownloadThread + { + std::unique_ptr thread; + bool stop_flag{false}; + }; + std::vector> download_threads; + std::atomic download_threads_num; + + std::unique_ptr cleanup_thread; + MetadataBucket & getMetadataBucket(const Key & key); void downloadImpl(FileSegment & file_segment, std::optional> & memory); MetadataBucket::iterator removeEmptyKey( @@ -179,6 +185,18 @@ private: MetadataBucket::iterator it, LockedKey &, const CacheMetadataGuard::Lock &); + + void downloadThreadFunc(const bool & stop_flag); + + /// Firstly, this cleanup does not delete cache files, + /// but only empty keys from cache_metadata_map and key (prefix) directories from fs. + /// Secondly, it deletes those only if arose as a result of + /// (1) eviction in FileCache::tryReserve(); + /// (2) removal of cancelled non-downloaded file segments after FileSegment::complete(). + /// which does not include removal of cache files because of FileCache::removeKey/removeAllKeys, + /// triggered by removal of source files from objects storage. + /// E.g. number of elements submitted to background cleanup should remain low. 
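For context on the dynamic thread-count handling above (CacheMetadata::setBackgroundDownloadThreads), here is a minimal, self-contained sketch of the same grow/shrink pattern, offered as an editorial illustration only: grow by spawning workers, shrink by raising per-thread stop flags under the shared mutex, waking everyone through the condition variable, and joining from the back. ResizableWorkerPool is a hypothetical name; the real workers additionally drain the background download queue and honour its cancellation flag.

#include <condition_variable>
#include <cstddef>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>

class ResizableWorkerPool
{
public:
    ~ResizableWorkerPool() { resize(0); }

    void resize(size_t n)
    {
        /// Grow: spawn additional workers.
        while (workers.size() < n)
        {
            auto worker = std::make_shared<Worker>();
            worker->thread = std::thread([this, worker] { run(worker->stop_flag); });
            workers.push_back(worker);
        }

        /// Shrink: flag the tail workers, wake them, then join and drop them.
        if (workers.size() > n)
        {
            {
                std::lock_guard lock(mutex);
                for (size_t i = n; i < workers.size(); ++i)
                    workers[i]->stop_flag = true;
            }
            cv.notify_all();

            while (workers.size() > n)
            {
                if (workers.back()->thread.joinable())
                    workers.back()->thread.join();
                workers.pop_back();
            }
        }
    }

private:
    struct Worker
    {
        std::thread thread;
        bool stop_flag = false; /// guarded by `mutex`
    };

    void run(const bool & stop_flag)
    {
        std::unique_lock lock(mutex);
        /// A real worker would also wait for queued work and process it here.
        cv.wait(lock, [&] { return stop_flag; });
    }

    std::vector<std::shared_ptr<Worker>> workers;
    std::mutex mutex;
    std::condition_variable cv;
};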
+ void cleanupThreadFunc(); }; @@ -243,7 +261,7 @@ struct LockedKey : private boost::noncopyable void markAsRemoved(); - std::vector sync(FileCache & cache); + std::vector sync(); std::string toString() const; diff --git a/src/Interpreters/Cache/QueryLimit.cpp b/src/Interpreters/Cache/QueryLimit.cpp index 8b5b42cca1a..109e94b2971 100644 --- a/src/Interpreters/Cache/QueryLimit.cpp +++ b/src/Interpreters/Cache/QueryLimit.cpp @@ -95,7 +95,7 @@ void FileCacheQueryLimit::QueryContext::remove( records.erase({key, offset}); } -IFileCachePriority::Iterator FileCacheQueryLimit::QueryContext::tryGet( +IFileCachePriority::IteratorPtr FileCacheQueryLimit::QueryContext::tryGet( const Key & key, size_t offset, const CacheGuard::Lock &) diff --git a/src/Interpreters/Cache/QueryLimit.h b/src/Interpreters/Cache/QueryLimit.h index f8247e8c520..cab1ccc63ba 100644 --- a/src/Interpreters/Cache/QueryLimit.h +++ b/src/Interpreters/Cache/QueryLimit.h @@ -27,7 +27,6 @@ public: public: using Key = FileCacheKey; using Priority = IFileCachePriority; - using PriorityIterator = IFileCachePriority::Iterator; QueryContext(size_t query_cache_size, bool recache_on_query_limit_exceeded_); @@ -36,7 +35,7 @@ public: bool recacheOnFileCacheQueryLimitExceeded() const { return recache_on_query_limit_exceeded; } - IFileCachePriority::Iterator tryGet( + Priority::IteratorPtr tryGet( const Key & key, size_t offset, const CacheGuard::Lock &); @@ -53,7 +52,7 @@ public: const CacheGuard::Lock &); private: - using Records = std::unordered_map; + using Records = std::unordered_map; Records records; LRUFileCachePriority priority; const bool recache_on_query_limit_exceeded; diff --git a/src/Interpreters/Cache/SLRUFileCachePriority.cpp b/src/Interpreters/Cache/SLRUFileCachePriority.cpp new file mode 100644 index 00000000000..8b46712731c --- /dev/null +++ b/src/Interpreters/Cache/SLRUFileCachePriority.cpp @@ -0,0 +1,300 @@ +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace +{ + size_t getRatio(size_t total, double ratio) + { + return static_cast(total * std::clamp(ratio, 0.0, 1.0)); + } +} + +SLRUFileCachePriority::SLRUFileCachePriority( + size_t max_size_, + size_t max_elements_, + double size_ratio_) + : IFileCachePriority(max_size_, max_elements_) + , size_ratio(size_ratio_) + , protected_queue(LRUFileCachePriority(getRatio(max_size_, size_ratio), getRatio(max_elements_, size_ratio))) + , probationary_queue(LRUFileCachePriority(getRatio(max_size_, 1 - size_ratio), getRatio(max_elements_, 1 - size_ratio))) +{ + LOG_DEBUG( + log, "Using probationary queue size: {}, protected queue size: {}", + probationary_queue.max_size, protected_queue.max_elements); +} + +size_t SLRUFileCachePriority::getSize(const CacheGuard::Lock & lock) const +{ + return protected_queue.getSize(lock) + probationary_queue.getSize(lock); +} + +size_t SLRUFileCachePriority::getElementsCount(const CacheGuard::Lock & lock) const +{ + return protected_queue.getElementsCount(lock) + probationary_queue.getElementsCount(lock); +} + +bool SLRUFileCachePriority::canFit(size_t size, const CacheGuard::Lock & lock) const +{ + return probationary_queue.canFit(size, lock) || protected_queue.canFit(size, lock); +} + +IFileCachePriority::IteratorPtr SLRUFileCachePriority::add( /// NOLINT + KeyMetadataPtr key_metadata, + size_t offset, + size_t size, + const CacheGuard::Lock & lock, + bool is_startup) +{ + if (is_startup) + { + /// If it is server startup, we put entries in any queue it will fit in, + /// but with preference for 
probationary queue, + /// because we do not know the distribution between queues after server restart. + if (probationary_queue.canFit(size, lock)) + { + auto lru_iterator = probationary_queue.add(Entry(key_metadata->key, offset, size, key_metadata), lock); + return std::make_shared(this, std::move(lru_iterator), false); + } + else + { + auto lru_iterator = protected_queue.add(Entry(key_metadata->key, offset, size, key_metadata), lock); + return std::make_shared(this, std::move(lru_iterator), true); + } + } + else + { + auto lru_iterator = probationary_queue.add(Entry(key_metadata->key, offset, size, key_metadata), lock); + return std::make_shared(this, std::move(lru_iterator), false); + } +} + +bool SLRUFileCachePriority::collectCandidatesForEviction( + size_t size, + FileCacheReserveStat & stat, + EvictionCandidates & res, + IFileCachePriority::IteratorPtr reservee, + FinalizeEvictionFunc & finalize_eviction_func, + const CacheGuard::Lock & lock) +{ + /// If `it` is nullptr, then it is the first space reservation attempt + /// for a corresponding file segment, so it will be directly put into probationary queue. + if (!reservee) + { + return probationary_queue.collectCandidatesForEviction(size, stat, res, reservee, finalize_eviction_func, lock); + } + + /// If `it` not nullptr (e.g. is already in some queue), + /// we need to check in which queue (protected/probationary) it currently is + /// (in order to know where we need to free space). + if (!assert_cast(reservee.get())->is_protected) + { + return probationary_queue.collectCandidatesForEviction(size, stat, res, reservee, finalize_eviction_func, lock); + } + + /// Entry is in protected queue. + /// Check if we have enough space in protected queue to fit a new size of entry. + /// `size` is the increment to the current entry.size we want to increase. + if (protected_queue.canFit(size, lock)) + return true; + + /// If not enough space - we need to "downgrade" lowest priority entries from protected + /// queue to probationary queue. + /// The amount of such "downgraded" entries is equal to the amount + /// required to make space for additionary `size` bytes for entry. + auto downgrade_candidates = std::make_shared(); + FileCacheReserveStat downgrade_stat; + FinalizeEvictionFunc noop; + + if (!protected_queue.collectCandidatesForEviction(size, downgrade_stat, *downgrade_candidates, reservee, noop, lock)) + return false; + + const size_t size_to_downgrade = downgrade_stat.stat.releasable_size; + + if (!probationary_queue.canFit(size_to_downgrade, lock) + && !probationary_queue.collectCandidatesForEviction(size_to_downgrade, stat, res, reservee, noop, lock)) + return false; + + finalize_eviction_func = [=, this](const CacheGuard::Lock & lk) mutable + { + for (const auto & [key, key_candidates] : *downgrade_candidates) + { + for (const auto & candidate : key_candidates.candidates) + { + auto * candidate_it = assert_cast(candidate->getQueueIterator().get()); + candidate_it->lru_iterator = probationary_queue.move(candidate_it->lru_iterator, protected_queue, lk); + candidate_it->is_protected = false; + } + } + }; + + return true; +} + +void SLRUFileCachePriority::increasePriority(SLRUIterator & iterator, const CacheGuard::Lock & lock) +{ + /// If entry is already in protected queue, + /// we only need to increase its priority within the protected queue. + if (iterator.is_protected) + { + iterator.lru_iterator.increasePriority(lock); + return; + } + + /// Entry is in probationary queue. + /// We need to move it to protected queue. 
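`add()` above always admits new entries into the probationary queue (only the startup repopulation path may place them directly into the protected queue), and `increasePriority()` promotes an entry to the protected queue on a repeated hit, downgrading protected victims back to probationary when space runs out. The same two-queue discipline in a toy, self-contained SLRU over integer keys; the class and its members are invented for the example and are not the cache's API.

```cpp
#include <cstddef>
#include <list>
#include <unordered_map>

/// Toy SLRU: new keys are admitted to the probationary queue, a repeated
/// access promotes a key to the protected queue, and protected overflow
/// downgrades the least recently used protected key back to probationary.
class ToySLRU
{
public:
    explicit ToySLRU(size_t protected_capacity_) : protected_capacity(protected_capacity_) {}

    void access(int key)
    {
        if (auto it = where.find(key); it != where.end() && it->second == Queue::Protected)
        {
            /// Already protected: just refresh its position (most recent at the back).
            protected_q.remove(key);
            protected_q.push_back(key);
        }
        else if (it != where.end())
        {
            /// Second hit while probationary: promote.
            probationary_q.remove(key);
            protected_q.push_back(key);
            where[key] = Queue::Protected;
            if (protected_q.size() > protected_capacity)
            {
                /// Protected queue overflow: downgrade its least recent entry.
                int victim = protected_q.front();
                protected_q.pop_front();
                probationary_q.push_back(victim);
                where[victim] = Queue::Probationary;
            }
        }
        else
        {
            /// First access: admit into probationary only.
            probationary_q.push_back(key);
            where[key] = Queue::Probationary;
        }
    }

private:
    enum class Queue { Probationary, Protected };

    size_t protected_capacity;
    std::list<int> probationary_q;  /// least recently used at the front
    std::list<int> protected_q;
    std::unordered_map<int, Queue> where;
};
```

The point of the split is that a one-off large scan can only churn the probationary queue, while entries that are actually reused stay shielded in the protected queue.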
+ const size_t size = iterator.getEntry().size; + if (size > protected_queue.getSizeLimit(lock)) + { + /// Entry size is bigger than the whole protected queue limit. + /// This is only possible if protected_queue_size_limit is less than max_file_segment_size, + /// which is not possible in any realistic cache configuration. + iterator.lru_iterator.increasePriority(lock); + return; + } + + /// Check if there is enough space in protected queue to move entry there. + /// If not - we need to "downgrade" lowest priority entries from protected + /// queue to probationary queue. + EvictionCandidates downgrade_candidates; + FileCacheReserveStat downgrade_stat; + FinalizeEvictionFunc noop; + + if (!protected_queue.collectCandidatesForEviction(size, downgrade_stat, downgrade_candidates, {}, noop, lock)) + { + /// We cannot make space for entry to be moved to protected queue + /// (not enough releasable file segments). + /// Then just increase its priority within probationary queue. + iterator.lru_iterator.increasePriority(lock); + return; + } + + /// The amount of such "downgraded" entries is equal to the amount + /// required to make space for entry we want to insert. + const size_t size_to_downgrade = downgrade_stat.stat.releasable_count; + size_t size_to_free = 0; + if (size_to_downgrade && size_to_downgrade > size) + size_to_free = size_to_downgrade - size; + + /// Now we need to check if those "downgrade" candidates can actually + /// be moved to probationary queue. + EvictionCandidates eviction_candidates; + FileCacheReserveStat stat; + + if (size_to_free) + { + if (!probationary_queue.collectCandidatesForEviction(size_to_free, stat, eviction_candidates, {}, noop, lock)) + { + /// "downgrade" candidates cannot be moved to probationary queue, + /// so entry cannot be moved to protected queue as well. + /// Then just increase its priority within probationary queue. + iterator.lru_iterator.increasePriority(lock); + return; + } + /// Make space for "downgrade" candidates. + eviction_candidates.evict(nullptr, lock); + } + + /// All checks passed, now we can move downgrade candidates to + /// probationary queue and our entry to protected queue. + Entry entry_copy = iterator.getEntry(); + iterator.lru_iterator.remove(lock); + + for (const auto & [key, key_candidates] : downgrade_candidates) + { + for (const auto & candidate : key_candidates.candidates) + { + auto * candidate_it = assert_cast(candidate->getQueueIterator().get()); + candidate_it->lru_iterator = probationary_queue.move(candidate_it->lru_iterator, protected_queue, lock); + candidate_it->is_protected = false; + } + } + + iterator.lru_iterator = protected_queue.add(std::move(entry_copy), lock); + iterator.is_protected = true; +} + +std::vector SLRUFileCachePriority::dump(const CacheGuard::Lock & lock) +{ + auto res = probationary_queue.dump(lock); + auto part_res = protected_queue.dump(lock); + res.insert(res.end(), part_res.begin(), part_res.end()); + return res; +} + +void SLRUFileCachePriority::shuffle(const CacheGuard::Lock & lock) +{ + protected_queue.shuffle(lock); + probationary_queue.shuffle(lock); +} + +bool SLRUFileCachePriority::modifySizeLimits( + size_t max_size_, size_t max_elements_, double size_ratio_, const CacheGuard::Lock & lock) +{ + if (max_size == max_size_ && max_elements == max_elements_ && size_ratio == size_ratio_) + return false; /// Nothing to change. 
+ + protected_queue.modifySizeLimits(getRatio(max_size_, size_ratio_), getRatio(max_elements_, size_ratio_), 0, lock); + probationary_queue.modifySizeLimits(getRatio(max_size_, 1 - size_ratio_), getRatio(max_elements_, 1 - size_ratio_), 0, lock); + + max_size = max_size_; + max_elements = max_elements_; + size_ratio = size_ratio_; + return true; +} + +SLRUFileCachePriority::SLRUIterator::SLRUIterator( + SLRUFileCachePriority * cache_priority_, + LRUFileCachePriority::LRUIterator && lru_iterator_, + bool is_protected_) + : cache_priority(cache_priority_) + , lru_iterator(lru_iterator_) + , is_protected(is_protected_) +{ +} + +const SLRUFileCachePriority::Entry & SLRUFileCachePriority::SLRUIterator::getEntry() const +{ + assertValid(); + return lru_iterator.getEntry(); +} + +size_t SLRUFileCachePriority::SLRUIterator::increasePriority(const CacheGuard::Lock & lock) +{ + assertValid(); + cache_priority->increasePriority(*this, lock); + return getEntry().hits; +} + +void SLRUFileCachePriority::SLRUIterator::updateSize(int64_t size) +{ + assertValid(); + lru_iterator.updateSize(size); +} + +void SLRUFileCachePriority::SLRUIterator::invalidate() +{ + assertValid(); + lru_iterator.invalidate(); +} + +void SLRUFileCachePriority::SLRUIterator::remove(const CacheGuard::Lock & lock) +{ + assertValid(); + lru_iterator.remove(lock); +} + +void SLRUFileCachePriority::SLRUIterator::assertValid() const +{ + lru_iterator.assertValid(); +} + +} diff --git a/src/Interpreters/Cache/SLRUFileCachePriority.h b/src/Interpreters/Cache/SLRUFileCachePriority.h new file mode 100644 index 00000000000..b9ea246bc83 --- /dev/null +++ b/src/Interpreters/Cache/SLRUFileCachePriority.h @@ -0,0 +1,88 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +/// Based on the SLRU algorithm implementation, the record with the lowest priority is stored at +/// the head of the queue, and the record with the highest priority is stored at the tail. 
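`modifySizeLimits()` divides the configured byte and element limits between the two inner LRU queues with `getRatio()`. A short worked example of that split, assuming the clamp-and-truncate behaviour shown above (the helper is restated here only to keep the snippet self-contained):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdio>

/// Same truncating split as the getRatio() helper above.
static size_t getRatio(size_t total, double ratio)
{
    return static_cast<size_t>(total * std::clamp(ratio, 0.0, 1.0));
}

int main()
{
    const size_t max_size = 10ull << 30;  /// 10 GiB cache
    const double size_ratio = 0.3;        /// share reserved for the protected queue

    const size_t protected_size    = getRatio(max_size, size_ratio);      /// ~3 GiB
    const size_t probationary_size = getRatio(max_size, 1 - size_ratio);  /// ~7 GiB

    std::printf("protected=%zu probationary=%zu\n", protected_size, probationary_size);
}
```

Because the two shares are truncated independently, they can add up to slightly less than the configured total.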
+class SLRUFileCachePriority : public IFileCachePriority +{ +private: + using LRUIterator = LRUFileCachePriority::LRUIterator; + using LRUQueue = std::list; + +public: + class SLRUIterator; + + SLRUFileCachePriority(size_t max_size_, size_t max_elements_, double size_ratio_); + + size_t getSize(const CacheGuard::Lock & lock) const override; + + size_t getElementsCount(const CacheGuard::Lock &) const override; + + bool canFit(size_t size, const CacheGuard::Lock &) const override; + + IteratorPtr add( /// NOLINT + KeyMetadataPtr key_metadata, + size_t offset, + size_t size, + const CacheGuard::Lock &, + bool is_startup = false) override; + + bool collectCandidatesForEviction( + size_t size, + FileCacheReserveStat & stat, + EvictionCandidates & res, + IFileCachePriority::IteratorPtr reservee, + FinalizeEvictionFunc & finalize_eviction_func, + const CacheGuard::Lock &) override; + + void shuffle(const CacheGuard::Lock &) override; + + std::vector dump(const CacheGuard::Lock &) override; + + bool modifySizeLimits(size_t max_size_, size_t max_elements_, double size_ratio_, const CacheGuard::Lock &) override; + +private: + double size_ratio; + LRUFileCachePriority protected_queue; + LRUFileCachePriority probationary_queue; + Poco::Logger * log = &Poco::Logger::get("SLRUFileCachePriority"); + + void increasePriority(SLRUIterator & iterator, const CacheGuard::Lock & lock); +}; + +class SLRUFileCachePriority::SLRUIterator : public IFileCachePriority::Iterator +{ + friend class SLRUFileCachePriority; +public: + SLRUIterator( + SLRUFileCachePriority * cache_priority_, + LRUIterator && lru_iterator_, + bool is_protected_); + + const Entry & getEntry() const override; + + size_t increasePriority(const CacheGuard::Lock &) override; + + void remove(const CacheGuard::Lock &) override; + + void invalidate() override; + + void updateSize(int64_t size) override; + + QueueEntryType getType() const override { return is_protected ? 
QueueEntryType::SLRU_Protected : QueueEntryType::SLRU_Probationary; } + +private: + void assertValid() const; + + SLRUFileCachePriority * cache_priority; + mutable LRUIterator lru_iterator; + bool is_protected; +}; + +} diff --git a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp index 15a80667cc4..73d93514db5 100644 --- a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp +++ b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp @@ -7,6 +7,7 @@ #include #include +#include namespace DB { diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index 8a2f7e3205a..18f7280dd19 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -21,6 +21,7 @@ #include #include #include +#include namespace DB @@ -30,6 +31,7 @@ namespace ErrorCodes { extern const int TOO_LARGE_DISTRIBUTED_DEPTH; extern const int LOGICAL_ERROR; + extern const int CLUSTER_DOESNT_EXIST; } namespace ClusterProxy @@ -40,7 +42,8 @@ ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, const Settings & settings, const StorageID & main_table, ASTPtr additional_filter_ast, - Poco::Logger * log) + Poco::Logger * log, + const DistributedSettings * distributed_settings) { Settings new_settings = settings; new_settings.queue_max_wait_ms = Cluster::saturate(new_settings.queue_max_wait_ms, settings.max_execution_time); @@ -100,6 +103,12 @@ ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, } } + if (!settings.skip_unavailable_shards.changed && distributed_settings) + { + new_settings.skip_unavailable_shards = distributed_settings->skip_unavailable_shards.value; + new_settings.skip_unavailable_shards.changed = true; + } + if (settings.offset) { new_settings.offset = 0; @@ -126,7 +135,7 @@ ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, } /// disable parallel replicas if cluster contains only shards with 1 replica - if (context->canUseParallelReplicas()) + if (context->canUseTaskBasedParallelReplicas()) { bool disable_parallel_replicas = true; for (const auto & shard : cluster.getShardsInfo()) @@ -193,6 +202,7 @@ void executeQuery( const ExpressionActionsPtr & sharding_key_expr, const std::string & sharding_key_column_name, const ClusterPtr & not_optimized_cluster, + const DistributedSettings & distributed_settings, AdditionalShardFilterGenerator shard_filter_generator) { const Settings & settings = context->getSettingsRef(); @@ -204,7 +214,8 @@ void executeQuery( SelectStreamFactory::Shards remote_shards; auto cluster = query_info.getCluster(); - auto new_context = updateSettingsForCluster(*cluster, context, settings, main_table, query_info.additional_filter_ast, log); + auto new_context = updateSettingsForCluster(*cluster, context, settings, main_table, query_info.additional_filter_ast, log, + &distributed_settings); if (context->getSettingsRef().allow_experimental_parallel_reading_from_replicas && context->getSettingsRef().allow_experimental_parallel_reading_from_replicas.value != new_context->getSettingsRef().allow_experimental_parallel_reading_from_replicas.value) @@ -254,7 +265,7 @@ void executeQuery( // decide for each shard if parallel reading from replicas should be enabled // according to settings and number of replicas declared per shard const auto & addresses = cluster->getShardsAddresses().at(i); - bool parallel_replicas_enabled = addresses.size() > 1 && context->canUseParallelReplicas(); + bool 
parallel_replicas_enabled = addresses.size() > 1 && context->canUseTaskBasedParallelReplicas(); stream_factory.createForShard( shard_info, @@ -322,11 +333,44 @@ void executeQueryWithParallelReplicas( SelectStreamFactory & stream_factory, const ASTPtr & query_ast, ContextPtr context, - std::shared_ptr storage_limits, - const ClusterPtr & not_optimized_cluster) + std::shared_ptr storage_limits) { const auto & settings = context->getSettingsRef(); + + /// check cluster for parallel replicas + if (settings.cluster_for_parallel_replicas.value.empty()) + { + throw Exception( + ErrorCodes::CLUSTER_DOESNT_EXIST, + "Reading in parallel from replicas is enabled but cluster to execute query is not provided. Please set " + "'cluster_for_parallel_replicas' setting"); + } + auto not_optimized_cluster = context->getCluster(settings.cluster_for_parallel_replicas); + auto new_context = Context::createCopy(context); + + /// check hedged connections setting + if (settings.use_hedged_requests.value) + { + if (settings.use_hedged_requests.changed) + { + LOG_WARNING( + &Poco::Logger::get("executeQueryWithParallelReplicas"), + "Setting 'use_hedged_requests' explicitly with enabled 'allow_experimental_parallel_reading_from_replicas' has no effect. " + "Hedged connections are not used for parallel reading from replicas"); + } + else + { + LOG_INFO( + &Poco::Logger::get("executeQueryWithParallelReplicas"), + "Disabling 'use_hedged_requests' in favor of 'allow_experimental_parallel_reading_from_replicas'. Hedged connections are " + "not used for parallel reading from replicas"); + } + + /// disable hedged connections -> parallel replicas uses own logic to choose replicas + new_context->setSetting("use_hedged_requests", Field{false}); + } + auto scalars = new_context->hasQueryContext() ? 
new_context->getQueryContext()->getScalars() : Scalars{}; UInt64 shard_num = 0; /// shard_num is 1-based, so 0 - no shard specified @@ -338,7 +382,6 @@ void executeQueryWithParallelReplicas( shard_num = column->getUInt(0); } - size_t all_replicas_count = 0; ClusterPtr new_cluster; /// if got valid shard_num from query initiator, then parallel replicas scope is the specified shard /// shards are numbered in order of appearance in the cluster config @@ -362,16 +405,14 @@ void executeQueryWithParallelReplicas( // shard_num is 1-based, but getClusterWithSingleShard expects 0-based index auto single_shard_cluster = not_optimized_cluster->getClusterWithSingleShard(shard_num - 1); // convert cluster to representation expected by parallel replicas - new_cluster = single_shard_cluster->getClusterWithReplicasAsShards(settings); + new_cluster = single_shard_cluster->getClusterWithReplicasAsShards(settings, settings.max_parallel_replicas); } else { - new_cluster = not_optimized_cluster->getClusterWithReplicasAsShards(settings); + new_cluster = not_optimized_cluster->getClusterWithReplicasAsShards(settings, settings.max_parallel_replicas); } - all_replicas_count = std::min(static_cast(settings.max_parallel_replicas), new_cluster->getShardCount()); - - auto coordinator = std::make_shared(all_replicas_count); + auto coordinator = std::make_shared(new_cluster->getShardCount()); auto external_tables = new_context->getExternalTables(); auto read_from_remote = std::make_unique( query_ast, diff --git a/src/Interpreters/ClusterProxy/executeQuery.h b/src/Interpreters/ClusterProxy/executeQuery.h index 7ffaa3ae62c..1fc49057e07 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.h +++ b/src/Interpreters/ClusterProxy/executeQuery.h @@ -8,6 +8,7 @@ namespace DB { struct Settings; +struct DistributedSettings; class Cluster; using ClusterPtr = std::shared_ptr; struct SelectQueryInfo; @@ -42,7 +43,8 @@ ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, const Settings & settings, const StorageID & main_table, ASTPtr additional_filter_ast = nullptr, - Poco::Logger * log = nullptr); + Poco::Logger * log = nullptr, + const DistributedSettings * distributed_settings = nullptr); using AdditionalShardFilterGenerator = std::function; /// Execute a distributed query, creating a query plan, from which the query pipeline can be built. 
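`executeQueryWithParallelReplicas()` now resolves the cluster from `cluster_for_parallel_replicas`, fails fast when that setting is empty, and force-disables `use_hedged_requests` on a copied context (warning only if the user had set it explicitly). A condensed sketch of that validate-then-override flow, with hypothetical `Settings`/`Context` stand-ins for the real classes:

```cpp
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

/// Hypothetical stand-ins for the real Settings / Context classes.
struct Settings
{
    std::string cluster_for_parallel_replicas;
    bool use_hedged_requests = true;
    bool use_hedged_requests_changed = false;   /// "was it set explicitly by the user?"
};

struct Context
{
    Settings settings;
    static std::shared_ptr<Context> createCopy(const Context & other) { return std::make_shared<Context>(other); }
};

std::shared_ptr<Context> prepareParallelReplicasContext(const Context & context)
{
    const auto & settings = context.settings;

    /// 1. A cluster name is mandatory: fail early with an actionable message.
    if (settings.cluster_for_parallel_replicas.empty())
        throw std::runtime_error("parallel replicas enabled but 'cluster_for_parallel_replicas' is not set");

    /// 2. Never mutate the caller's context; work on a copy.
    auto new_context = Context::createCopy(context);

    /// 3. Hedged connections conflict with the parallel-replicas coordination logic,
    ///    so they are switched off on the copy; only warn if the user set them explicitly.
    if (settings.use_hedged_requests)
    {
        if (settings.use_hedged_requests_changed)
            std::cerr << "warning: 'use_hedged_requests' has no effect with parallel replicas\n";
        new_context->settings.use_hedged_requests = false;
    }
    return new_context;
}
```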
@@ -62,6 +64,7 @@ void executeQuery( const ExpressionActionsPtr & sharding_key_expr, const std::string & sharding_key_column_name, const ClusterPtr & not_optimized_cluster, + const DistributedSettings & distributed_settings, AdditionalShardFilterGenerator shard_filter_generator = {}); @@ -71,8 +74,7 @@ void executeQueryWithParallelReplicas( SelectStreamFactory & stream_factory, const ASTPtr & query_ast, ContextPtr context, - std::shared_ptr storage_limits, - const ClusterPtr & not_optimized_cluster); + std::shared_ptr storage_limits); } } diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 1a8e0ad96fa..8e73bc8b484 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -44,7 +44,8 @@ ConcurrentHashJoin::ConcurrentHashJoin(ContextPtr context_, std::shared_ptr(); - inner_hash_join->data = std::make_unique(table_join_, right_sample_block, any_take_last_row_); + + inner_hash_join->data = std::make_unique(table_join_, right_sample_block, any_take_last_row_, 0, fmt::format("concurrent{}", i)); hash_joins.emplace_back(std::move(inner_hash_join)); } } diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index f976dea1ca5..e9962d08160 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -64,8 +65,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include @@ -76,6 +77,7 @@ #include #include #include +#include #include #include #include @@ -96,6 +98,7 @@ #include #include #include +#include #include #include #include @@ -151,6 +154,9 @@ namespace CurrentMetrics extern const Metric TablesLoaderForegroundThreadsActive; extern const Metric TablesLoaderForegroundThreadsScheduled; extern const Metric IOWriterThreadsScheduled; + extern const Metric AttachedTable; + extern const Metric AttachedDatabase; + extern const Metric PartsActive; } @@ -209,8 +215,6 @@ struct ContextSharedPart : boost::noncopyable mutable zkutil::ZooKeeperPtr zookeeper TSA_GUARDED_BY(zookeeper_mutex); /// Client for ZooKeeper. 
ConfigurationPtr zookeeper_config TSA_GUARDED_BY(zookeeper_mutex); /// Stores zookeeper configs - ConfigurationPtr sensitive_data_masker_config; - #if USE_NURAFT mutable std::mutex keeper_dispatcher_mutex; mutable std::shared_ptr keeper_dispatcher TSA_GUARDED_BY(keeper_dispatcher_mutex); @@ -252,8 +256,8 @@ struct ContextSharedPart : boost::noncopyable ExternalLoaderXMLConfigRepository * user_defined_executable_functions_config_repository TSA_GUARDED_BY(external_user_defined_executable_functions_mutex) = nullptr; scope_guard user_defined_executable_functions_xmls TSA_GUARDED_BY(external_user_defined_executable_functions_mutex); - mutable OnceFlag user_defined_sql_objects_loader_initialized; - mutable std::unique_ptr user_defined_sql_objects_loader; + mutable OnceFlag user_defined_sql_objects_storage_initialized; + mutable std::unique_ptr user_defined_sql_objects_storage; #if USE_NLP mutable OnceFlag synonyms_extensions_initialized; @@ -289,6 +293,7 @@ struct ContextSharedPart : boost::noncopyable MergeList merge_list; /// The list of executable merge (for (Replicated)?MergeTree) MovesList moves_list; /// The list of executing moves (for (Replicated)?MergeTree) ReplicatedFetchList replicated_fetch_list; + RefreshSet refresh_set; /// The list of active refreshes (for MaterializedView) ConfigurationPtr users_config TSA_GUARDED_BY(mutex); /// Config with the users, profiles and quotas sections. InterserverIOHandler interserver_io_handler; /// Handler for interserver communication. @@ -309,6 +314,11 @@ struct ContextSharedPart : boost::noncopyable mutable OnceFlag threadpool_writer_initialized; mutable std::unique_ptr threadpool_writer; +#if USE_LIBURING + mutable OnceFlag io_uring_reader_initialized; + mutable std::unique_ptr io_uring_reader; +#endif + mutable ThrottlerPtr replicated_fetches_throttler; /// A server-wide throttler for replicated fetches mutable ThrottlerPtr replicated_sends_throttler; /// A server-wide throttler for replicated sends @@ -337,6 +347,9 @@ struct ContextSharedPart : boost::noncopyable std::atomic_size_t max_table_size_to_drop = 50000000000lu; /// Protects MergeTree tables from accidental DROP (50GB by default) std::atomic_size_t max_partition_size_to_drop = 50000000000lu; /// Protects MergeTree partitions from accidental DROP (50GB by default) /// No lock required for format_schema_path modified only during initialization + std::atomic_size_t max_database_num_to_warn = 1000lu; + std::atomic_size_t max_table_num_to_warn = 5000lu; + std::atomic_size_t max_part_num_to_warn = 100000lu; String format_schema_path; /// Path to a directory that contains schema files used by input formats. String google_protos_path; /// Path to a directory that contains the proto files for the well-known Protobuf types. mutable OnceFlag action_locks_manager_initialized; @@ -354,6 +367,8 @@ struct ContextSharedPart : boost::noncopyable OrdinaryBackgroundExecutorPtr moves_executor TSA_GUARDED_BY(background_executors_mutex); OrdinaryBackgroundExecutorPtr fetch_executor TSA_GUARDED_BY(background_executors_mutex); OrdinaryBackgroundExecutorPtr common_executor TSA_GUARDED_BY(background_executors_mutex); + /// The global pool of HTTP sessions for background fetches. 
+ PooledSessionFactoryPtr fetches_session_factory TSA_GUARDED_BY(background_executors_mutex); RemoteHostFilter remote_host_filter TSA_GUARDED_BY(mutex); /// Allowed URL from config.xml HTTPHeaderFilter http_header_filter TSA_GUARDED_BY(mutex); /// Forbidden HTTP headers from config.xml @@ -543,7 +558,7 @@ struct ContextSharedPart : boost::noncopyable SHUTDOWN(log, "dictionaries loader", external_dictionaries_loader, enablePeriodicUpdates(false)); SHUTDOWN(log, "UDFs loader", external_user_defined_executable_functions_loader, enablePeriodicUpdates(false)); - SHUTDOWN(log, "another UDFs loader", user_defined_sql_objects_loader, stopWatching()); + SHUTDOWN(log, "another UDFs storage", user_defined_sql_objects_storage, stopWatching()); LOG_TRACE(log, "Shutting down named sessions"); Session::shutdownNamedSessions(); @@ -570,7 +585,7 @@ struct ContextSharedPart : boost::noncopyable std::unique_ptr delete_embedded_dictionaries; std::unique_ptr delete_external_dictionaries_loader; std::unique_ptr delete_external_user_defined_executable_functions_loader; - std::unique_ptr delete_user_defined_sql_objects_loader; + std::unique_ptr delete_user_defined_sql_objects_storage; std::unique_ptr delete_buffer_flush_schedule_pool; std::unique_ptr delete_schedule_pool; std::unique_ptr delete_distributed_schedule_pool; @@ -650,7 +665,7 @@ struct ContextSharedPart : boost::noncopyable delete_embedded_dictionaries = std::move(embedded_dictionaries); delete_external_dictionaries_loader = std::move(external_dictionaries_loader); delete_external_user_defined_executable_functions_loader = std::move(external_user_defined_executable_functions_loader); - delete_user_defined_sql_objects_loader = std::move(user_defined_sql_objects_loader); + delete_user_defined_sql_objects_storage = std::move(user_defined_sql_objects_storage); delete_buffer_flush_schedule_pool = std::move(buffer_flush_schedule_pool); delete_schedule_pool = std::move(schedule_pool); delete_distributed_schedule_pool = std::move(distributed_schedule_pool); @@ -668,7 +683,7 @@ struct ContextSharedPart : boost::noncopyable delete_embedded_dictionaries.reset(); delete_external_dictionaries_loader.reset(); delete_external_user_defined_executable_functions_loader.reset(); - delete_user_defined_sql_objects_loader.reset(); + delete_user_defined_sql_objects_storage.reset(); delete_ddl_worker.reset(); delete_buffer_flush_schedule_pool.reset(); delete_schedule_pool.reset(); @@ -815,6 +830,8 @@ MovesList & Context::getMovesList() { return shared->moves_list; } const MovesList & Context::getMovesList() const { return shared->moves_list; } ReplicatedFetchList & Context::getReplicatedFetchList() { return shared->replicated_fetch_list; } const ReplicatedFetchList & Context::getReplicatedFetchList() const { return shared->replicated_fetch_list; } +RefreshSet & Context::getRefreshSet() { return shared->refresh_set; } +const RefreshSet & Context::getRefreshSet() const { return shared->refresh_set; } String Context::resolveDatabase(const String & database_name) const { @@ -866,6 +883,12 @@ Strings Context::getWarnings() const { SharedLockGuard lock(shared->mutex); common_warnings = shared->warnings; + if (CurrentMetrics::get(CurrentMetrics::AttachedTable) > static_cast(shared->max_table_num_to_warn)) + common_warnings.emplace_back(fmt::format("The number of attached tables is more than {}", shared->max_table_num_to_warn)); + if (CurrentMetrics::get(CurrentMetrics::AttachedDatabase) > static_cast(shared->max_database_num_to_warn)) + common_warnings.emplace_back(fmt::format("The 
number of attached databases is more than {}", shared->max_table_num_to_warn)); + if (CurrentMetrics::get(CurrentMetrics::PartsActive) > static_cast(shared->max_part_num_to_warn)) + common_warnings.emplace_back(fmt::format("The number of active parts is more than {}", shared->max_part_num_to_warn)); } /// Make setting's name ordered std::set obsolete_settings; @@ -1076,7 +1099,7 @@ void Context::setTemporaryStorageInCache(const String & cache_disk_name, size_t if (shared->root_temp_data_on_disk) throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary storage is already set"); - auto file_cache = FileCacheFactory::instance().getByName(disk_ptr->getCacheName()).cache; + auto file_cache = FileCacheFactory::instance().getByName(disk_ptr->getCacheName())->cache; if (!file_cache) throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "Cache '{}' is not found", disk_ptr->getCacheName()); @@ -2446,24 +2469,30 @@ void Context::loadOrReloadUserDefinedExecutableFunctions(const Poco::Util::Abstr shared->user_defined_executable_functions_xmls = external_user_defined_executable_functions_loader.addConfigRepository(std::move(repository)); } -const IUserDefinedSQLObjectsLoader & Context::getUserDefinedSQLObjectsLoader() const +const IUserDefinedSQLObjectsStorage & Context::getUserDefinedSQLObjectsStorage() const { - callOnce(shared->user_defined_sql_objects_loader_initialized, [&] { - shared->user_defined_sql_objects_loader = createUserDefinedSQLObjectsLoader(getGlobalContext()); + callOnce(shared->user_defined_sql_objects_storage_initialized, [&] { + shared->user_defined_sql_objects_storage = createUserDefinedSQLObjectsStorage(getGlobalContext()); }); SharedLockGuard lock(shared->mutex); - return *shared->user_defined_sql_objects_loader; + return *shared->user_defined_sql_objects_storage; } -IUserDefinedSQLObjectsLoader & Context::getUserDefinedSQLObjectsLoader() +IUserDefinedSQLObjectsStorage & Context::getUserDefinedSQLObjectsStorage() { - callOnce(shared->user_defined_sql_objects_loader_initialized, [&] { - shared->user_defined_sql_objects_loader = createUserDefinedSQLObjectsLoader(getGlobalContext()); + callOnce(shared->user_defined_sql_objects_storage_initialized, [&] { + shared->user_defined_sql_objects_storage = createUserDefinedSQLObjectsStorage(getGlobalContext()); }); - SharedLockGuard lock(shared->mutex); - return *shared->user_defined_sql_objects_loader; + std::lock_guard lock(shared->mutex); + return *shared->user_defined_sql_objects_storage; +} + +void Context::setUserDefinedSQLObjectsStorage(std::unique_ptr storage) +{ + std::lock_guard lock(shared->mutex); + shared->user_defined_sql_objects_storage = std::move(storage); } #if USE_NLP @@ -3317,16 +3346,6 @@ bool Context::hasAuxiliaryZooKeeper(const String & name) const return getConfigRef().has("auxiliary_zookeepers." 
+ name); } -void Context::reloadQueryMaskingRulesIfChanged(const ConfigurationPtr & config) const -{ - const auto old_config = shared->sensitive_data_masker_config; - if (old_config && isSameConfiguration(*config, *old_config, "query_masking_rules")) - return; - - SensitiveDataMasker::setInstance(std::make_unique(*config, "query_masking_rules")); - shared->sensitive_data_masker_config = config; -} - InterserverCredentialsPtr Context::getInterserverCredentials() const { return shared->interserver_io_credentials.get(); @@ -3416,6 +3435,24 @@ UInt16 Context::getServerPort(const String & port_name) const return it->second; } +void Context::setMaxPartNumToWarn(size_t max_part_to_warn) +{ + SharedLockGuard lock(shared->mutex); + shared->max_part_num_to_warn = max_part_to_warn; +} + +void Context::setMaxTableNumToWarn(size_t max_table_to_warn) +{ + SharedLockGuard lock(shared->mutex); + shared->max_table_num_to_warn= max_table_to_warn; +} + +void Context::setMaxDatabaseNumToWarn(size_t max_database_to_warn) +{ + SharedLockGuard lock(shared->mutex); + shared->max_database_num_to_warn= max_database_to_warn; +} + std::shared_ptr Context::getCluster(const std::string & cluster_name) const { if (auto res = tryGetCluster(cluster_name)) @@ -4023,7 +4060,8 @@ void Context::checkCanBeDropped(const String & database, const String & table, c "2. File '{}' intended to force DROP {}\n" "How to fix this:\n" "1. Either increase (or set to zero) max_[table/partition]_size_to_drop in server config\n" - "2. Either create forcing file {} and make sure that ClickHouse has write permission for it.\n" + "2. Either pass a bigger (or set to zero) max_[table/partition]_size_to_drop through query settings\n" + "3. Either create forcing file {} and make sure that ClickHouse has write permission for it.\n" "Example:\nsudo touch '{}' && sudo chmod 666 '{}'", backQuoteIfNeed(database), backQuoteIfNeed(table), size_str, max_size_to_drop_str, @@ -4051,6 +4089,10 @@ void Context::checkTableCanBeDropped(const String & database, const String & tab checkCanBeDropped(database, table, table_size, max_table_size_to_drop); } +void Context::checkTableCanBeDropped(const String & database, const String & table, const size_t & table_size, const size_t & max_table_size_to_drop) const +{ + checkCanBeDropped(database, table, table_size, max_table_size_to_drop); +} void Context::setMaxPartitionSizeToDrop(size_t max_size) { @@ -4070,6 +4112,10 @@ void Context::checkPartitionCanBeDropped(const String & database, const String & checkCanBeDropped(database, table, partition_size, max_partition_size_to_drop); } +void Context::checkPartitionCanBeDropped(const String & database, const String & table, const size_t & partition_size, const size_t & max_partition_size_to_drop) const +{ + checkCanBeDropped(database, table, partition_size, max_partition_size_to_drop); +} InputFormatPtr Context::getInputFormat(const String & name, ReadBuffer & buf, const Block & sample, UInt64 max_block_size, const std::optional & format_settings, const std::optional max_parsing_threads) const { @@ -4500,7 +4546,7 @@ StorageID Context::resolveStorageIDImpl(StorageID storage_id, StorageNamespace w if (!storage_id) { if (exception) - exception->emplace(ErrorCodes::UNKNOWN_TABLE, "Both table name and UUID are empty"); + exception->emplace(Exception(ErrorCodes::UNKNOWN_TABLE, "Both table name and UUID are empty")); return storage_id; } @@ -4561,7 +4607,7 @@ StorageID Context::resolveStorageIDImpl(StorageID storage_id, StorageNamespace w if (current_database.empty()) { if 
(exception) - exception->emplace(ErrorCodes::UNKNOWN_DATABASE, "Default database is not selected"); + exception->emplace(Exception(ErrorCodes::UNKNOWN_DATABASE, "Default database is not selected")); return StorageID::createEmpty(); } storage_id.database_name = current_database; @@ -4788,6 +4834,11 @@ void Context::initializeBackgroundExecutorsIfNeeded() ); LOG_INFO(shared->log, "Initialized background executor for move operations with num_threads={}, num_tasks={}", background_move_pool_size, background_move_pool_size); + auto timeouts = ConnectionTimeouts::getFetchPartHTTPTimeouts(getServerSettings(), getSettingsRef()); + /// The number of background fetches is limited by the number of threads in the background thread pool. + /// It doesn't make any sense to limit the number of connections per host any further. + shared->fetches_session_factory = std::make_shared(timeouts, background_fetches_pool_size); + shared->fetch_executor = std::make_shared ( "Fetch", @@ -4841,6 +4892,12 @@ OrdinaryBackgroundExecutorPtr Context::getCommonExecutor() const return shared->common_executor; } +PooledSessionFactoryPtr Context::getCommonFetchesSessionFactory() const +{ + SharedLockGuard lock(shared->background_executors_mutex); + return shared->fetches_session_factory; +} + IAsynchronousReader & Context::getThreadPoolReader(FilesystemReaderType type) const { callOnce(shared->readers_initialized, [&] { @@ -4861,6 +4918,17 @@ IAsynchronousReader & Context::getThreadPoolReader(FilesystemReaderType type) co } } +#if USE_LIBURING +IOUringReader & Context::getIOURingReader() const +{ + callOnce(shared->io_uring_reader_initialized, [&] { + shared->io_uring_reader = std::make_unique(512); + }); + + return *shared->io_uring_reader; +} +#endif + ThreadPool & Context::getThreadPoolWriter() const { callOnce(shared->threadpool_writer_initialized, [&] { @@ -4935,6 +5003,7 @@ ReadSettings Context::getReadSettings() const res.http_retry_initial_backoff_ms = settings.http_retry_initial_backoff_ms; res.http_retry_max_backoff_ms = settings.http_retry_max_backoff_ms; res.http_skip_not_found_url_for_globs = settings.http_skip_not_found_url_for_globs; + res.http_make_head_request = settings.http_make_head_request; res.mmap_cache = getMMappedFileCache().get(); @@ -4973,13 +5042,13 @@ Context::ParallelReplicasMode Context::getParallelReplicasMode() const if (!settings_ref.parallel_replicas_custom_key.value.empty()) return CUSTOM_KEY; - if (settings_ref.allow_experimental_parallel_reading_from_replicas > 0 && !settings_ref.use_hedged_requests) + if (settings_ref.allow_experimental_parallel_reading_from_replicas > 0) return READ_TASKS; return SAMPLE_KEY; } -bool Context::canUseParallelReplicas() const +bool Context::canUseTaskBasedParallelReplicas() const { const auto & settings_ref = getSettingsRef(); return getParallelReplicasMode() == ParallelReplicasMode::READ_TASKS && settings_ref.max_parallel_replicas > 1; @@ -4987,12 +5056,12 @@ bool Context::canUseParallelReplicas() const bool Context::canUseParallelReplicasOnInitiator() const { - return canUseParallelReplicas() && !getClientInfo().collaborate_with_initiator; + return canUseTaskBasedParallelReplicas() && !getClientInfo().collaborate_with_initiator; } bool Context::canUseParallelReplicasOnFollower() const { - return canUseParallelReplicas() && getClientInfo().collaborate_with_initiator; + return canUseTaskBasedParallelReplicas() && getClientInfo().collaborate_with_initiator; } void Context::setPreparedSetsCache(const PreparedSetsCachePtr & cache) diff --git 
a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 0336e82a011..b09eeb8ca2d 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -66,7 +66,7 @@ enum class RowPolicyFilterType; class EmbeddedDictionaries; class ExternalDictionariesLoader; class ExternalUserDefinedExecutableFunctionsLoader; -class IUserDefinedSQLObjectsLoader; +class IUserDefinedSQLObjectsStorage; class InterserverCredentials; using InterserverCredentialsPtr = std::shared_ptr; class InterserverIOHandler; @@ -74,6 +74,7 @@ class BackgroundSchedulePool; class MergeList; class MovesList; class ReplicatedFetchList; +class RefreshSet; class Cluster; class Compiler; class MarkCache; @@ -109,6 +110,7 @@ class AsynchronousInsertLog; class BackupLog; class BlobStorageLog; class IAsynchronousReader; +class IOUringReader; struct MergeTreeSettings; struct InitialAllRangesAnnouncement; struct ParallelReadRequest; @@ -201,6 +203,9 @@ using TemporaryDataOnDiskScopePtr = std::shared_ptr; class PreparedSetsCache; using PreparedSetsCachePtr = std::shared_ptr; +class PooledSessionFactory; +using PooledSessionFactoryPtr = std::shared_ptr; + class SessionTracker; struct ServerSettings; @@ -800,8 +805,9 @@ public: const ExternalUserDefinedExecutableFunctionsLoader & getExternalUserDefinedExecutableFunctionsLoader() const; ExternalUserDefinedExecutableFunctionsLoader & getExternalUserDefinedExecutableFunctionsLoader(); - const IUserDefinedSQLObjectsLoader & getUserDefinedSQLObjectsLoader() const; - IUserDefinedSQLObjectsLoader & getUserDefinedSQLObjectsLoader(); + const IUserDefinedSQLObjectsStorage & getUserDefinedSQLObjectsStorage() const; + IUserDefinedSQLObjectsStorage & getUserDefinedSQLObjectsStorage(); + void setUserDefinedSQLObjectsStorage(std::unique_ptr storage); void loadOrReloadUserDefinedExecutableFunctions(const Poco::Util::AbstractConfiguration & config); #if USE_NLP @@ -841,6 +847,9 @@ public: void setHTTPHeaderFilter(const Poco::Util::AbstractConfiguration & config); const HTTPHeaderFilter & getHTTPHeaderFilter() const; + void setMaxTableNumToWarn(size_t max_table_to_warn); + void setMaxDatabaseNumToWarn(size_t max_database_to_warn); + void setMaxPartNumToWarn(size_t max_part_to_warn); /// The port that the server listens for executing SQL queries. UInt16 getTCPPort() const; @@ -914,6 +923,9 @@ public: ReplicatedFetchList & getReplicatedFetchList(); const ReplicatedFetchList & getReplicatedFetchList() const; + RefreshSet & getRefreshSet(); + const RefreshSet & getRefreshSet() const; + /// If the current session is expired at the time of the call, synchronously creates and returns a new session with the startNewSession() call. /// If no ZooKeeper configured, throws an exception. 
std::shared_ptr getZooKeeper() const; @@ -951,8 +963,6 @@ public: // Reload Zookeeper void reloadZooKeeperIfChanged(const ConfigurationPtr & config) const; - void reloadQueryMaskingRulesIfChanged(const ConfigurationPtr & config) const; - void setSystemZooKeeperLogAfterInitializationIfNeeded(); /// --- Caches ------------------------------------------------------------------------------------------ @@ -1078,11 +1088,13 @@ public: void setMaxTableSizeToDrop(size_t max_size); size_t getMaxTableSizeToDrop() const; void checkTableCanBeDropped(const String & database, const String & table, const size_t & table_size) const; + void checkTableCanBeDropped(const String & database, const String & table, const size_t & table_size, const size_t & max_table_size_to_drop) const; /// Prevents DROP PARTITION if its size is greater than max_size (50GB by default, max_size=0 turn off this check) void setMaxPartitionSizeToDrop(size_t max_size); size_t getMaxPartitionSizeToDrop() const; void checkPartitionCanBeDropped(const String & database, const String & table, const size_t & partition_size) const; + void checkPartitionCanBeDropped(const String & database, const String & table, const size_t & partition_size, const size_t & max_partition_size_to_drop) const; /// Lets you select the compression codec according to the conditions described in the configuration file. std::shared_ptr chooseCompressionCodec(size_t part_size, double part_size_ratio) const; @@ -1208,8 +1220,12 @@ public: OrdinaryBackgroundExecutorPtr getMovesExecutor() const; OrdinaryBackgroundExecutorPtr getFetchesExecutor() const; OrdinaryBackgroundExecutorPtr getCommonExecutor() const; + PooledSessionFactoryPtr getCommonFetchesSessionFactory() const; IAsynchronousReader & getThreadPoolReader(FilesystemReaderType type) const; +#if USE_LIBURING + IOUringReader & getIOURingReader() const; +#endif std::shared_ptr getAsyncReadCounters() const; @@ -1222,7 +1238,7 @@ public: WriteSettings getWriteSettings() const; /** There are multiple conditions that have to be met to be able to use parallel replicas */ - bool canUseParallelReplicas() const; + bool canUseTaskBasedParallelReplicas() const; bool canUseParallelReplicasOnInitiator() const; bool canUseParallelReplicasOnFollower() const; diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 30cf6fd0568..f08fd72ff7f 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -611,7 +611,7 @@ void DDLWorker::processTask(DDLTaskBase & task, const ZooKeeperPtr & zookeeper) { /// Connection has been lost and now we are retrying, /// but our previous ephemeral node still exists. 
- zookeeper->handleEphemeralNodeExistence(active_node_path, canary_value); + zookeeper->deleteEphemeralNodeIfContentMatches(active_node_path, canary_value); } zookeeper->create(active_node_path, canary_value, zkutil::CreateMode::Ephemeral); diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index c388ade9062..fc1975e8c86 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -331,7 +331,7 @@ DatabaseAndTable DatabaseCatalog::getTableImpl( if (!table_id) { if (exception) - exception->emplace(ErrorCodes::UNKNOWN_TABLE, "Cannot find table: StorageID is empty"); + exception->emplace(Exception(ErrorCodes::UNKNOWN_TABLE, "Cannot find table: StorageID is empty")); return {}; } diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h index ee5d0b84b23..6d8fd84557c 100644 --- a/src/Interpreters/DatabaseCatalog.h +++ b/src/Interpreters/DatabaseCatalog.h @@ -30,29 +30,6 @@ namespace fs = std::filesystem; namespace DB { -class TableNameHints : public IHints<> -{ -public: - TableNameHints(ConstDatabasePtr database_, ContextPtr context_) - : context(context_), - database(database_) - { - } - Names getAllRegisteredNames() const override - { - Names result; - if (database) - { - for (auto table_it = database->getTablesIterator(context); table_it->isValid(); table_it->next()) - result.emplace_back(table_it->name()); - } - return result; - } -private: - ContextPtr context; - ConstDatabasePtr database; -}; - class IDatabase; class Exception; class ColumnsDescription; @@ -392,6 +369,68 @@ private: static constexpr time_t DBMS_DEFAULT_DISK_RELOAD_PERIOD_SEC = 5; }; +class TableNameHints : public IHints<> +{ +public: + TableNameHints(ConstDatabasePtr database_, ContextPtr context_) + : context(context_), + database(database_) + { + } + + /// getHintForTable tries to get a hint for the provided table_name in the provided + /// database. If the results are empty, it goes for extended hints for the table + /// with getExtendedHintForTable which looks for the table name in every database that's + /// available in the database catalog. It finally returns a single hint which is the database + /// name and table_name pair which is similar to the table_name provided. Perhaps something to + /// consider is should we return more than one pair of hint? + std::pair getHintForTable(const String & table_name) const + { + auto results = this->getHints(table_name, getAllRegisteredNames()); + if (results.empty()) + return getExtendedHintForTable(table_name); + return std::make_pair(database->getDatabaseName(), results[0]); + } + + /// getExtendedHintsForTable tries to get hint for the given table_name across all + /// the databases that are available in the database catalog. + std::pair getExtendedHintForTable(const String & table_name) const + { + /// load all available databases from the DatabaseCatalog instance + auto & database_catalog = DatabaseCatalog::instance(); + auto all_databases = database_catalog.getDatabases(); + + for (const auto & [db_name, db] : all_databases) + { + /// this case should be covered already by getHintForTable + if (db_name == database->getDatabaseName()) + continue; + + TableNameHints hints(db, context); + auto results = hints.getHints(table_name); + + /// if the results are not empty, return the first instance of the table_name + /// and the corresponding database_name that was found. 
+ if (!results.empty()) + return std::make_pair(db_name, results[0]); + } + return {}; + } + + Names getAllRegisteredNames() const override + { + Names result; + if (database) + for (auto table_it = database->getTablesIterator(context); table_it->isValid(); table_it->next()) + result.emplace_back(table_it->name()); + return result; + } + +private: + ContextPtr context; + ConstDatabasePtr database; +}; + /// This class is useful when creating a table or database. /// Usually we create IStorage/IDatabase object first and then add it to IDatabase/DatabaseCatalog. diff --git a/src/Interpreters/EmbeddedDictionaries.h b/src/Interpreters/EmbeddedDictionaries.h index 674b3a7f01e..e71098636fe 100644 --- a/src/Interpreters/EmbeddedDictionaries.h +++ b/src/Interpreters/EmbeddedDictionaries.h @@ -12,14 +12,13 @@ namespace Poco { class Logger; namespace Util { class AbstractConfiguration; } } +namespace DB +{ + class RegionsHierarchies; class RegionsNames; class GeoDictionariesLoader; - -namespace DB -{ - /// Metrica's Dictionaries which can be used in functions. class EmbeddedDictionaries : WithContext diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 3b389dcf61e..969c57535f9 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -56,6 +56,7 @@ #include #include #include +#include #include @@ -858,11 +859,8 @@ const ASTSelectQuery * ExpressionAnalyzer::getSelectQuery() const bool ExpressionAnalyzer::isRemoteStorage() const { - const Settings & csettings = getContext()->getSettingsRef(); // Consider any storage used in parallel replicas as remote, so the query is executed in multiple servers - const bool enable_parallel_processing_of_joins - = csettings.max_parallel_replicas > 1 && csettings.allow_experimental_parallel_reading_from_replicas > 0; - return syntax->is_remote_storage || enable_parallel_processing_of_joins; + return syntax->is_remote_storage || getContext()->canUseTaskBasedParallelReplicas(); } const ASTSelectQuery * SelectQueryExpressionAnalyzer::getAggregatingQuery() const @@ -954,6 +952,9 @@ static std::shared_ptr tryCreateJoin( std::unique_ptr & joined_plan, ContextPtr context) { + if (analyzed_join->kind() == JoinKind::Paste) + return std::make_shared(analyzed_join, right_sample_block); + if (algorithm == JoinAlgorithm::DIRECT || algorithm == JoinAlgorithm::DEFAULT) { JoinPtr direct_join = tryKeyValueJoin(analyzed_join, right_sample_block); diff --git a/src/Interpreters/GraceHashJoin.cpp b/src/Interpreters/GraceHashJoin.cpp index 89ea3a326cc..26d666a8913 100644 --- a/src/Interpreters/GraceHashJoin.cpp +++ b/src/Interpreters/GraceHashJoin.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include @@ -271,7 +272,7 @@ GraceHashJoin::GraceHashJoin( , left_key_names(table_join->getOnlyClause().key_names_left) , right_key_names(table_join->getOnlyClause().key_names_right) , tmp_data(std::make_unique(tmp_data_, CurrentMetrics::TemporaryFilesForJoin)) - , hash_join(makeInMemoryJoin()) + , hash_join(makeInMemoryJoin("grace0")) , hash_join_sample_block(hash_join->savedBlockSample()) { if (!GraceHashJoin::isSupported(table_join)) @@ -424,8 +425,10 @@ void GraceHashJoin::initialize(const Block & sample_block) { left_sample_block = sample_block.cloneEmpty(); output_sample_block = left_sample_block.cloneEmpty(); - ExtraBlockPtr not_processed; + ExtraBlockPtr not_processed = nullptr; hash_join->joinBlock(output_sample_block, not_processed); + if (not_processed) + throw 
Exception(ErrorCodes::LOGICAL_ERROR, "Unhandled not processed block in GraceHashJoin"); initBuckets(); } @@ -447,9 +450,6 @@ void GraceHashJoin::joinBlock(Block & block, std::shared_ptr & not_p block = std::move(blocks[current_bucket->idx]); hash_join->joinBlock(block, not_processed); - if (not_processed) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unhandled not processed block in GraceHashJoin"); - flushBlocksToBuckets(blocks, buckets); } @@ -528,6 +528,29 @@ public: Block nextImpl() override { + ExtraBlockPtr not_processed = nullptr; + + { + std::lock_guard lock(extra_block_mutex); + if (!not_processed_blocks.empty()) + { + not_processed = std::move(not_processed_blocks.front()); + not_processed_blocks.pop_front(); + } + } + + if (not_processed) + { + Block block = std::move(not_processed->block); + hash_join->joinBlock(block, not_processed); + if (not_processed) + { + std::lock_guard lock(extra_block_mutex); + not_processed_blocks.emplace_back(std::move(not_processed)); + } + return block; + } + Block block; size_t num_buckets = buckets.size(); size_t current_idx = buckets[current_bucket]->idx; @@ -565,12 +588,12 @@ public: } } while (block.rows() == 0); - ExtraBlockPtr not_processed; hash_join->joinBlock(block, not_processed); - if (not_processed) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unsupported hash join type"); - + { + std::lock_guard lock(extra_block_mutex); + not_processed_blocks.emplace_back(std::move(not_processed)); + } return block; } @@ -582,6 +605,9 @@ public: Names left_key_names; Names right_key_names; + + std::mutex extra_block_mutex; + std::list not_processed_blocks TSA_GUARDED_BY(extra_block_mutex); }; IBlocksStreamPtr GraceHashJoin::getDelayedBlocks() @@ -611,7 +637,7 @@ IBlocksStreamPtr GraceHashJoin::getDelayedBlocks() continue; } - hash_join = makeInMemoryJoin(prev_keys_num); + hash_join = makeInMemoryJoin(fmt::format("grace{}", bucket_idx), prev_keys_num); auto right_reader = current_bucket->startJoining(); size_t num_rows = 0; /// count rows that were written and rehashed while (Block block = right_reader.read()) @@ -632,10 +658,9 @@ IBlocksStreamPtr GraceHashJoin::getDelayedBlocks() return nullptr; } -GraceHashJoin::InMemoryJoinPtr GraceHashJoin::makeInMemoryJoin(size_t reserve_num) +GraceHashJoin::InMemoryJoinPtr GraceHashJoin::makeInMemoryJoin(const String & bucket_id, size_t reserve_num) { - auto ret = std::make_unique(table_join, right_sample_block, any_take_last_row, reserve_num); - return std::move(ret); + return std::make_unique(table_join, right_sample_block, any_take_last_row, reserve_num, bucket_id); } Block GraceHashJoin::prepareRightBlock(const Block & block) @@ -661,7 +686,7 @@ void GraceHashJoin::addBlockToJoinImpl(Block block) { std::lock_guard lock(hash_join_mutex); if (!hash_join) - hash_join = makeInMemoryJoin(); + hash_join = makeInMemoryJoin(fmt::format("grace{}", bucket_index)); // buckets size has been changed in other threads. Need to scatter current_block again. // rehash could only happen under hash_join_mutex's scope. 
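The `nextImpl()` change above no longer treats a non-empty `not_processed` block as a logical error; leftovers are parked in a mutex-protected list and drained before new bucket data is read, so a joined block that is too large to emit in one pass is produced over several calls. A small sketch of that park-and-retry pattern, with a toy join interface standing in for `HashJoin`:

```cpp
#include <deque>
#include <memory>
#include <mutex>
#include <optional>

struct Block { int rows = 0; };                    /// toy stand-in for DB::Block
using ExtraBlockPtr = std::shared_ptr<Block>;      /// leftover rows the join could not emit in one pass

/// Toy join: joining a block may hand back a leftover that must be fed in again later.
struct ToyJoin
{
    ExtraBlockPtr joinBlock(Block & /*block*/) { return nullptr; }
};

class DelayedJoinedBlocksWorker
{
public:
    explicit DelayedJoinedBlocksWorker(std::shared_ptr<ToyJoin> join_) : join(std::move(join_)) {}

    std::optional<Block> next()
    {
        /// 1. Prefer a previously parked leftover block, if any.
        ExtraBlockPtr leftover;
        {
            std::lock_guard lock(pending_mutex);
            if (!pending.empty())
            {
                leftover = std::move(pending.front());
                pending.pop_front();
            }
        }
        if (leftover)
        {
            Block block = *leftover;
            park(join->joinBlock(block));          /// may produce yet another leftover
            return block;
        }

        /// 2. Otherwise read a fresh block from the bucket and join it.
        Block block = readNextBucketBlock();
        if (block.rows == 0)
            return std::nullopt;                   /// bucket exhausted
        park(join->joinBlock(block));
        return block;
    }

private:
    void park(ExtraBlockPtr leftover)
    {
        if (!leftover)
            return;
        std::lock_guard lock(pending_mutex);
        pending.emplace_back(std::move(leftover));
    }

    Block readNextBucketBlock() { return Block{}; } /// placeholder for the real bucket reader

    std::shared_ptr<ToyJoin> join;
    std::mutex pending_mutex;
    std::deque<ExtraBlockPtr> pending;              /// blocks the join asked us to retry later
};
```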
@@ -705,7 +730,7 @@ void GraceHashJoin::addBlockToJoinImpl(Block block) current_block = concatenateBlocks(current_blocks); } - hash_join = makeInMemoryJoin(prev_keys_num); + hash_join = makeInMemoryJoin(fmt::format("grace{}", bucket_index), prev_keys_num); if (current_block.rows() > 0) hash_join->addBlockToJoin(current_block, /* check_limits = */ false); diff --git a/src/Interpreters/GraceHashJoin.h b/src/Interpreters/GraceHashJoin.h index 44949440467..2cadeee10b9 100644 --- a/src/Interpreters/GraceHashJoin.h +++ b/src/Interpreters/GraceHashJoin.h @@ -44,9 +44,8 @@ class GraceHashJoin final : public IJoin { class FileBucket; class DelayedBlocks; - using InMemoryJoin = HashJoin; - using InMemoryJoinPtr = std::shared_ptr; + using InMemoryJoinPtr = std::shared_ptr; public: using BucketPtr = std::shared_ptr; @@ -91,7 +90,7 @@ public: private: void initBuckets(); /// Create empty join for in-memory processing. - InMemoryJoinPtr makeInMemoryJoin(size_t reserve_num = 0); + InMemoryJoinPtr makeInMemoryJoin(const String & bucket_id, size_t reserve_num = 0); /// Add right table block to the @join. Calls @rehash on overflow. void addBlockToJoinImpl(Block block); diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp index 060fe95958f..a84e1ec2175 100644 --- a/src/Interpreters/HashJoin.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -30,9 +30,10 @@ #include #include #include +#include #include - +#include namespace DB { @@ -217,7 +218,7 @@ static void correctNullabilityInplace(ColumnWithTypeAndName & column, bool nulla } } -static void correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable, const ColumnUInt8 & negative_null_map) +static void correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable, const IColumn::Filter & negative_null_map) { if (nullable) { @@ -233,7 +234,8 @@ static void correctNullabilityInplace(ColumnWithTypeAndName & column, bool nulla JoinCommon::removeColumnNullability(column); } -HashJoin::HashJoin(std::shared_ptr table_join_, const Block & right_sample_block_, bool any_take_last_row_, size_t reserve_num) +HashJoin::HashJoin(std::shared_ptr table_join_, const Block & right_sample_block_, + bool any_take_last_row_, size_t reserve_num, const String & instance_id_) : table_join(table_join_) , kind(table_join->kind()) , strictness(table_join->strictness()) @@ -241,10 +243,11 @@ HashJoin::HashJoin(std::shared_ptr table_join_, const Block & right_s , asof_inequality(table_join->getAsofInequality()) , data(std::make_shared()) , right_sample_block(right_sample_block_) + , instance_log_id(!instance_id_.empty() ? 
"(" + instance_id_ + ") " : "") , log(&Poco::Logger::get("HashJoin")) { - LOG_DEBUG(log, "({}) Datatype: {}, kind: {}, strictness: {}, right header: {}", fmt::ptr(this), data->type, kind, strictness, right_sample_block.dumpStructure()); - LOG_DEBUG(log, "({}) Keys: {}", fmt::ptr(this), TableJoin::formatClauses(table_join->getClauses(), true)); + LOG_TRACE(log, "{}Keys: {}, datatype: {}, kind: {}, strictness: {}, right header: {}", + instance_log_id, TableJoin::formatClauses(table_join->getClauses(), true), data->type, kind, strictness, right_sample_block.dumpStructure()); if (isCrossOrComma(kind)) { @@ -269,8 +272,9 @@ HashJoin::HashJoin(std::shared_ptr table_join_, const Block & right_s sample_block_with_columns_to_add = right_table_keys = materializeBlock(right_sample_block); } - JoinCommon::convertToFullColumnsInplace(right_table_keys); + materializeBlockInplace(right_table_keys); initRightBlockStructure(data->sample_block); + data->sample_block = prepareRightBlock(data->sample_block); JoinCommon::createMissedColumns(sample_block_with_columns_to_add); @@ -373,10 +377,20 @@ HashJoin::Type HashJoin::chooseMethod(JoinKind kind, const ColumnRawPtrs & key_c return Type::keys256; /// If there is single string key, use hash table of it's values. - if (keys_size == 1 - && (typeid_cast(key_columns[0]) - || (isColumnConst(*key_columns[0]) && typeid_cast(&assert_cast(key_columns[0])->getDataColumn())))) - return Type::key_string; + if (keys_size == 1) + { + auto is_string_column = [](const IColumn * column_ptr) -> bool + { + if (const auto * lc_column_ptr = typeid_cast(column_ptr)) + return typeid_cast(lc_column_ptr->getDictionary().getNestedColumn().get()); + return typeid_cast(column_ptr); + }; + + const auto * key_column = key_columns[0]; + if (is_string_column(key_column) || + (isColumnConst(*key_column) && is_string_column(assert_cast(key_column)->getDataColumnPtr().get()))) + return Type::key_string; + } if (keys_size == 1 && typeid_cast(key_columns[0])) return Type::key_fixed_string; @@ -791,7 +805,13 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) size_t rows = source_block.rows(); - ColumnPtrMap all_key_columns = JoinCommon::materializeColumnsInplaceMap(source_block, table_join->getAllNames(JoinTableSide::Right)); + const auto & right_key_names = table_join->getAllNames(JoinTableSide::Right); + ColumnPtrMap all_key_columns(right_key_names.size()); + for (const auto & column_name : right_key_names) + { + const auto & column = source_block.getByName(column_name).column; + all_key_columns[column_name] = recursiveRemoveSparse(column->convertToFullColumnIfConst())->convertToFullColumnIfLowCardinality(); + } Block block_to_save = prepareRightBlock(source_block); if (shrink_blocks) @@ -804,6 +824,8 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "addBlockToJoin called when HashJoin locked to prevent updates"); data->blocks_allocated_size += block_to_save.allocatedBytes(); + + assertBlocksHaveEqualStructure(data->sample_block, block_to_save, "joined block"); data->blocks.emplace_back(std::move(block_to_save)); Block * stored_block = &data->blocks.back(); @@ -1012,16 +1034,15 @@ public: }; AddedColumns( + const Block & left_block, const Block & block_with_columns_to_add, - const Block & block, const Block & saved_block_sample, const HashJoin & join, std::vector && join_on_keys_, bool is_asof_join, bool is_join_get_) : join_on_keys(join_on_keys_) - , rows_to_add(block.rows()) - , 
sample_block(saved_block_sample) + , rows_to_add(left_block.rows()) , is_join_get(is_join_get_) { size_t num_columns_to_add = block_with_columns_to_add.columns(); @@ -1038,7 +1059,7 @@ public: /// because it uses not qualified right block column names auto qualified_name = join.getTableJoin().renamedRightColumnName(src_column.name); /// Don't insert column if it's in left block - if (!block.has(qualified_name)) + if (!left_block.has(qualified_name)) addColumn(src_column, qualified_name); } @@ -1052,6 +1073,17 @@ public: for (auto & tn : type_name) right_indexes.push_back(saved_block_sample.getPositionByName(tn.name)); + + nullable_column_ptrs.resize(right_indexes.size(), nullptr); + for (size_t j = 0; j < right_indexes.size(); ++j) + { + /** If it's joinGetOrNull, we will have nullable columns in result block + * even if right column is not nullable in storage (saved_block_sample). + */ + const auto & saved_column = saved_block_sample.getByPosition(right_indexes[j]).column; + if (columns[j]->isNullable() && !saved_column->isNullable()) + nullable_column_ptrs[j] = typeid_cast(columns[j].get()); + } } size_t size() const { return columns.size(); } @@ -1061,33 +1093,6 @@ public: return ColumnWithTypeAndName(std::move(columns[i]), type_name[i].type, type_name[i].qualified_name); } - static void assertBlockEqualsStructureUpToLowCard(const Block & lhs_block, const Block & rhs_block) - { - if (lhs_block.columns() != rhs_block.columns()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Different number of columns in blocks [{}] and [{}]", - lhs_block.dumpStructure(), rhs_block.dumpStructure()); - - for (size_t i = 0; i < lhs_block.columns(); ++i) - { - const auto & lhs = lhs_block.getByPosition(i); - const auto & rhs = rhs_block.getByPosition(i); - if (lhs.name != rhs.name) - throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Block structure mismatch: [{}] != [{}] ({} != {})", - lhs_block.dumpStructure(), rhs_block.dumpStructure(), lhs.name, rhs.name); - - const auto & ltype = recursiveRemoveLowCardinality(lhs.type); - const auto & rtype = recursiveRemoveLowCardinality(rhs.type); - if (!ltype->equals(*rtype)) - throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Block structure mismatch: [{}] != [{}] ({} != {})", - lhs_block.dumpStructure(), rhs_block.dumpStructure(), ltype->getName(), rtype->getName()); - - const auto & lcol = recursiveRemoveLowCardinality(lhs.column); - const auto & rcol = recursiveRemoveLowCardinality(rhs.column); - if (lcol->getDataType() != rcol->getDataType()) - throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Block structure mismatch: [{}] != [{}] ({} != {})", - lhs_block.dumpStructure(), rhs_block.dumpStructure(), lcol->getDataType(), rcol->getDataType()); - } - } template void appendFromBlock(const Block & block, size_t row_num) @@ -1096,38 +1101,50 @@ public: applyLazyDefaults(); #ifndef NDEBUG - /// Like assertBlocksHaveEqualStructure but doesn't check low cardinality - assertBlockEqualsStructureUpToLowCard(sample_block, block); -#else - UNUSED(assertBlockEqualsStructureUpToLowCard); + for (size_t j = 0; j < right_indexes.size(); ++j) + { + const auto * column_from_block = block.getByPosition(right_indexes[j]).column.get(); + const auto * dest_column = columns[j].get(); + if (auto * nullable_col = nullable_column_ptrs[j]) + { + if (!is_join_get) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Columns {} and {} can have different nullability only in joinGetOrNull", + dest_column->getName(), column_from_block->getName()); + dest_column = nullable_col->getNestedColumnPtr().get(); + } 
+ /** Using dest_column->structureEquals(*column_from_block) will not work for low cardinality columns, + * because dictionaries can be different, while calling insertFrom on them is safe, for example: + * ColumnLowCardinality(size = 0, UInt8(size = 0), ColumnUnique(size = 1, String(size = 1))) + * and + * ColumnLowCardinality(size = 0, UInt16(size = 0), ColumnUnique(size = 1, String(size = 1))) + */ + if (typeid(*dest_column) != typeid(*column_from_block)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Columns {} and {} have different types {} and {}", + dest_column->getName(), column_from_block->getName(), + demangle(typeid(*dest_column).name()), demangle(typeid(*column_from_block).name())); + } #endif if (is_join_get) { - /// If it's joinGetOrNull, we need to wrap not-nullable columns in StorageJoin. - for (size_t j = 0, size = right_indexes.size(); j < size; ++j) + size_t right_indexes_size = right_indexes.size(); + for (size_t j = 0; j < right_indexes_size; ++j) { const auto & column_from_block = block.getByPosition(right_indexes[j]); - if (auto * nullable_col = typeid_cast(columns[j].get()); - nullable_col && !column_from_block.column->isNullable()) + if (auto * nullable_col = nullable_column_ptrs[j]) nullable_col->insertFromNotNullable(*column_from_block.column, row_num); - else if (auto * lowcard_col = typeid_cast(columns[j].get()); - lowcard_col && !typeid_cast(column_from_block.column.get())) - lowcard_col->insertFromFullColumn(*column_from_block.column, row_num); else columns[j]->insertFrom(*column_from_block.column, row_num); } } else { - for (size_t j = 0, size = right_indexes.size(); j < size; ++j) + size_t right_indexes_size = right_indexes.size(); + for (size_t j = 0; j < right_indexes_size; ++j) { const auto & column_from_block = block.getByPosition(right_indexes[j]); - if (auto * lowcard_col = typeid_cast(columns[j].get()); - lowcard_col && !typeid_cast(column_from_block.column.get())) - lowcard_col->insertFromFullColumn(*column_from_block.column, row_num); - else - columns[j]->insertFrom(*column_from_block.column, row_num); + columns[j]->insertFrom(*column_from_block.column, row_num); } } } @@ -1151,18 +1168,37 @@ public: std::vector join_on_keys; + size_t max_joined_block_rows = 0; size_t rows_to_add; std::unique_ptr offsets_to_replicate; bool need_filter = false; + IColumn::Filter filter; + + void reserve(bool need_replicate) + { + if (!max_joined_block_rows) + return; + + /// Do not allow big allocations when user set max_joined_block_rows to huge value + size_t reserve_size = std::min(max_joined_block_rows, DEFAULT_BLOCK_SIZE * 2); + + if (need_replicate) + /// Reserve 10% more space for columns, because some rows can be repeated + reserve_size = static_cast(1.1 * reserve_size); + + for (auto & column : columns) + column->reserve(reserve_size); + } private: std::vector type_name; MutableColumns columns; + std::vector nullable_column_ptrs; + std::vector right_indexes; size_t lazy_defaults_count = 0; /// for ASOF const IColumn * left_asof_key = nullptr; - Block sample_block; bool is_join_get; @@ -1341,7 +1377,7 @@ void setUsed(IColumn::Filter & filter [[maybe_unused]], size_t pos [[maybe_unuse /// Joins right table columns which indexes are present in right_indexes using specified map. /// Makes filter (1 if row presented in right table) and returns offsets to replicate (for ALL JOINS). 
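
The AddedColumns changes above introduce a per-call row budget (max_joined_block_rows), an owned filter, and a reserve() heuristic; the hunks that follow make joinRightColumns stop once the produced output exceeds that budget and return how many left rows it consumed, while a sliceBlock helper splits the unconsumed tail off the input block so it can be retried later. A self-contained sketch of that consume-count contract, using plain vectors instead of Blocks (all names are illustrative):

```cpp
#include <cstddef>
#include <vector>

// Join one "left" row into 0..N output rows. Here every left value simply
// fans out into `fanout` copies, standing in for matches on the right side.
static void appendMatches(std::vector<int> & out, int left_value, size_t fanout)
{
    for (size_t k = 0; k < fanout; ++k)
        out.push_back(left_value);
}

// Returns the number of left rows consumed; stops once the produced output
// exceeds max_output_rows, mirroring the early break on current_offset.
size_t joinWithBudget(const std::vector<int> & left, size_t fanout,
                      size_t max_output_rows, std::vector<int> & out)
{
    size_t i = 0;
    for (; i < left.size(); ++i)
    {
        if (out.size() > max_output_rows)
            break;                      // defer the rest to the next call
        appendMatches(out, left[i], fanout);
    }
    return i;
}

// Cut the first num_consumed rows off `left` in place and return the remainder,
// the same shape as the sliceBlock helper added in this diff.
std::vector<int> sliceRemainder(std::vector<int> & left, size_t num_consumed)
{
    if (num_consumed >= left.size())
        return {};
    std::vector<int> remainder(left.begin() + num_consumed, left.end());
    left.resize(num_consumed);
    return remainder;
}

int main()
{
    std::vector<int> left = {1, 2, 3, 4, 5};
    std::vector<int> joined;

    size_t consumed = joinWithBudget(left, /*fanout=*/3, /*max_output_rows=*/7, joined);
    std::vector<int> not_processed = sliceRemainder(left, consumed);
    // `joined` holds the output for the first `consumed` rows;
    // `not_processed` is re-fed on the next joinBlock call.
    (void)not_processed;
}
```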
template -NO_INLINE IColumn::Filter joinRightColumns( +NO_INLINE size_t joinRightColumns( std::vector && key_getter_vector, const std::vector & mapv, AddedColumns & added_columns, @@ -1350,9 +1386,8 @@ NO_INLINE IColumn::Filter joinRightColumns( constexpr JoinFeatures join_features; size_t rows = added_columns.rows_to_add; - IColumn::Filter filter; if constexpr (need_filter) - filter = IColumn::Filter(rows, 0); + added_columns.filter = IColumn::Filter(rows, 0); Arena pool; @@ -1360,9 +1395,20 @@ NO_INLINE IColumn::Filter joinRightColumns( added_columns.offsets_to_replicate = std::make_unique(rows); IColumn::Offset current_offset = 0; - - for (size_t i = 0; i < rows; ++i) + size_t max_joined_block_rows = added_columns.max_joined_block_rows; + size_t i = 0; + for (; i < rows; ++i) { + if constexpr (join_features.need_replication) + { + if (unlikely(current_offset > max_joined_block_rows)) + { + added_columns.offsets_to_replicate->resize_assume_reserved(i); + added_columns.filter.resize_assume_reserved(i); + break; + } + } + bool right_row_found = false; KnownRowsHolder known_rows; @@ -1387,7 +1433,7 @@ NO_INLINE IColumn::Filter joinRightColumns( auto row_ref = mapped->findAsof(left_asof_key, i); if (row_ref.block) { - setUsed(filter, i); + setUsed(added_columns.filter, i); if constexpr (multiple_disjuncts) used_flags.template setUsed(row_ref.block, row_ref.row_num, 0); else @@ -1400,7 +1446,7 @@ NO_INLINE IColumn::Filter joinRightColumns( } else if constexpr (join_features.is_all_join) { - setUsed(filter, i); + setUsed(added_columns.filter, i); used_flags.template setUsed(find_result); auto used_flags_opt = join_features.need_flags ? &used_flags : nullptr; addFoundRowAll(mapped, added_columns, current_offset, known_rows, used_flags_opt); @@ -1412,7 +1458,7 @@ NO_INLINE IColumn::Filter joinRightColumns( if (used_once) { auto used_flags_opt = join_features.need_flags ? 
&used_flags : nullptr; - setUsed(filter, i); + setUsed(added_columns.filter, i); addFoundRowAll(mapped, added_columns, current_offset, known_rows, used_flags_opt); } } @@ -1423,7 +1469,7 @@ NO_INLINE IColumn::Filter joinRightColumns( /// Use first appeared left key only if (used_once) { - setUsed(filter, i); + setUsed(added_columns.filter, i); added_columns.appendFromBlock(*mapped.block, mapped.row_num); } @@ -1440,7 +1486,7 @@ NO_INLINE IColumn::Filter joinRightColumns( } else /// ANY LEFT, SEMI LEFT, old ANY (RightAny) { - setUsed(filter, i); + setUsed(added_columns.filter, i); used_flags.template setUsed(find_result); added_columns.appendFromBlock(*mapped.block, mapped.row_num); @@ -1455,7 +1501,7 @@ NO_INLINE IColumn::Filter joinRightColumns( if (!right_row_found) { if constexpr (join_features.is_anti_join && join_features.left) - setUsed(filter, i); + setUsed(added_columns.filter, i); addNotFoundRow(added_columns, current_offset); } @@ -1466,11 +1512,11 @@ NO_INLINE IColumn::Filter joinRightColumns( } added_columns.applyLazyDefaults(); - return filter; + return i; } template -IColumn::Filter joinRightColumnsSwitchMultipleDisjuncts( +size_t joinRightColumnsSwitchMultipleDisjuncts( std::vector && key_getter_vector, const std::vector & mapv, AddedColumns & added_columns, @@ -1482,7 +1528,7 @@ IColumn::Filter joinRightColumnsSwitchMultipleDisjuncts( } template -IColumn::Filter joinRightColumnsSwitchNullability( +size_t joinRightColumnsSwitchNullability( std::vector && key_getter_vector, const std::vector & mapv, AddedColumns & added_columns, @@ -1499,7 +1545,7 @@ IColumn::Filter joinRightColumnsSwitchNullability( } template -IColumn::Filter switchJoinRightColumns( +size_t switchJoinRightColumns( const std::vector & mapv, AddedColumns & added_columns, HashJoin::Type type, @@ -1548,10 +1594,61 @@ IColumn::Filter switchJoinRightColumns( } } +/** Since we do not store right key columns, + * this function is used to copy left key columns to right key columns. + * If the user requests some right columns, we just copy left key columns to right, since they are equal. + * Example: SELECT t1.key, t2.key FROM t1 FULL JOIN t2 ON t1.key = t2.key; + * In that case for matched rows in t2.key we will use values from t1.key. + * However, in some cases we might need to adjust the type of column, e.g. t1.key :: LowCardinality(String) and t2.key :: String + * Also, the nullability of the column might be different. + * Returns the right column after with necessary adjustments. 
+ */ +ColumnWithTypeAndName copyLeftKeyColumnToRight( + const DataTypePtr & right_key_type, const String & renamed_right_column, const ColumnWithTypeAndName & left_column, const IColumn::Filter * null_map_filter = nullptr) +{ + ColumnWithTypeAndName right_column = left_column; + right_column.name = renamed_right_column; + + if (null_map_filter) + right_column.column = JoinCommon::filterWithBlanks(right_column.column, *null_map_filter); + + bool should_be_nullable = isNullableOrLowCardinalityNullable(right_key_type); + if (null_map_filter) + correctNullabilityInplace(right_column, should_be_nullable, *null_map_filter); + else + correctNullabilityInplace(right_column, should_be_nullable); + + if (!right_column.type->equals(*right_key_type)) + { + right_column.column = castColumnAccurate(right_column, right_key_type); + right_column.type = right_key_type; + } + + right_column.column = right_column.column->convertToFullColumnIfConst(); + return right_column; +} + +/// Cut first num_rows rows from block in place and returns block with remaining rows +Block sliceBlock(Block & block, size_t num_rows) +{ + size_t total_rows = block.rows(); + if (num_rows >= total_rows) + return {}; + size_t remaining_rows = total_rows - num_rows; + Block remaining_block = block.cloneEmpty(); + for (size_t i = 0; i < block.columns(); ++i) + { + auto & col = block.getByPosition(i); + remaining_block.getByPosition(i).column = col.column->cut(num_rows, remaining_rows); + col.column = col.column->cut(0, num_rows); + } + return remaining_block; +} + } /// nameless template -void HashJoin::joinBlockImpl( +Block HashJoin::joinBlockImpl( Block & block, const Block & block_with_columns_to_add, const std::vector & maps_, @@ -1583,8 +1680,8 @@ void HashJoin::joinBlockImpl( * For ASOF, the last column is used as the ASOF column */ AddedColumns added_columns( - block_with_columns_to_add, block, + block_with_columns_to_add, savedBlockSample(), *this, std::move(join_on_keys), @@ -1593,8 +1690,16 @@ void HashJoin::joinBlockImpl( bool has_required_right_keys = (required_right_keys.columns() != 0); added_columns.need_filter = join_features.need_filter || has_required_right_keys; + added_columns.max_joined_block_rows = table_join->maxJoinedBlockRows(); + if (!added_columns.max_joined_block_rows) + added_columns.max_joined_block_rows = std::numeric_limits::max(); + else + added_columns.reserve(join_features.need_replication); - IColumn::Filter row_filter = switchJoinRightColumns(maps_, added_columns, data->type, used_flags); + size_t num_joined = switchJoinRightColumns(maps_, added_columns, data->type, used_flags); + /// Do not hold memory for join_on_keys anymore + added_columns.join_on_keys.clear(); + Block remaining_block = sliceBlock(block, num_joined); for (size_t i = 0; i < added_columns.size(); ++i) block.insert(added_columns.moveColumn(i)); @@ -1605,7 +1710,7 @@ void HashJoin::joinBlockImpl( { /// If ANY INNER | RIGHT JOIN - filter all the columns except the new ones. for (size_t i = 0; i < existing_columns; ++i) - block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->filter(row_filter, -1); + block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->filter(added_columns.filter, -1); /// Add join key columns from right block if needed using value from left table because of equality for (size_t i = 0; i < required_right_keys.columns(); ++i) @@ -1614,31 +1719,19 @@ void HashJoin::joinBlockImpl( // renamed ??? 
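
copyLeftKeyColumnToRight above encodes a subtle bit of join semantics: right key columns are never stored, so when a query asks for one, the left key values are reused (they are equal by the ON condition), filtered by the match mask and made nullable when the right key type requires it. A toy version of that idea, using std::optional in place of nullable columns (names hypothetical):

```cpp
#include <cstdint>
#include <optional>
#include <string>
#include <vector>

using NullableString = std::optional<std::string>;

// Build "t2.key" output for `SELECT t1.key, t2.key FROM t1 LEFT JOIN t2 ON t1.key = t2.key`
// without ever storing t2.key: reuse t1.key where the row matched, NULL elsewhere.
std::vector<NullableString> buildRightKeyFromLeft(
    const std::vector<std::string> & left_key,
    const std::vector<uint8_t> & matched)   // 1 if the left row found a right match
{
    std::vector<NullableString> right_key(left_key.size());
    for (size_t i = 0; i < left_key.size(); ++i)
    {
        if (matched[i])
            right_key[i] = left_key[i];     // equal by the join condition, so reuse it
        else
            right_key[i] = std::nullopt;    // unmatched outer-join row -> NULL
    }
    return right_key;
}
```

The real function additionally casts to the right key's exact type and handles LowCardinality wrappers, which the toy version deliberately skips.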
if (!block.findByName(right_key.name)) { - const auto & left_name = required_right_keys_sources[i]; - /// asof column is already in block. if (join_features.is_asof_join && right_key.name == table_join->getOnlyClause().key_names_right.back()) continue; - const auto & col = block.getByName(left_name); - bool is_nullable = JoinCommon::isNullable(right_key.type); - auto right_col_name = getTableJoin().renamedRightColumnName(right_key.name); - ColumnWithTypeAndName right_col(col.column, col.type, right_col_name); - if (right_col.type->lowCardinality() != right_key.type->lowCardinality()) - JoinCommon::changeLowCardinalityInplace(right_col); - correctNullabilityInplace(right_col, is_nullable); + const auto & left_column = block.getByName(required_right_keys_sources[i]); + const auto & right_col_name = getTableJoin().renamedRightColumnName(right_key.name); + auto right_col = copyLeftKeyColumnToRight(right_key.type, right_col_name, left_column); block.insert(std::move(right_col)); } } } else if (has_required_right_keys) { - /// Some trash to represent IColumn::Filter as ColumnUInt8 needed for ColumnNullable::applyNullMap() - auto null_map_filter_ptr = ColumnUInt8::create(); - ColumnUInt8 & null_map_filter = assert_cast(*null_map_filter_ptr); - null_map_filter.getData().swap(row_filter); - const IColumn::Filter & filter = null_map_filter.getData(); - /// Add join key columns from right block if needed. for (size_t i = 0; i < required_right_keys.columns(); ++i) { @@ -1646,21 +1739,12 @@ void HashJoin::joinBlockImpl( auto right_col_name = getTableJoin().renamedRightColumnName(right_key.name); if (!block.findByName(right_col_name)) { - const auto & left_name = required_right_keys_sources[i]; - /// asof column is already in block. if (join_features.is_asof_join && right_key.name == table_join->getOnlyClause().key_names_right.back()) continue; - const auto & col = block.getByName(left_name); - bool is_nullable = JoinCommon::isNullable(right_key.type); - - ColumnPtr thin_column = JoinCommon::filterWithBlanks(col.column, filter); - - ColumnWithTypeAndName right_col(thin_column, col.type, right_col_name); - if (right_col.type->lowCardinality() != right_key.type->lowCardinality()) - JoinCommon::changeLowCardinalityInplace(right_col); - correctNullabilityInplace(right_col, is_nullable, null_map_filter); + const auto & left_column = block.getByName(required_right_keys_sources[i]); + auto right_col = copyLeftKeyColumnToRight(right_key.type, right_col_name, left_column, &added_columns.filter); block.insert(std::move(right_col)); if constexpr (join_features.need_replication) @@ -1681,6 +1765,8 @@ void HashJoin::joinBlockImpl( for (size_t pos : right_keys_to_replicate) block.safeGetByPosition(pos).column = block.safeGetByPosition(pos).column->replicate(*offsets_to_replicate); } + + return remaining_block; } void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) const @@ -1814,7 +1900,7 @@ ColumnWithTypeAndName HashJoin::joinGet(const Block & block, const Block & block std::vector maps_vector; maps_vector.push_back(&std::get(data->maps[0])); joinBlockImpl( - keys, block_with_columns_to_add, maps_vector, true); + keys, block_with_columns_to_add, maps_vector, /* is_join_get = */ true); return keys.getByPosition(keys.columns() - 1); } @@ -1857,7 +1943,11 @@ void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) if (joinDispatch(kind, strictness, maps_vector, [&](auto kind_, auto strictness_, auto & maps_vector_) { - joinBlockImpl(block, sample_block_with_columns_to_add, 
maps_vector_); + Block remaining_block = joinBlockImpl(block, sample_block_with_columns_to_add, maps_vector_); + if (remaining_block.rows()) + not_processed = std::make_shared(ExtraBlock{std::move(remaining_block)}); + else + not_processed.reset(); })) { /// Joined @@ -1871,10 +1961,10 @@ HashJoin::~HashJoin() { if (!data) { - LOG_TRACE(log, "({}) Join data has been already released", fmt::ptr(this)); + LOG_TRACE(log, "{}Join data has been already released", instance_log_id); return; } - LOG_TRACE(log, "({}) Join data is being destroyed, {} bytes and {} rows in hash table", fmt::ptr(this), getTotalByteCount(), getTotalRowCount()); + LOG_TRACE(log, "{}Join data is being destroyed, {} bytes and {} rows in hash table", instance_log_id, getTotalByteCount(), getTotalRowCount()); } template @@ -1941,9 +2031,9 @@ public: } else { - auto fill_callback = [&](auto, auto strictness, auto & map) + auto fill_callback = [&](auto, auto, auto & map) { - rows_added = fillColumnsFromMap(map, columns_right); + rows_added = fillColumnsFromMap(map, columns_right); }; if (!joinDispatch(parent.kind, parent.strictness, parent.data->maps.front(), fill_callback)) @@ -2004,24 +2094,24 @@ private: return rows_added; } - template + template size_t fillColumnsFromMap(const Maps & maps, MutableColumns & columns_keys_and_right) { switch (parent.data->type) { #define M(TYPE) \ case HashJoin::Type::TYPE: \ - return fillColumns(*maps.TYPE, columns_keys_and_right); + return fillColumns(*maps.TYPE, columns_keys_and_right); APPLY_FOR_JOIN_VARIANTS(M) #undef M default: - throw Exception(ErrorCodes::UNSUPPORTED_JOIN_KEYS, "Unsupported JOIN keys (type: {})", parent.data->type) ; + throw Exception(ErrorCodes::UNSUPPORTED_JOIN_KEYS, "Unsupported JOIN keys (type: {})", parent.data->type); } UNREACHABLE(); } - template + template size_t fillColumns(const Map & map, MutableColumns & columns_keys_and_right) { size_t rows_added = 0; @@ -2067,8 +2157,8 @@ private: { const Mapped & mapped = it->getMapped(); - size_t off = map.offsetInternal(it.getPtr()); - if (parent.isUsed(off)) + size_t offset = map.offsetInternal(it.getPtr()); + if (parent.isUsed(offset)) continue; AdderNonJoined::add(mapped, rows_added, columns_keys_and_right); @@ -2155,7 +2245,7 @@ void HashJoin::reuseJoinedData(const HashJoin & join) BlocksList HashJoin::releaseJoinedBlocks(bool restructure) { - LOG_TRACE(log, "({}) Join data is being released, {} bytes and {} rows in hash table", fmt::ptr(this), getTotalByteCount(), getTotalRowCount()); + LOG_TRACE(log, "{}Join data is being released, {} bytes and {} rows in hash table", instance_log_id, getTotalByteCount(), getTotalRowCount()); BlocksList right_blocks = std::move(data->blocks); if (!restructure) @@ -2179,7 +2269,7 @@ BlocksList HashJoin::releaseJoinedBlocks(bool restructure) for (const auto & sample_column : right_sample_block) { positions.emplace_back(tmp_block.getPositionByName(sample_column.name)); - is_nullable.emplace_back(JoinCommon::isNullable(sample_column.type)); + is_nullable.emplace_back(isNullableOrLowCardinalityNullable(sample_column.type)); } } diff --git a/src/Interpreters/HashJoin.h b/src/Interpreters/HashJoin.h index d125e56057f..284cf5d0e7f 100644 --- a/src/Interpreters/HashJoin.h +++ b/src/Interpreters/HashJoin.h @@ -147,7 +147,8 @@ class HashJoin : public IJoin { public: HashJoin( - std::shared_ptr table_join_, const Block & right_sample_block, bool any_take_last_row_ = false, size_t reserve_num = 0); + std::shared_ptr table_join_, const Block & right_sample_block, + bool any_take_last_row_ = 
false, size_t reserve_num = 0, const String & instance_id_ = ""); ~HashJoin() override; @@ -436,6 +437,10 @@ private: bool shrink_blocks = false; Int64 memory_usage_before_adding_blocks = 0; + /// Identifier to distinguish different HashJoin instances in logs + /// Several instances can be created, for example, in GraceHashJoin to handle different buckets + String instance_log_id; + Poco::Logger * log; /// Should be set via setLock to protect hash table from modification from StorageJoin @@ -447,7 +452,7 @@ private: void initRightBlockStructure(Block & saved_block_sample); template - void joinBlockImpl( + Block joinBlockImpl( Block & block, const Block & block_with_columns_to_add, const std::vector & maps_, diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp index ddeb4bcef2c..2a34932d950 100644 --- a/src/Interpreters/InterpreterAlterQuery.cpp +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -155,6 +155,7 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter) } else throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong parameter type in ALTER query"); + if (!getContext()->getSettings().allow_experimental_statistic && ( command_ast->type == ASTAlterCommand::ADD_STATISTIC || command_ast->type == ASTAlterCommand::DROP_STATISTIC || @@ -407,6 +408,7 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS break; } case ASTAlterCommand::DELETE: + case ASTAlterCommand::APPLY_DELETED_MASK: case ASTAlterCommand::DROP_PARTITION: case ASTAlterCommand::DROP_DETACHED_PARTITION: { @@ -458,6 +460,11 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS required_access.emplace_back(AccessType::ALTER_VIEW_MODIFY_QUERY, database, table); break; } + case ASTAlterCommand::MODIFY_REFRESH: + { + required_access.emplace_back(AccessType::ALTER_VIEW_MODIFY_REFRESH, database, table); + break; + } case ASTAlterCommand::LIVE_VIEW_REFRESH: { required_access.emplace_back(AccessType::ALTER_VIEW_REFRESH, database, table); diff --git a/src/Interpreters/InterpreterCreateFunctionQuery.cpp b/src/Interpreters/InterpreterCreateFunctionQuery.cpp index 3e87f4fe440..ea59115b077 100644 --- a/src/Interpreters/InterpreterCreateFunctionQuery.cpp +++ b/src/Interpreters/InterpreterCreateFunctionQuery.cpp @@ -1,11 +1,12 @@ #include #include -#include +#include #include #include #include #include +#include #include @@ -19,6 +20,7 @@ namespace ErrorCodes BlockIO InterpreterCreateFunctionQuery::execute() { + FunctionNameNormalizer().visit(query_ptr.get()); const auto updated_query_ptr = removeOnClusterClauseIfNeeded(query_ptr, getContext()); ASTCreateFunctionQuery & create_function_query = updated_query_ptr->as(); @@ -32,7 +34,7 @@ BlockIO InterpreterCreateFunctionQuery::execute() if (!create_function_query.cluster.empty()) { - if (current_context->getUserDefinedSQLObjectsLoader().isReplicated()) + if (current_context->getUserDefinedSQLObjectsStorage().isReplicated()) throw Exception(ErrorCodes::INCORRECT_QUERY, "ON CLUSTER is not allowed because used-defined functions are replicated automatically"); DDLQueryOnClusterParams params; diff --git a/src/Interpreters/InterpreterCreateIndexQuery.cpp b/src/Interpreters/InterpreterCreateIndexQuery.cpp index 3b47a002e50..ed29c82a0f0 100644 --- a/src/Interpreters/InterpreterCreateIndexQuery.cpp +++ b/src/Interpreters/InterpreterCreateIndexQuery.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -22,6 +23,7 @@ namespace ErrorCodes BlockIO 
InterpreterCreateIndexQuery::execute() { + FunctionNameNormalizer().visit(query_ptr.get()); auto current_context = getContext(); const auto & create_index = query_ptr->as(); diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 2b60b0b7b47..29abe292908 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -282,7 +282,7 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) else if (create.uuid != UUIDHelpers::Nil && !DatabaseCatalog::instance().hasUUIDMapping(create.uuid)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot find UUID mapping for {}, it's a bug", create.uuid); - DatabasePtr database = DatabaseFactory::get(create, metadata_path / "", getContext()); + DatabasePtr database = DatabaseFactory::instance().get(create, metadata_path / "", getContext()); if (create.uuid != UUIDHelpers::Nil) create.setDatabase(TABLE_WITH_UUID_NAME_PLACEHOLDER); @@ -786,10 +786,28 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti } else { - as_select_sample = InterpreterSelectWithUnionQuery::getSampleBlock(create.select->clone(), - getContext(), - false /* is_subquery */, - create.isParameterizedView()); + /** To get valid sample block we need to prepare query without only_analyze, because we need to execute scalar + * subqueries. Otherwise functions that expect only constant arguments will throw error during query analysis, + * because the result of scalar subquery is not a constant. + * + * Example: + * CREATE MATERIALIZED VIEW test_mv ENGINE=MergeTree ORDER BY arr + * AS + * WITH (SELECT '\d[a-z]') AS constant_value + * SELECT extractAll(concat(toString(number), 'a'), assumeNotNull(constant_value)) AS arr + * FROM test_table; + * + * For new analyzer this issue does not exists because we always execute scalar subqueries. + * We can improve this in new analyzer, and execute scalar subqueries only in contexts when we expect constant + * for example: LIMIT, OFFSET, functions parameters, functions constant only arguments. + */ + + SelectQueryOptions options; + if (create.isParameterizedView()) + options = options.createParameterizedView(); + + InterpreterSelectWithUnionQuery interpreter(create.select->clone(), getContext(), options); + as_select_sample = interpreter.getSampleBlock(); } properties.columns = ColumnsDescription(as_select_sample.getNamesAndTypesList()); @@ -1071,6 +1089,13 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data "{} UUID specified, but engine of database {} is not Atomic", kind, create.getDatabase()); } + if (create.refresh_strategy && database->getEngineName() != "Atomic") + throw Exception(ErrorCodes::INCORRECT_QUERY, + "Refreshable materialized view requires Atomic database engine, but database {} has engine {}", create.getDatabase(), database->getEngineName()); + /// TODO: Support Replicated databases, only with Shared/ReplicatedMergeTree. + /// Figure out how to make the refreshed data appear all at once on other + /// replicas; maybe a replicated SYSTEM SYNC REPLICA query before the rename? + /// The database doesn't support UUID so we'll ignore it. The UUID could be set here because of either /// a) the initiator of `ON CLUSTER` query generated it to ensure the same UUIDs are used on different hosts; or /// b) `RESTORE from backup` query generated it to ensure the same UUIDs are used on different hosts. 
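
A mechanical change in this hunk is DatabaseFactory::get(...) becoming DatabaseFactory::instance().get(...), i.e. the factory now behaves like a singleton registry rather than a static helper. The real DatabaseFactory interface is not shown in this diff, so the sketch below is only the general shape of such a registry, with hypothetical names:

```cpp
#include <functional>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

struct IDatabase { virtual ~IDatabase() = default; };

class DatabaseFactoryLike
{
public:
    using Creator = std::function<std::unique_ptr<IDatabase>()>;

    static DatabaseFactoryLike & instance()
    {
        static DatabaseFactoryLike factory;   // Meyers singleton
        return factory;
    }

    void registerDatabase(const std::string & engine, Creator creator)
    {
        creators[engine] = std::move(creator);
    }

    std::unique_ptr<IDatabase> get(const std::string & engine) const
    {
        auto it = creators.find(engine);
        if (it == creators.end())
            throw std::runtime_error("Unknown database engine: " + engine);
        return it->second();
    }

private:
    DatabaseFactoryLike() = default;
    std::map<std::string, Creator> creators;
};
```

Call sites then always go through instance(), which also gives database engines a single place to register their creators at startup.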
@@ -1192,6 +1217,16 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) visitor.visit(*create.select); } + if (create.refresh_strategy) + { + if (!getContext()->getSettingsRef().allow_experimental_refreshable_materialized_view) + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, + "Refreshable materialized views are experimental. Enable allow_experimental_refreshable_materialized_view to use."); + + AddDefaultDatabaseVisitor visitor(getContext(), current_database); + visitor.visit(*create.refresh_strategy); + } + if (create.columns_list) { AddDefaultDatabaseVisitor visitor(getContext(), current_database); @@ -1223,7 +1258,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) { input_block = InterpreterSelectWithUnionQuery(create.select->clone(), getContext(), - SelectQueryOptions().analyze()).getSampleBlock(); + {}).getSampleBlock(); } Block output_block = to_table->getInMemoryMetadataPtr()->getSampleBlock(); @@ -1252,6 +1287,23 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) if (need_add_to_database) database = DatabaseCatalog::instance().tryGetDatabase(database_name); + if (database && database->getEngineName() == "Replicated" && create.select) + { + bool is_storage_replicated = false; + if (create.storage && create.storage->engine) + { + const auto & storage_name = create.storage->engine->name; + if (storage_name.starts_with("Replicated") || storage_name.starts_with("Shared")) + is_storage_replicated = true; + } + + const bool allow_create_select_for_replicated = create.isView() || create.is_create_empty || !is_storage_replicated; + if (!allow_create_select_for_replicated) + throw Exception( + ErrorCodes::SUPPORT_IS_DISABLED, + "CREATE AS SELECT is not supported with Replicated databases. Use separate CREATE and INSERT queries"); + } + if (need_add_to_database && database && database->shouldReplicateQuery(getContext(), query_ptr)) { chassert(!ddl_guard); @@ -1712,7 +1764,7 @@ void InterpreterCreateQuery::prepareOnClusterQuery(ASTCreateQuery & create, Cont throw Exception(ErrorCodes::INCORRECT_QUERY, "Seems like cluster is configured for cross-replication, " - "but zookeeper_path for ReplicatedMergeTree is not specified or contains {uuid} macro. " + "but zookeeper_path for ReplicatedMergeTree is not specified or contains {{uuid}} macro. " "It's not supported for cross replication, because tables must have different UUIDs. 
" "Please specify unique zookeeper_path explicitly."); } diff --git a/src/Interpreters/InterpreterDescribeCacheQuery.cpp b/src/Interpreters/InterpreterDescribeCacheQuery.cpp index 69b612eb2ef..54b43a8850b 100644 --- a/src/Interpreters/InterpreterDescribeCacheQuery.cpp +++ b/src/Interpreters/InterpreterDescribeCacheQuery.cpp @@ -42,8 +42,8 @@ BlockIO InterpreterDescribeCacheQuery::execute() MutableColumns res_columns = sample_block.cloneEmptyColumns(); auto cache_data = FileCacheFactory::instance().getByName(ast.cache_name); - const auto & settings = cache_data.settings; - const auto & cache = cache_data.cache; + auto settings = cache_data->getSettings(); + const auto & cache = cache_data->cache; size_t i = 0; res_columns[i++]->insert(settings.max_size); diff --git a/src/Interpreters/InterpreterDropFunctionQuery.cpp b/src/Interpreters/InterpreterDropFunctionQuery.cpp index af60d9c5df7..c2cd24044da 100644 --- a/src/Interpreters/InterpreterDropFunctionQuery.cpp +++ b/src/Interpreters/InterpreterDropFunctionQuery.cpp @@ -1,7 +1,7 @@ #include #include -#include +#include #include #include #include @@ -32,7 +32,7 @@ BlockIO InterpreterDropFunctionQuery::execute() if (!drop_function_query.cluster.empty()) { - if (current_context->getUserDefinedSQLObjectsLoader().isReplicated()) + if (current_context->getUserDefinedSQLObjectsStorage().isReplicated()) throw Exception(ErrorCodes::INCORRECT_QUERY, "ON CLUSTER is not allowed because used-defined functions are replicated automatically"); DDLQueryOnClusterParams params; diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 4f4e96a9be7..cdf1b4228bc 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -481,7 +481,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( /// Check support for FINAL for parallel replicas bool is_query_with_final = isQueryWithFinal(query_info); - if (is_query_with_final && settings.allow_experimental_parallel_reading_from_replicas > 0) + if (is_query_with_final && context->canUseTaskBasedParallelReplicas()) { if (settings.allow_experimental_parallel_reading_from_replicas == 1) { @@ -870,7 +870,38 @@ bool InterpreterSelectQuery::adjustParallelReplicasAfterAnalysis() ASTSelectQuery & query = getSelectQuery(); /// While only_analyze we don't know anything about parts, so any decision about how many parallel replicas to use would be wrong - if (!storage || options.only_analyze || !context->canUseParallelReplicasOnInitiator()) + if (!storage || !context->canUseParallelReplicasOnInitiator()) + return false; + + /// check if IN operator with subquery is present in the query + /// if so, disable parallel replicas + if (query_analyzer->getPreparedSets()->hasSubqueries()) + { + bool in_subqueries = false; + const auto & sets = query_analyzer->getPreparedSets(); + const auto subqueries = sets->getSubqueries(); + for (const auto & subquery : subqueries) + { + if (subquery->isINSubquery()) + { + in_subqueries = true; + break; + } + } + + if (in_subqueries) + { + if (settings.allow_experimental_parallel_reading_from_replicas == 2) + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "IN with subquery is not supported with parallel replicas"); + + context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); + context->setSetting("max_parallel_replicas", UInt64{0}); + LOG_DEBUG(log, "Disabling parallel replicas to execute a query with IN with subquery"); + return true; + } + } + + if (options.only_analyze) return 
false; if (getTrivialCount(0).has_value()) @@ -1698,7 +1729,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

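Nearby, adjustParallelReplicasAfterAnalysis() learns to back out of parallel replicas when the query uses IN with a subquery: with the setting forced (value 2) it throws SUPPORT_IS_DISABLED, otherwise it resets the setting, logs, and falls back to single-replica reading. A hedged sketch of that tri-state handling (enum values and messages are illustrative, not the real Settings API):

```cpp
#include <cstdio>
#include <stdexcept>

// 0 = disabled, 1 = use when possible, 2 = force or fail (hypothetical names).
enum class ParallelReplicasMode { Disabled = 0, BestEffort = 1, Forced = 2 };

// Returns true if parallel replicas were turned off for this query.
bool maybeDisableForInSubquery(ParallelReplicasMode & mode, bool query_has_in_subquery)
{
    if (!query_has_in_subquery || mode == ParallelReplicasMode::Disabled)
        return false;

    if (mode == ParallelReplicasMode::Forced)
        throw std::runtime_error("IN with subquery is not supported with parallel replicas");

    // Best effort: silently fall back to single-replica reading and log why.
    mode = ParallelReplicasMode::Disabled;
    std::puts("Disabling parallel replicas to execute a query with IN with subquery");
    return true;
}
```
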
pipelineType() == JoinPipelineType::YShaped) + if (expressions.join->pipelineType() == JoinPipelineType::YShaped && expressions.join->getTableJoin().kind() != JoinKind::Paste) { const auto & table_join = expressions.join->getTableJoin(); const auto & join_clause = table_join.getOnlyClause(); @@ -2006,7 +2037,7 @@ static void executeMergeAggregatedImpl( * but it can work more slowly. */ - Aggregator::Params params(keys, aggregates, overflow_row, settings.max_threads, settings.max_block_size); + Aggregator::Params params(keys, aggregates, overflow_row, settings.max_threads, settings.max_block_size, settings.min_hit_rate_to_use_consecutive_keys_optimization); auto merging_aggregated = std::make_unique( query_plan.getCurrentDataStream(), @@ -2672,6 +2703,7 @@ static Aggregator::Params getAggregatorParams( settings.enable_software_prefetch_in_aggregation, /* only_merge */ false, settings.optimize_group_by_constant_keys, + settings.min_hit_rate_to_use_consecutive_keys_optimization, stats_collecting_params }; } diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index db10d377cc1..db02ee13a4f 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -53,6 +54,7 @@ #include #include #include +#include #include #include #include @@ -92,6 +94,7 @@ namespace ErrorCodes extern const int TIMEOUT_EXCEEDED; extern const int TABLE_WAS_NOT_DROPPED; extern const int ABORTED; + extern const int SUPPORT_IS_DISABLED; } @@ -106,6 +109,7 @@ namespace ActionLocks extern const StorageActionBlockType PartsMove; extern const StorageActionBlockType PullReplicationLog; extern const StorageActionBlockType Cleanup; + extern const StorageActionBlockType ViewRefresh; } @@ -163,6 +167,8 @@ AccessType getRequiredAccessType(StorageActionBlockType action_type) return AccessType::SYSTEM_PULLING_REPLICATION_LOG; else if (action_type == ActionLocks::Cleanup) return AccessType::SYSTEM_CLEANUP; + else if (action_type == ActionLocks::ViewRefresh) + return AccessType::SYSTEM_VIEWS; else throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown action type: {}", std::to_string(action_type)); } @@ -287,7 +293,7 @@ BlockIO InterpreterSystemQuery::execute() { getContext()->checkAccess(AccessType::SYSTEM_SHUTDOWN); if (kill(0, SIGTERM)) - throwFromErrno("System call kill(0, SIGTERM) failed", ErrorCodes::CANNOT_KILL); + throw ErrnoException(ErrorCodes::CANNOT_KILL, "System call kill(0, SIGTERM) failed"); break; } case Type::KILL: @@ -378,7 +384,7 @@ BlockIO InterpreterSystemQuery::execute() } else { - auto cache = FileCacheFactory::instance().getByName(query.filesystem_cache_name).cache; + auto cache = FileCacheFactory::instance().getByName(query.filesystem_cache_name)->cache; if (query.key_to_drop.empty()) { cache->removeAllReleasable(); @@ -432,7 +438,7 @@ BlockIO InterpreterSystemQuery::execute() } else { - auto cache = FileCacheFactory::instance().getByName(query.filesystem_cache_name).cache; + auto cache = FileCacheFactory::instance().getByName(query.filesystem_cache_name)->cache; auto file_segments = cache->sync(); fill_data(query.filesystem_cache_name, cache, file_segments); } @@ -442,6 +448,10 @@ BlockIO InterpreterSystemQuery::execute() result.pipeline = QueryPipeline(std::move(source)); break; } + case Type::DROP_DISK_METADATA_CACHE: + { + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Not implemented"); + } case Type::DROP_SCHEMA_CACHE: { 
getContext()->checkAccess(AccessType::SYSTEM_DROP_SCHEMA_CACHE); @@ -599,6 +609,23 @@ BlockIO InterpreterSystemQuery::execute() case Type::START_CLEANUP: startStopAction(ActionLocks::Cleanup, true); break; + case Type::START_VIEW: + case Type::START_VIEWS: + startStopAction(ActionLocks::ViewRefresh, true); + break; + case Type::STOP_VIEW: + case Type::STOP_VIEWS: + startStopAction(ActionLocks::ViewRefresh, false); + break; + case Type::REFRESH_VIEW: + getRefreshTask()->run(); + break; + case Type::CANCEL_VIEW: + getRefreshTask()->cancel(); + break; + case Type::TEST_VIEW: + getRefreshTask()->setFakeTime(query.fake_time_for_view); + break; case Type::DROP_REPLICA: dropReplica(query); break; @@ -611,6 +638,10 @@ BlockIO InterpreterSystemQuery::execute() case Type::SYNC_DATABASE_REPLICA: syncReplicatedDatabase(query); break; + case Type::REPLICA_UNREADY: + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Not implemented"); + case Type::REPLICA_READY: + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Not implemented"); case Type::SYNC_TRANSACTION_LOG: syncTransactionLog(); break; @@ -954,7 +985,7 @@ void InterpreterSystemQuery::dropDatabaseReplica(ASTSystemQuery & query) if (auto * replicated = dynamic_cast(database.get())) { check_not_local_replica(replicated, query); - DatabaseReplicated::dropReplica(replicated, replicated->getZooKeeperPath(), query.shard, query.replica); + DatabaseReplicated::dropReplica(replicated, replicated->getZooKeeperPath(), query.shard, query.replica, /*throw_if_noop*/ true); } else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database {} is not Replicated, cannot drop replica", query.getDatabase()); @@ -979,7 +1010,7 @@ void InterpreterSystemQuery::dropDatabaseReplica(ASTSystemQuery & query) } check_not_local_replica(replicated, query); - DatabaseReplicated::dropReplica(replicated, replicated->getZooKeeperPath(), query.shard, query.replica); + DatabaseReplicated::dropReplica(replicated, replicated->getZooKeeperPath(), query.shard, query.replica, /*throw_if_noop*/ false); LOG_TRACE(log, "Dropped replica {} of Replicated database {}", query.replica, backQuoteIfNeed(database->getDatabaseName())); } } @@ -992,7 +1023,7 @@ void InterpreterSystemQuery::dropDatabaseReplica(ASTSystemQuery & query) if (auto * replicated = dynamic_cast(elem.second.get())) check_not_local_replica(replicated, query); - DatabaseReplicated::dropReplica(nullptr, query.replica_zk_path, query.shard, query.replica); + DatabaseReplicated::dropReplica(nullptr, query.replica_zk_path, query.shard, query.replica, /*throw_if_noop*/ true); LOG_INFO(log, "Dropped replica {} of Replicated database with path {}", query.replica, query.replica_zk_path); } else @@ -1082,6 +1113,17 @@ void InterpreterSystemQuery::flushDistributed(ASTSystemQuery &) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "SYSTEM RESTART DISK is not supported"); } +RefreshTaskHolder InterpreterSystemQuery::getRefreshTask() +{ + auto ctx = getContext(); + ctx->checkAccess(AccessType::SYSTEM_VIEWS); + auto task = ctx->getRefreshSet().getTask(table_id); + if (!task) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Refreshable view {} doesn't exist", table_id.getNameForLogs()); + return task; +} + AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() const { @@ -1119,6 +1161,8 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() required_access.emplace_back(AccessType::SYSTEM_DROP_CACHE); break; } + case Type::DROP_DISK_METADATA_CACHE: + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Not 
implemented"); case Type::RELOAD_DICTIONARY: case Type::RELOAD_DICTIONARIES: case Type::RELOAD_EMBEDDED_DICTIONARIES: @@ -1229,6 +1273,20 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES, query.getDatabase(), query.getTable()); break; } + case Type::REFRESH_VIEW: + case Type::START_VIEW: + case Type::START_VIEWS: + case Type::STOP_VIEW: + case Type::STOP_VIEWS: + case Type::CANCEL_VIEW: + case Type::TEST_VIEW: + { + if (!query.table) + required_access.emplace_back(AccessType::SYSTEM_VIEWS); + else + required_access.emplace_back(AccessType::SYSTEM_VIEWS, query.getDatabase(), query.getTable()); + break; + } case Type::DROP_REPLICA: case Type::DROP_DATABASE_REPLICA: { @@ -1245,6 +1303,9 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() required_access.emplace_back(AccessType::SYSTEM_SYNC_REPLICA, query.getDatabase(), query.getTable()); break; } + case Type::REPLICA_READY: + case Type::REPLICA_UNREADY: + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Not implemented"); case Type::RESTART_REPLICA: { required_access.emplace_back(AccessType::SYSTEM_RESTART_REPLICA, query.getDatabase(), query.getTable()); diff --git a/src/Interpreters/InterpreterSystemQuery.h b/src/Interpreters/InterpreterSystemQuery.h index 826d4cc0c69..89de7402b4d 100644 --- a/src/Interpreters/InterpreterSystemQuery.h +++ b/src/Interpreters/InterpreterSystemQuery.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -57,6 +58,7 @@ private: void restartReplica(const StorageID & replica, ContextMutablePtr system_context); void restartReplicas(ContextMutablePtr system_context); void syncReplica(ASTSystemQuery & query); + void setReplicaReadiness(bool ready); void waitLoadingParts(); void syncReplicatedDatabase(ASTSystemQuery & query); @@ -71,6 +73,8 @@ private: void flushDistributed(ASTSystemQuery & query); [[noreturn]] void restartDisk(String & name); + RefreshTaskHolder getRefreshTask(); + AccessRightsElements getRequiredAccessForDDLOnCluster() const; void startStopAction(StorageActionBlockType action_type, bool start); }; diff --git a/src/Interpreters/InterpreterWatchQuery.cpp b/src/Interpreters/InterpreterWatchQuery.cpp index e1af704a358..8865c47a785 100644 --- a/src/Interpreters/InterpreterWatchQuery.cpp +++ b/src/Interpreters/InterpreterWatchQuery.cpp @@ -61,7 +61,7 @@ QueryPipelineBuilder InterpreterWatchQuery::buildQueryPipeline() storage = DatabaseCatalog::instance().tryGetTable(table_id, getContext()); if (!storage) - throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {} doesn't exist.", table_id.getNameForLogs()); + throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {} does not exist.", table_id.getNameForLogs()); auto storage_name = storage->getName(); if (storage_name == "LiveView" diff --git a/src/Interpreters/JIT/CHJIT.cpp b/src/Interpreters/JIT/CHJIT.cpp index 63fe4f44f5f..046d0b4fc10 100644 --- a/src/Interpreters/JIT/CHJIT.cpp +++ b/src/Interpreters/JIT/CHJIT.cpp @@ -153,7 +153,7 @@ public: { int res = mprotect(block.base(), block.blockSize(), protection_flags | PROT_READ); if (res != 0) - throwFromErrno("Cannot mprotect memory region", ErrorCodes::CANNOT_MPROTECT); + throw ErrnoException(ErrorCodes::CANNOT_MPROTECT, "Cannot mprotect memory region"); llvm::sys::Memory::InvalidateInstructionCache(block.base(), block.blockSize()); invalidate_cache = false; @@ -161,7 +161,7 @@ public: # endif int res = mprotect(block.base(), block.blockSize(), 
protection_flags); if (res != 0) - throwFromErrno("Cannot mprotect memory region", ErrorCodes::CANNOT_MPROTECT); + throw ErrnoException(ErrorCodes::CANNOT_MPROTECT, "Cannot mprotect memory region"); if (invalidate_cache) llvm::sys::Memory::InvalidateInstructionCache(block.base(), block.blockSize()); @@ -232,10 +232,12 @@ private: int res = posix_memalign(&buf, page_size, allocate_size); if (res != 0) - throwFromErrno( - fmt::format("Cannot allocate memory (posix_memalign) alignment {} size {}.", page_size, ReadableSize(allocate_size)), + ErrnoException::throwWithErrno( ErrorCodes::CANNOT_ALLOCATE_MEMORY, - res); + res, + "Cannot allocate memory (posix_memalign) alignment {} size {}", + page_size, + ReadableSize(allocate_size)); page_blocks.emplace_back(buf, pages_to_allocate_size, page_size); page_blocks_allocated_size.emplace_back(0); @@ -244,6 +246,31 @@ private: } }; +#ifdef PRINT_ASSEMBLY + +class AssemblyPrinter +{ +public: + explicit AssemblyPrinter(llvm::TargetMachine &target_machine_) + : target_machine(target_machine_) + { + } + + void print(llvm::Module & module) + { + llvm::legacy::PassManager pass_manager; + target_machine.Options.MCOptions.AsmVerbose = true; + if (target_machine.addPassesToEmitFile(pass_manager, llvm::errs(), nullptr, llvm::CodeGenFileType::CGFT_AssemblyFile)) + throw Exception(ErrorCodes::CANNOT_COMPILE_CODE, "MachineCode cannot be printed"); + + pass_manager.run(module); + } +private: + llvm::TargetMachine & target_machine; +}; + +#endif + /** MemoryManager for module. * Keep total allocated size during RuntimeDyld linker execution. */ @@ -375,6 +402,11 @@ CHJIT::CompiledModule CHJIT::compileModule(std::unique_ptr module) { runOptimizationPassesOnModule(*module); +#ifdef PRINT_ASSEMBLY + AssemblyPrinter assembly_printer(*machine); + assembly_printer.print(*module); +#endif + auto buffer = compiler->compile(*module); llvm::Expected> object = llvm::object::ObjectFile::createObjectFile(*buffer); diff --git a/src/Interpreters/JoinUtils.cpp b/src/Interpreters/JoinUtils.cpp index 33c9dfa76ca..6bd202a1dd7 100644 --- a/src/Interpreters/JoinUtils.cpp +++ b/src/Interpreters/JoinUtils.cpp @@ -120,19 +120,11 @@ bool canBecomeNullable(const DataTypePtr & type) return can_be_inside; } -bool isNullable(const DataTypePtr & type) -{ - bool is_nullable = type->isNullable(); - if (const auto * low_cardinality_type = typeid_cast(type.get())) - is_nullable |= low_cardinality_type->getDictionaryType()->isNullable(); - return is_nullable; -} - /// Add nullability to type. 
/// Note: LowCardinality(T) transformed to LowCardinality(Nullable(T)) DataTypePtr convertTypeToNullable(const DataTypePtr & type) { - if (isNullable(type)) + if (isNullableOrLowCardinalityNullable(type)) return type; if (const auto * low_cardinality_type = typeid_cast(type.get())) @@ -323,20 +315,6 @@ ColumnRawPtrs materializeColumnsInplace(Block & block, const Names & names) return ptrs; } -ColumnPtrMap materializeColumnsInplaceMap(const Block & block, const Names & names) -{ - ColumnPtrMap ptrs; - ptrs.reserve(names.size()); - - for (const auto & column_name : names) - { - ColumnPtr column = block.getByName(column_name).column; - ptrs[column_name] = materializeColumn(column); - } - - return ptrs; -} - ColumnPtr materializeColumn(const Block & block, const String & column_name) { const auto & src_column = block.getByName(column_name).column; @@ -367,27 +345,6 @@ ColumnRawPtrs getRawPointers(const Columns & columns) return ptrs; } -void convertToFullColumnsInplace(Block & block) -{ - for (size_t i = 0; i < block.columns(); ++i) - { - auto & col = block.getByPosition(i); - col.column = recursiveRemoveLowCardinality(recursiveRemoveSparse(col.column)); - col.type = recursiveRemoveLowCardinality(col.type); - } -} - -void convertToFullColumnsInplace(Block & block, const Names & names, bool change_type) -{ - for (const String & column_name : names) - { - auto & col = block.getByName(column_name); - col.column = recursiveRemoveLowCardinality(recursiveRemoveSparse(col.column)); - if (change_type) - col.type = recursiveRemoveLowCardinality(col.type); - } -} - void restoreLowCardinalityInplace(Block & block, const Names & lowcard_keys) { for (const auto & column_name : lowcard_keys) @@ -517,8 +474,8 @@ void addDefaultValues(IColumn & column, const DataTypePtr & type, size_t count) bool typesEqualUpToNullability(DataTypePtr left_type, DataTypePtr right_type) { - DataTypePtr left_type_strict = removeNullable(recursiveRemoveLowCardinality(left_type)); - DataTypePtr right_type_strict = removeNullable(recursiveRemoveLowCardinality(right_type)); + DataTypePtr left_type_strict = removeNullable(removeLowCardinality(left_type)); + DataTypePtr right_type_strict = removeNullable(removeLowCardinality(right_type)); return left_type_strict->equals(*right_type_strict); } diff --git a/src/Interpreters/JoinUtils.h b/src/Interpreters/JoinUtils.h index f112ca22e5b..ff48f34d82c 100644 --- a/src/Interpreters/JoinUtils.h +++ b/src/Interpreters/JoinUtils.h @@ -59,7 +59,6 @@ private: }; -bool isNullable(const DataTypePtr & type); bool canBecomeNullable(const DataTypePtr & type); DataTypePtr convertTypeToNullable(const DataTypePtr & type); void convertColumnToNullable(ColumnWithTypeAndName & column); @@ -71,10 +70,7 @@ ColumnPtr emptyNotNullableClone(const ColumnPtr & column); ColumnPtr materializeColumn(const Block & block, const String & name); Columns materializeColumns(const Block & block, const Names & names); ColumnRawPtrs materializeColumnsInplace(Block & block, const Names & names); -ColumnPtrMap materializeColumnsInplaceMap(const Block & block, const Names & names); ColumnRawPtrs getRawPointers(const Columns & columns); -void convertToFullColumnsInplace(Block & block); -void convertToFullColumnsInplace(Block & block, const Names & names, bool change_type = true); void restoreLowCardinalityInplace(Block & block, const Names & lowcard_keys); ColumnRawPtrs extractKeysForJoin(const Block & block_keys, const Names & key_names_right); diff --git a/src/Interpreters/MergeJoin.cpp b/src/Interpreters/MergeJoin.cpp index 
30c62386ca3..f0427b5a6ca 100644 --- a/src/Interpreters/MergeJoin.cpp +++ b/src/Interpreters/MergeJoin.cpp @@ -138,6 +138,9 @@ Block extractMinMax(const Block & block, const Block & keys) } min_max.setColumns(std::move(columns)); + + for (auto & column : min_max) + column.column = column.column->convertToFullColumnIfLowCardinality(); return min_max; } @@ -224,6 +227,16 @@ public: MergeJoinCursor(const Block & block, const SortDescription & desc_) : impl(block, desc_) { + for (auto *& column : impl.sort_columns) + { + const auto * lowcard_column = typeid_cast(column); + if (lowcard_column) + { + auto & new_col = column_holder.emplace_back(lowcard_column->convertToFullColumn()); + column = new_col.get(); + } + } + /// SortCursorImpl can work with permutation, but MergeJoinCursor can't. if (impl.permutation) throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: MergeJoinCursor doesn't support permutation"); @@ -287,6 +300,7 @@ public: private: SortCursorImpl impl; + Columns column_holder; bool has_left_nullable = false; bool has_right_nullable = false; @@ -537,9 +551,6 @@ MergeJoin::MergeJoin(std::shared_ptr table_join_, const Block & right lowcard_right_keys.push_back(right_key); } - JoinCommon::convertToFullColumnsInplace(right_table_keys); - JoinCommon::convertToFullColumnsInplace(right_sample_block, key_names_right); - for (const auto & column : right_table_keys) if (required_right_keys.contains(column.name)) right_columns_to_add.insert(ColumnWithTypeAndName{nullptr, column.type, column.name}); @@ -662,9 +673,7 @@ bool MergeJoin::saveRightBlock(Block && block) Block MergeJoin::modifyRightBlock(const Block & src_block) const { - Block block = materializeBlock(src_block); - JoinCommon::convertToFullColumnsInplace(block, table_join->getOnlyClause().key_names_right); - return block; + return materializeBlock(src_block); } bool MergeJoin::addBlockToJoin(const Block & src_block, bool) @@ -705,8 +714,6 @@ void MergeJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) lowcard_keys.push_back(column_name); } - JoinCommon::convertToFullColumnsInplace(block, key_names_left, false); - sortBlock(block, left_sort_description); } @@ -739,8 +746,6 @@ void MergeJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) if (needConditionJoinColumn()) block.erase(deriveTempName(mask_column_name_left, JoinTableSide::Left)); - - JoinCommon::restoreLowCardinalityInplace(block, lowcard_keys); } template diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 8e56b08f1ed..bf50766c165 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -153,19 +154,29 @@ bool isStorageTouchedByMutations( return false; bool all_commands_can_be_skipped = true; - for (const MutationCommand & command : commands) + for (const auto & command : commands) { - if (!command.predicate) /// The command touches all rows. - return true; - - if (command.partition) + if (command.type == MutationCommand::APPLY_DELETED_MASK) { - const String partition_id = storage.getPartitionIDFromQuery(command.partition, context); - if (partition_id == source_part->info.partition_id) - all_commands_can_be_skipped = false; + if (source_part->hasLightweightDelete()) + return true; } else - all_commands_can_be_skipped = false; + { + if (!command.predicate) /// The command touches all rows. 
+ return true; + + if (command.partition) + { + const String partition_id = storage.getPartitionIDFromQuery(command.partition, context); + if (partition_id == source_part->info.partition_id) + all_commands_can_be_skipped = false; + } + else + { + all_commands_can_be_skipped = false; + } + } } if (all_commands_can_be_skipped) @@ -211,7 +222,6 @@ bool isStorageTouchedByMutations( return count != 0; } - ASTPtr getPartitionAndPredicateExpressionForMutationCommand( const MutationCommand & command, const StoragePtr & storage, @@ -244,6 +254,32 @@ ASTPtr getPartitionAndPredicateExpressionForMutationCommand( return command.predicate ? command.predicate->clone() : partition_predicate_as_ast_func; } + +MutationCommand createCommandToApplyDeletedMask(const MutationCommand & command) +{ + if (command.type != MutationCommand::APPLY_DELETED_MASK) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected APPLY_DELETED_MASK mutation command, got: {}", magic_enum::enum_name(command.type)); + + auto alter_command = std::make_shared(); + alter_command->type = ASTAlterCommand::DELETE; + alter_command->partition = command.partition; + + auto row_exists_predicate = makeASTFunction("equals", + std::make_shared(LightweightDeleteDescription::FILTER_COLUMN.name), + std::make_shared(Field(0))); + + if (command.predicate) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Mutation command APPLY DELETED MASK does not support WHERE clause"); + + alter_command->predicate = row_exists_predicate; + + auto mutation_command = MutationCommand::parse(alter_command.get()); + if (!mutation_command) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to parse command {}. It's a bug", queryToString(alter_command)); + + return *mutation_command; +} + MutationsInterpreter::Source::Source(StoragePtr storage_) : storage(std::move(storage_)) { } @@ -517,15 +553,18 @@ void MutationsInterpreter::prepare(bool dry_run) NameSet updated_columns; bool materialize_ttl_recalculate_only = source.materializeTTLRecalculateOnly(); - for (const MutationCommand & command : commands) + for (auto & command : commands) { - if (command.type == MutationCommand::Type::UPDATE - || command.type == MutationCommand::Type::DELETE) + if (command.type == MutationCommand::Type::APPLY_DELETED_MASK) + command = createCommandToApplyDeletedMask(command); + + if (command.type == MutationCommand::Type::UPDATE || command.type == MutationCommand::Type::DELETE) materialize_ttl_recalculate_only = false; for (const auto & [name, _] : command.column_to_update_expression) { - if (!available_columns_set.contains(name) && name != LightweightDeleteDescription::FILTER_COLUMN.name + if (!available_columns_set.contains(name) + && name != LightweightDeleteDescription::FILTER_COLUMN.name && name != BlockNumberColumn::name) throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Column {} is updated but not requested to read", name); @@ -574,7 +613,7 @@ void MutationsInterpreter::prepare(bool dry_run) std::vector read_columns; /// First, break a sequence of commands into stages. 
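
Note on the APPLY DELETED MASK handling introduced above: a lightweight DELETE only writes a _row_exists mask column (0 marks a deleted row), and createCommandToApplyDeletedMask turns the command into an ordinary DELETE mutation with the predicate _row_exists = 0. The self-contained sketch below only illustrates that semantics on plain vectors; TinyPart and applyDeletedMask are invented for the example and are not part of this patch.

#include <cstdint>
#include <iostream>
#include <vector>

// Toy model: a part holds a data column plus a lightweight-delete mask
// (_row_exists): 1 means the row is visible, 0 means it was deleted.
struct TinyPart
{
    std::vector<int64_t> values;
    std::vector<uint8_t> row_exists; // same length as `values`
};

// "APPLY DELETED MASK" behaves like DELETE WHERE _row_exists = 0:
// rows whose mask is 0 are physically dropped, and every surviving row
// has the mask set to 1 (so the mask column can later be dropped).
TinyPart applyDeletedMask(const TinyPart & part)
{
    TinyPart result;
    for (size_t i = 0; i < part.values.size(); ++i)
    {
        if (part.row_exists[i]) // keep only visible rows
        {
            result.values.push_back(part.values[i]);
            result.row_exists.push_back(1);
        }
    }
    return result;
}

int main()
{
    TinyPart part{{10, 20, 30, 40}, {1, 0, 1, 0}};
    TinyPart rewritten = applyDeletedMask(part);
    for (int64_t v : rewritten.values)
        std::cout << v << '\n'; // prints 10 and 30
}
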
- for (auto & command : commands) + for (const auto & command : commands) { // we can return deleted rows only if it's the only present command assert(command.type == MutationCommand::DELETE || command.type == MutationCommand::UPDATE || !settings.return_mutated_rows); @@ -585,7 +624,7 @@ void MutationsInterpreter::prepare(bool dry_run) if (stages.empty() || !stages.back().column_to_updated.empty()) stages.emplace_back(context); - auto predicate = getPartitionAndPredicateExpressionForMutationCommand(command); + auto predicate = getPartitionAndPredicateExpressionForMutationCommand(command); if (!settings.return_mutated_rows) predicate = makeASTFunction("isZeroOrNull", predicate); @@ -605,16 +644,12 @@ void MutationsInterpreter::prepare(bool dry_run) NameSet affected_materialized; - for (const auto & kv : command.column_to_update_expression) + for (const auto & [column_name, update_expr] : command.column_to_update_expression) { - const String & column = kv.first; - - auto materialized_it = column_to_affected_materialized.find(column); + auto materialized_it = column_to_affected_materialized.find(column_name); if (materialized_it != column_to_affected_materialized.end()) - { - for (const String & mat_column : materialized_it->second) + for (const auto & mat_column : materialized_it->second) affected_materialized.emplace(mat_column); - } /// When doing UPDATE column = expression WHERE condition /// we will replace column to the result of the following expression: @@ -627,33 +662,39 @@ void MutationsInterpreter::prepare(bool dry_run) /// Outer CAST is added just in case if we don't trust the returning type of 'if'. DataTypePtr type; - if (auto physical_column = columns_desc.tryGetPhysical(column)) + if (auto physical_column = columns_desc.tryGetPhysical(column_name)) + { type = physical_column->type; - else if (column == LightweightDeleteDescription::FILTER_COLUMN.name) + } + else if (column_name == LightweightDeleteDescription::FILTER_COLUMN.name) + { type = LightweightDeleteDescription::FILTER_COLUMN.type; - else if (column == BlockNumberColumn::name) + deleted_mask_updated = true; + } + else if (column_name == BlockNumberColumn::name) + { type = BlockNumberColumn::type; + } else - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown column {}", column); + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown column {}", column_name); + } auto type_literal = std::make_shared(type->getName()); - - const auto & update_expr = kv.second; - ASTPtr condition = getPartitionAndPredicateExpressionForMutationCommand(command); /// And new check validateNestedArraySizes for Nested subcolumns - if (isArray(type) && !Nested::splitName(column).second.empty()) + if (isArray(type) && !Nested::splitName(column_name).second.empty()) { std::shared_ptr function = nullptr; - auto nested_update_exprs = getExpressionsOfUpdatedNestedSubcolumns(column, all_columns, command.column_to_update_expression); + auto nested_update_exprs = getExpressionsOfUpdatedNestedSubcolumns(column_name, all_columns, command.column_to_update_expression); if (!nested_update_exprs) { function = makeASTFunction("validateNestedArraySizes", condition, update_expr->clone(), - std::make_shared(column)); + std::make_shared(column_name)); condition = makeASTFunction("and", condition, function); } else if (nested_update_exprs->size() > 1) @@ -675,10 +716,10 @@ void MutationsInterpreter::prepare(bool dry_run) makeASTFunction("_CAST", update_expr->clone(), type_literal), - std::make_shared(column)), + std::make_shared(column_name)), type_literal); - 
stages.back().column_to_updated.emplace(column, updated_column); + stages.back().column_to_updated.emplace(column_name, updated_column); if (condition && settings.return_mutated_rows) stages.back().filters.push_back(condition); @@ -986,27 +1027,42 @@ void MutationsInterpreter::prepareMutationStages(std::vector & prepared_s auto all_columns = storage_snapshot->getColumnsByNames(options, available_columns); /// Add _row_exists column if it is present in the part - if (source.hasLightweightDeleteMask()) - all_columns.push_back({LightweightDeleteDescription::FILTER_COLUMN}); + if (source.hasLightweightDeleteMask() || deleted_mask_updated) + all_columns.push_back(LightweightDeleteDescription::FILTER_COLUMN); + bool has_filters = false; /// Next, for each stage calculate columns changed by this and previous stages. for (size_t i = 0; i < prepared_stages.size(); ++i) { if (settings.return_all_columns || !prepared_stages[i].filters.empty()) { for (const auto & column : all_columns) + { + if (column.name == LightweightDeleteDescription::FILTER_COLUMN.name && !deleted_mask_updated) + continue; + prepared_stages[i].output_columns.insert(column.name); - continue; + } + + has_filters = true; + settings.apply_deleted_mask = true; } + else + { + if (i > 0) + prepared_stages[i].output_columns = prepared_stages[i - 1].output_columns; - if (i > 0) - prepared_stages[i].output_columns = prepared_stages[i - 1].output_columns; + /// Make sure that all updated columns are included into output_columns set. + /// This is important for a "hidden" column like _row_exists gets because it is a virtual column + /// and so it is not in the list of AllPhysical columns. + for (const auto & [column_name, _] : prepared_stages[i].column_to_updated) + { + if (column_name == LightweightDeleteDescription::FILTER_COLUMN.name && has_filters && !deleted_mask_updated) + continue; - /// Make sure that all updated columns are included into output_columns set. - /// This is important for a "hidden" column like _row_exists gets because it is a virtual column - /// and so it is not in the list of AllPhysical columns. - for (const auto & kv : prepared_stages[i].column_to_updated) - prepared_stages[i].output_columns.insert(kv.first); + prepared_stages[i].output_columns.insert(column_name); + } + } } /// Now, calculate `expressions_chain` for each stage except the first. @@ -1024,7 +1080,7 @@ void MutationsInterpreter::prepareMutationStages(std::vector & prepared_s all_asts->children.push_back(kv.second); /// Add all output columns to prevent ExpressionAnalyzer from deleting them from source columns. 
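
The output-column bookkeeping above boils down to: a stage with filters must expose every column, while other stages inherit the previous stage's outputs plus whatever they update, and the _row_exists mask is carried along only when the mutation actually reads or updates it. A rough, self-contained sketch of that propagation follows; StageSketch and propagate are invented names, and the real code additionally handles the _row_exists special cases shown above.

#include <iostream>
#include <set>
#include <string>
#include <vector>

struct StageSketch
{
    bool has_filters = false;
    std::set<std::string> updated;        // columns assigned in this stage
    std::set<std::string> output_columns; // filled by propagate()
};

// Simplified propagation: a filtering stage must output every column,
// otherwise a stage forwards what the previous stage produced plus
// whatever it updates itself.
void propagate(std::vector<StageSketch> & stages, const std::vector<std::string> & all_columns)
{
    for (size_t i = 0; i < stages.size(); ++i)
    {
        if (stages[i].has_filters)
        {
            stages[i].output_columns.insert(all_columns.begin(), all_columns.end());
        }
        else
        {
            if (i > 0)
                stages[i].output_columns = stages[i - 1].output_columns;
            for (const auto & name : stages[i].updated)
                stages[i].output_columns.insert(name);
        }
    }
}

int main()
{
    std::vector<StageSketch> stages(2);
    stages[0].has_filters = true;  // e.g. a DELETE predicate
    stages[1].updated = {"c"};     // e.g. UPDATE c = ...
    propagate(stages, {"a", "b", "c"});
    std::cout << stages[1].output_columns.size() << '\n'; // 3
}
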
- for (const String & column : stage.output_columns) + for (const auto & column : stage.output_columns) all_asts->children.push_back(std::make_shared(column)); /// Executing scalar subquery on that stage can lead to deadlock @@ -1081,7 +1137,6 @@ void MutationsInterpreter::prepareMutationStages(std::vector & prepared_s actions_chain.getLastStep().addRequiredOutput(name); actions_chain.getLastActions(); - actions_chain.finalize(); if (i) @@ -1224,7 +1279,7 @@ void MutationsInterpreter::Source::read( VirtualColumns virtual_columns(std::move(required_columns), part); - createMergeTreeSequentialSource( + createReadFromPartStep( plan, *data, storage_snapshot, part, std::move(virtual_columns.columns_to_read), apply_deleted_mask_, filter, context_, diff --git a/src/Interpreters/MutationsInterpreter.h b/src/Interpreters/MutationsInterpreter.h index 1372ea77f4f..eda94190185 100644 --- a/src/Interpreters/MutationsInterpreter.h +++ b/src/Interpreters/MutationsInterpreter.h @@ -32,6 +32,8 @@ ASTPtr getPartitionAndPredicateExpressionForMutationCommand( ContextPtr context ); +MutationCommand createCommandToApplyDeletedMask(const MutationCommand & command); + /// Create an input stream that will read data from storage and apply mutation commands (UPDATEs, DELETEs, MATERIALIZEs) /// to this data. class MutationsInterpreter @@ -213,6 +215,7 @@ private: std::unique_ptr updated_header; std::vector stages; bool is_prepared = false; /// Has the sequence of stages been prepared. + bool deleted_mask_updated = false; NameSet materialized_indices; NameSet materialized_projections; diff --git a/src/Interpreters/PartLog.cpp b/src/Interpreters/PartLog.cpp index 973c5260ea1..338775bfb0c 100644 --- a/src/Interpreters/PartLog.cpp +++ b/src/Interpreters/PartLog.cpp @@ -245,6 +245,7 @@ bool PartLog::addNewParts( elem.part_type = part->getType(); elem.bytes_compressed_on_disk = part->getBytesOnDisk(); + elem.bytes_uncompressed = part->getBytesUncompressedOnDisk(); elem.rows = part->rows_count; elem.error = static_cast(execution_status.code); diff --git a/src/Interpreters/PasteJoin.h b/src/Interpreters/PasteJoin.h new file mode 100644 index 00000000000..df7bb2f280c --- /dev/null +++ b/src/Interpreters/PasteJoin.h @@ -0,0 +1,96 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int NOT_IMPLEMENTED; +} + +/// Dummy class, actual joining is done by MergeTransform +class PasteJoin : public IJoin +{ +public: + explicit PasteJoin(std::shared_ptr table_join_, const Block & right_sample_block_) + : table_join(table_join_) + , right_sample_block(right_sample_block_) + { + LOG_TRACE(&Poco::Logger::get("PasteJoin"), "Will use paste join"); + } + + std::string getName() const override { return "PasteJoin"; } + const TableJoin & getTableJoin() const override { return *table_join; } + + bool addBlockToJoin(const Block & /* block */, bool /* check_limits */) override + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "PasteJoin::addBlockToJoin should not be called"); + } + + static bool isSupported(const std::shared_ptr & table_join) + { + bool support_storage = !table_join->isSpecialStorage(); + + /// Key column can change nullability and it's not handled on type conversion stage, so algorithm should be aware of it + bool support_using = !table_join->hasUsing(); + + bool check_strictness = table_join->strictness() == JoinStrictness::All; + + bool if_has_keys = table_join->getClauses().empty(); + + return support_using 
&& support_storage && check_strictness && if_has_keys; + } + + void checkTypesOfKeys(const Block & /*left_block*/) const override + { + if (!isSupported(table_join)) + throw DB::Exception(ErrorCodes::NOT_IMPLEMENTED, "PasteJoin doesn't support specified query"); + } + + /// Used just to get result header + void joinBlock(Block & block, std::shared_ptr & /* not_processed */) override + { + for (const auto & col : right_sample_block) + block.insert(col); + block = materializeBlock(block).cloneEmpty(); + } + + void setTotals(const Block & block) override { totals = block; } + const Block & getTotals() const override { return totals; } + + size_t getTotalRowCount() const override + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "PasteJoin::getTotalRowCount should not be called"); + } + + size_t getTotalByteCount() const override + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "PasteJoin::getTotalByteCount should not be called"); + } + + bool alwaysReturnsEmptySet() const override { return false; } + + IBlocksStreamPtr + getNonJoinedBlocks(const Block & /* left_sample_block */, const Block & /* result_sample_block */, UInt64 /* max_block_size */) const override + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "PasteJoin::getNonJoinedBlocks should not be called"); + } + + /// Left and right streams have the same priority and are processed simultaneously + JoinPipelineType pipelineType() const override { return JoinPipelineType::YShaped; } + +private: + std::shared_ptr table_join; + Block right_sample_block; + Block totals; +}; + +} diff --git a/src/Interpreters/PreparedSets.cpp b/src/Interpreters/PreparedSets.cpp index 955d8892284..18a25482b7f 100644 --- a/src/Interpreters/PreparedSets.cpp +++ b/src/Interpreters/PreparedSets.cpp @@ -98,10 +98,12 @@ FutureSetFromSubquery::FutureSetFromSubquery( std::unique_ptr source_, StoragePtr external_table_, FutureSetPtr external_table_set_, - const Settings & settings) + const Settings & settings, + bool in_subquery_) : external_table(std::move(external_table_)) , external_table_set(std::move(external_table_set_)) , source(std::move(source_)) + , in_subquery(in_subquery_) { set_and_key = std::make_shared(); set_and_key->key = std::move(key); @@ -261,14 +263,16 @@ FutureSetPtr PreparedSets::addFromSubquery( std::unique_ptr source, StoragePtr external_table, FutureSetPtr external_table_set, - const Settings & settings) + const Settings & settings, + bool in_subquery) { auto from_subquery = std::make_shared( toString(key, {}), std::move(source), std::move(external_table), std::move(external_table_set), - settings); + settings, + in_subquery); auto [it, inserted] = sets_from_subqueries.emplace(key, from_subquery); @@ -318,6 +322,15 @@ std::shared_ptr PreparedSets::findSubquery(const Hash & k return it->second; } +void PreparedSets::markAsINSubquery(const Hash & key) +{ + auto it = sets_from_subqueries.find(key); + if (it == sets_from_subqueries.end()) + return; + + it->second->markAsINSubquery(); +} + std::shared_ptr PreparedSets::findStorage(const Hash & key) const { auto it = sets_from_storage.find(key); @@ -327,11 +340,11 @@ std::shared_ptr PreparedSets::findStorage(const Hash & key return it->second; } -PreparedSets::Subqueries PreparedSets::getSubqueries() +PreparedSets::Subqueries PreparedSets::getSubqueries() const { PreparedSets::Subqueries res; res.reserve(sets_from_subqueries.size()); - for (auto & [_, set] : sets_from_subqueries) + for (const auto & [_, set] : sets_from_subqueries) res.push_back(set); return res; diff --git 
a/src/Interpreters/PreparedSets.h b/src/Interpreters/PreparedSets.h index e237789c63c..9f8bac9f71c 100644 --- a/src/Interpreters/PreparedSets.h +++ b/src/Interpreters/PreparedSets.h @@ -59,7 +59,7 @@ using FutureSetPtr = std::shared_ptr; class FutureSetFromStorage final : public FutureSet { public: - FutureSetFromStorage(SetPtr set_); + explicit FutureSetFromStorage(SetPtr set_); SetPtr get() const override; DataTypes getTypes() const override; @@ -97,7 +97,8 @@ public: std::unique_ptr source_, StoragePtr external_table_, FutureSetPtr external_table_set_, - const Settings & settings); + const Settings & settings, + bool in_subquery_); FutureSetFromSubquery( String key, @@ -112,6 +113,8 @@ public: QueryTreeNodePtr detachQueryTree() { return std::move(query_tree); } void setQueryPlan(std::unique_ptr source_); + void markAsINSubquery() { in_subquery = true; } + bool isINSubquery() const { return in_subquery; } private: SetAndKeyPtr set_and_key; @@ -120,6 +123,11 @@ private: std::unique_ptr source; QueryTreeNodePtr query_tree; + bool in_subquery = false; // subquery used in IN operator + // the flag can be removed after enabling new analyzer and removing interpreter + // or after enabling support IN operator with subqueries in parallel replicas + // Note: it's necessary with interpreter since prepared sets used also for GLOBAL JOINs, + // with new analyzer it's not a case }; /// Container for all the sets used in query. @@ -145,7 +153,8 @@ public: std::unique_ptr source, StoragePtr external_table, FutureSetPtr external_table_set, - const Settings & settings); + const Settings & settings, + bool in_subquery = false); FutureSetPtr addFromSubquery( const Hash & key, @@ -155,9 +164,11 @@ public: FutureSetPtr findTuple(const Hash & key, const DataTypes & types) const; std::shared_ptr findStorage(const Hash & key) const; std::shared_ptr findSubquery(const Hash & key) const; + void markAsINSubquery(const Hash & key); using Subqueries = std::vector>; - Subqueries getSubqueries(); + Subqueries getSubqueries() const; + bool hasSubqueries() const { return !sets_from_subqueries.empty(); } const SetsFromTuple & getSetsFromTuple() const { return sets_from_tuple; } // const SetsFromStorage & getSetsFromStorage() const { return sets_from_storage; } diff --git a/src/Interpreters/RequiredSourceColumnsVisitor.cpp b/src/Interpreters/RequiredSourceColumnsVisitor.cpp index 1bcec02f0c0..c07d783788a 100644 --- a/src/Interpreters/RequiredSourceColumnsVisitor.cpp +++ b/src/Interpreters/RequiredSourceColumnsVisitor.cpp @@ -8,6 +8,7 @@ #include #include #include +#include namespace DB { @@ -126,7 +127,7 @@ void RequiredSourceColumnsMatcher::visit(const ASTSelectQuery & select, const AS if (const auto * identifier = node->as()) data.addColumnIdentifier(*identifier); - else + else if (!node->as()) data.addColumnAliasIfAny(*node); } diff --git a/src/Interpreters/ServerAsynchronousMetrics.cpp b/src/Interpreters/ServerAsynchronousMetrics.cpp index 84d31bae13f..31d4a4e51a4 100644 --- a/src/Interpreters/ServerAsynchronousMetrics.cpp +++ b/src/Interpreters/ServerAsynchronousMetrics.cpp @@ -54,8 +54,8 @@ ServerAsynchronousMetrics::ServerAsynchronousMetrics( int update_period_seconds, int heavy_metrics_update_period_seconds, const ProtocolServerMetricsFunc & protocol_server_metrics_func_) - : AsynchronousMetrics(update_period_seconds, protocol_server_metrics_func_) - , WithContext(global_context_) + : WithContext(global_context_) + , AsynchronousMetrics(update_period_seconds, protocol_server_metrics_func_) , 
heavy_metric_update_period(heavy_metrics_update_period_seconds) { /// sanity check @@ -63,6 +63,12 @@ ServerAsynchronousMetrics::ServerAsynchronousMetrics( throw Exception(ErrorCodes::INVALID_SETTING_VALUE, "Setting asynchronous_metrics_update_period_s and asynchronous_heavy_metrics_update_period_s must not be zero"); } +ServerAsynchronousMetrics::~ServerAsynchronousMetrics() +{ + /// NOTE: stop() from base class is not enough, since this leads to leak on vptr + stop(); +} + void ServerAsynchronousMetrics::updateImpl(AsynchronousMetricValues & new_values, TimePoint update_time, TimePoint current_time) { if (auto mark_cache = getContext()->getMarkCache()) @@ -249,6 +255,9 @@ void ServerAsynchronousMetrics::updateImpl(AsynchronousMetricValues & new_values size_t total_number_of_rows_system = 0; size_t total_number_of_parts_system = 0; + size_t total_primary_key_bytes_memory = 0; + size_t total_primary_key_bytes_memory_allocated = 0; + for (const auto & db : databases) { /// Check if database can contain MergeTree tables @@ -287,6 +296,15 @@ void ServerAsynchronousMetrics::updateImpl(AsynchronousMetricValues & new_values total_number_of_rows_system += rows; total_number_of_parts_system += parts; } + + // only fetch the parts which are in active state + auto all_parts = table_merge_tree->getDataPartsVectorForInternalUsage(); + + for (const auto & part : all_parts) + { + total_primary_key_bytes_memory += part->getIndexSizeInBytes(); + total_primary_key_bytes_memory_allocated += part->getIndexSizeInAllocatedBytes(); + } } if (StorageReplicatedMergeTree * table_replicated_merge_tree = typeid_cast(table.get())) @@ -341,11 +359,14 @@ void ServerAsynchronousMetrics::updateImpl(AsynchronousMetricValues & new_values new_values["TotalPartsOfMergeTreeTables"] = { total_number_of_parts, "Total amount of data parts in all tables of MergeTree family." " Numbers larger than 10 000 will negatively affect the server startup time and it may indicate unreasonable choice of the partition key." }; - new_values["NumberOfTablesSystem"] = { total_number_of_tables_system, "Total number of tables in the system database on the server stored in tables of MergeTree family."}; + new_values["NumberOfTablesSystem"] = { total_number_of_tables_system, "Total number of tables in the system database on the server stored in tables of MergeTree family." }; new_values["TotalBytesOfMergeTreeTablesSystem"] = { total_number_of_bytes_system, "Total amount of bytes (compressed, including data and indices) stored in tables of MergeTree family in the system database." }; new_values["TotalRowsOfMergeTreeTablesSystem"] = { total_number_of_rows_system, "Total amount of rows (records) stored in tables of MergeTree family in the system database." }; new_values["TotalPartsOfMergeTreeTablesSystem"] = { total_number_of_parts_system, "Total amount of data parts in tables of MergeTree family in the system database." }; + + new_values["TotalPrimaryKeyBytesInMemory"] = { total_primary_key_bytes_memory, "The total amount of memory (in bytes) used by primary key values (only takes active parts into account)." }; + new_values["TotalPrimaryKeyBytesInMemoryAllocated"] = { total_primary_key_bytes_memory_allocated, "The total amount of memory (in bytes) reserved for primary key values (only takes active parts into account)." 
}; } #if USE_NURAFT diff --git a/src/Interpreters/ServerAsynchronousMetrics.h b/src/Interpreters/ServerAsynchronousMetrics.h index 8243699a111..a579d12de2c 100644 --- a/src/Interpreters/ServerAsynchronousMetrics.h +++ b/src/Interpreters/ServerAsynchronousMetrics.h @@ -7,7 +7,7 @@ namespace DB { -class ServerAsynchronousMetrics : public AsynchronousMetrics, WithContext +class ServerAsynchronousMetrics : WithContext, public AsynchronousMetrics { public: ServerAsynchronousMetrics( @@ -15,6 +15,8 @@ public: int update_period_seconds, int heavy_metrics_update_period_seconds, const ProtocolServerMetricsFunc & protocol_server_metrics_func_); + ~ServerAsynchronousMetrics() override; + private: void updateImpl(AsynchronousMetricValues & new_values, TimePoint update_time, TimePoint current_time) override; void logImpl(AsynchronousMetricValues & new_values) override; diff --git a/src/Interpreters/Session.cpp b/src/Interpreters/Session.cpp index 162772061b5..d2f9fe8b325 100644 --- a/src/Interpreters/Session.cpp +++ b/src/Interpreters/Session.cpp @@ -112,8 +112,7 @@ public: throw Exception(ErrorCodes::SESSION_NOT_FOUND, "Session {} not found", session_id); /// Create a new session from current context. - auto context = Context::createCopy(global_context); - it = sessions.insert(std::make_pair(key, std::make_shared(key, context, timeout, *this))).first; + it = sessions.insert(std::make_pair(key, std::make_shared(key, global_context, timeout, *this))).first; const auto & session = it->second; if (!thread.joinable()) @@ -128,7 +127,7 @@ public: /// Use existing session. const auto & session = it->second; - LOG_TEST(log, "Reuse session from storage with session_id: {}, user_id: {}", key.second, key.first); + LOG_TRACE(log, "Reuse session from storage with session_id: {}, user_id: {}", key.second, key.first); if (!session.unique()) throw Exception(ErrorCodes::SESSION_IS_LOCKED, "Session {} is locked by a concurrent client", session_id); @@ -703,6 +702,10 @@ void Session::releaseSessionID() { if (!named_session) return; + + prepared_client_info = getClientInfo(); + session_context.reset(); + named_session->release(); named_session = nullptr; } diff --git a/src/Interpreters/Session.h b/src/Interpreters/Session.h index 2249d8fbb2f..75e1414b8cb 100644 --- a/src/Interpreters/Session.h +++ b/src/Interpreters/Session.h @@ -8,6 +8,7 @@ #include #include +#include #include namespace Poco::Net { class SocketAddress; } diff --git a/src/Interpreters/TableJoin.cpp b/src/Interpreters/TableJoin.cpp index fa289b82aaf..5f3492f0871 100644 --- a/src/Interpreters/TableJoin.cpp +++ b/src/Interpreters/TableJoin.cpp @@ -34,6 +34,7 @@ #include #include +#include namespace DB { @@ -375,7 +376,7 @@ void TableJoin::addJoinedColumnsAndCorrectTypesImpl(TColumns & left_columns, boo * For `JOIN ON expr1 == expr2` we will infer common type later in makeTableJoin, * when part of plan built and types of expression will be known. 
*/ - inferJoinKeyCommonType(left_columns, columns_from_joined_table, !isSpecialStorage(), isEnabledAlgorithm(JoinAlgorithm::FULL_SORTING_MERGE)); + inferJoinKeyCommonType(left_columns, columns_from_joined_table, !isSpecialStorage()); if (auto it = left_type_map.find(col.name); it != left_type_map.end()) { @@ -558,7 +559,8 @@ TableJoin::createConvertingActions( */ NameToNameMap left_column_rename; NameToNameMap right_column_rename; - inferJoinKeyCommonType(left_sample_columns, right_sample_columns, !isSpecialStorage(), isEnabledAlgorithm(JoinAlgorithm::FULL_SORTING_MERGE)); + + inferJoinKeyCommonType(left_sample_columns, right_sample_columns, !isSpecialStorage()); if (!left_type_map.empty() || !right_type_map.empty()) { left_dag = applyKeyConvertToTable(left_sample_columns, left_type_map, JoinTableSide::Left, left_column_rename); @@ -612,8 +614,11 @@ TableJoin::createConvertingActions( } template -void TableJoin::inferJoinKeyCommonType(const LeftNamesAndTypes & left, const RightNamesAndTypes & right, bool allow_right, bool strict) +void TableJoin::inferJoinKeyCommonType(const LeftNamesAndTypes & left, const RightNamesAndTypes & right, bool allow_right) { + /// FullSortingMerge and PartialMerge join algorithms don't support joining keys with different types + /// (e.g. String and LowCardinality(String)) + bool require_strict_keys_match = isEnabledAlgorithm(JoinAlgorithm::FULL_SORTING_MERGE); if (!left_type_map.empty() || !right_type_map.empty()) return; @@ -645,7 +650,7 @@ void TableJoin::inferJoinKeyCommonType(const LeftNamesAndTypes & left, const Rig const auto & ltype = ltypeit->second; const auto & rtype = rtypeit->second; - bool type_equals = strict ? ltype->equals(*rtype) : JoinCommon::typesEqualUpToNullability(ltype, rtype); + bool type_equals = require_strict_keys_match ? ltype->equals(*rtype) : JoinCommon::typesEqualUpToNullability(ltype, rtype); if (type_equals) return true; diff --git a/src/Interpreters/TableJoin.h b/src/Interpreters/TableJoin.h index f97e6a74b8c..247835d9c53 100644 --- a/src/Interpreters/TableJoin.h +++ b/src/Interpreters/TableJoin.h @@ -218,7 +218,7 @@ private: /// Calculates common supertypes for corresponding join key columns. 
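
To illustrate the require_strict_keys_match distinction used above: the full sorting merge join needs join key types to match exactly, while other algorithms only compare types after peeling the Nullable and LowCardinality wrappers (what typesEqualUpToNullability does). The toy model below is invented purely for illustration and does not mirror the real IDataType hierarchy.

#include <iostream>
#include <string>

// Toy type descriptor: a base name plus optional wrappers, standing in
// for e.g. LowCardinality(Nullable(String)) vs String.
struct ToyType
{
    std::string base;             // "String", "UInt64", ...
    bool nullable = false;        // wrapped in Nullable(...)
    bool low_cardinality = false; // wrapped in LowCardinality(...)
};

bool equalsStrict(const ToyType & l, const ToyType & r)
{
    return l.base == r.base && l.nullable == r.nullable && l.low_cardinality == r.low_cardinality;
}

// "Equal up to nullability": ignore the Nullable and LowCardinality wrappers
// and compare only the underlying type.
bool equalsUpToNullability(const ToyType & l, const ToyType & r)
{
    return l.base == r.base;
}

bool keyTypesMatch(const ToyType & l, const ToyType & r, bool require_strict_keys_match)
{
    return require_strict_keys_match ? equalsStrict(l, r) : equalsUpToNullability(l, r);
}

int main()
{
    ToyType plain{"String"};
    ToyType lc_nullable{"String", /*nullable=*/true, /*low_cardinality=*/true};

    // A hash join can treat these as the same key type; a full sorting merge
    // join cannot, so a conversion step has to be inserted for it.
    std::cout << keyTypesMatch(plain, lc_nullable, /*strict=*/false) << '\n'; // 1
    std::cout << keyTypesMatch(plain, lc_nullable, /*strict=*/true) << '\n';  // 0
}
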
template - void inferJoinKeyCommonType(const LeftNamesAndTypes & left, const RightNamesAndTypes & right, bool allow_right, bool strict); + void inferJoinKeyCommonType(const LeftNamesAndTypes & left, const RightNamesAndTypes & right, bool allow_right); void deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix); diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index 161535afa68..96aa642295c 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -106,7 +106,10 @@ FileSegmentsHolderPtr TemporaryDataOnDisk::createCacheFile(size_t max_file_size) const auto key = FileSegment::Key::random(); auto holder = file_cache->set(key, 0, std::max(10_MiB, max_file_size), CreateFileSegmentSettings(FileSegmentKind::Temporary, /* unbounded */ true)); - fs::create_directories(file_cache->getPathInLocalCache(key)); + + chassert(holder->size() == 1); + holder->back().getKeyMetadata()->createBaseDirectory(); + return holder; } diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp index 3189f093b50..7ae137c01d2 100644 --- a/src/Interpreters/ThreadStatusExt.cpp +++ b/src/Interpreters/ThreadStatusExt.cpp @@ -120,7 +120,7 @@ ThreadGroupPtr ThreadGroup::createForBackgroundProcess(ContextPtr storage_contex void ThreadGroup::attachQueryForLog(const String & query_, UInt64 normalized_hash) { - auto hash = normalized_hash ? normalized_hash : normalizedQueryHash(query_); + auto hash = normalized_hash ? normalized_hash : normalizedQueryHash(query_, false); std::lock_guard lock(mutex); shared_data.query_for_logs = query_; @@ -130,7 +130,7 @@ void ThreadGroup::attachQueryForLog(const String & query_, UInt64 normalized_has void ThreadStatus::attachQueryForLog(const String & query_) { local_data.query_for_logs = query_; - local_data.normalized_query_hash = normalizedQueryHash(query_); + local_data.normalized_query_hash = normalizedQueryHash(query_, false); if (!thread_group) throw Exception(ErrorCodes::LOGICAL_ERROR, "No thread group attached to the thread {}", thread_id); @@ -221,7 +221,7 @@ void ThreadStatus::applyQuerySettings() LOG_TRACE(log, "Setting nice to {}", new_os_thread_priority); if (0 != setpriority(PRIO_PROCESS, static_cast(thread_id), new_os_thread_priority)) - throwFromErrno("Cannot 'setpriority'", ErrorCodes::CANNOT_SET_THREAD_PRIORITY); + throw ErrnoException(ErrorCodes::CANNOT_SET_THREAD_PRIORITY, "Cannot 'setpriority'"); os_thread_priority = new_os_thread_priority; } diff --git a/src/Interpreters/TreeCNFConverter.cpp b/src/Interpreters/TreeCNFConverter.cpp index 1613b09ee48..d2c7300c80c 100644 --- a/src/Interpreters/TreeCNFConverter.cpp +++ b/src/Interpreters/TreeCNFConverter.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include @@ -239,7 +240,8 @@ CNFQuery TreeCNFConverter::toCNF( if (!cnf) throw Exception(ErrorCodes::TOO_MANY_TEMPORARY_COLUMNS, "Cannot convert expression '{}' to CNF, because it produces to many clauses." 
- "Size of boolean formula in CNF can be exponential of size of source formula."); + "Size of boolean formula in CNF can be exponential of size of source formula.", + queryToString(query)); return *cnf; } diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index c63aae32090..9cbf24091e3 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -73,6 +73,7 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int UNKNOWN_IDENTIFIER; + extern const int UNEXPECTED_EXPRESSION; } namespace @@ -776,6 +777,37 @@ void expandGroupByAll(ASTSelectQuery * select_query) select_query->setExpression(ASTSelectQuery::Expression::GROUP_BY, group_expression_list); } +void expandOrderByAll(ASTSelectQuery * select_query) +{ + auto * all_elem = select_query->orderBy()->children[0]->as(); + if (!all_elem) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Select analyze for not order by asts."); + + auto order_expression_list = std::make_shared(); + + for (const auto & expr : select_query->select()->children) + { + if (auto * identifier = expr->as(); identifier != nullptr) + if (Poco::toUpper(identifier->name()) == "ALL" || Poco::toUpper(identifier->alias) == "ALL") + throw Exception(ErrorCodes::UNEXPECTED_EXPRESSION, + "Cannot use ORDER BY ALL to sort a column with name 'all', please disable setting `enable_order_by_all` and try again"); + + if (auto * function = expr->as(); function != nullptr) + if (Poco::toUpper(function->alias) == "ALL") + throw Exception(ErrorCodes::UNEXPECTED_EXPRESSION, + "Cannot use ORDER BY ALL to sort a column with name 'all', please disable setting `enable_order_by_all` and try again"); + + auto elem = std::make_shared(); + elem->direction = all_elem->direction; + elem->nulls_direction = all_elem->nulls_direction; + elem->nulls_direction_was_explicitly_specified = all_elem->nulls_direction_was_explicitly_specified; + elem->children.push_back(expr); + order_expression_list->children.push_back(elem); + } + + select_query->setExpression(ASTSelectQuery::Expression::ORDER_BY, order_expression_list); +} + ASTs getAggregates(ASTPtr & query, const ASTSelectQuery & select_query) { /// There can not be aggregate functions inside the WHERE and PREWHERE. @@ -1292,6 +1324,10 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect( if (select_query->group_by_all) expandGroupByAll(select_query); + // expand ORDER BY ALL + if (settings.enable_order_by_all && select_query->order_by_all) + expandOrderByAll(select_query); + /// Remove unneeded columns according to 'required_result_columns'. /// Leave all selected columns in case of DISTINCT; columns that contain arrayJoin function inside. 
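
As a plain illustration of what expandOrderByAll above produces: ORDER BY ALL is rewritten into one ORDER BY element per SELECT expression, each inheriting the direction and NULLS placement given with ALL (and the rewrite is rejected if a selected column is itself named or aliased "all"). The sketch below models the expansion over simple strings; OrderByItem and the free function are invented for the example and are unrelated to the AST classes in this patch.

#include <iostream>
#include <string>
#include <vector>

struct OrderByItem
{
    std::string expression;
    int direction = 1;  // 1 = ASC, -1 = DESC
    bool nulls_first = false;
};

// Expand "ORDER BY ALL [DESC] [NULLS FIRST|LAST]" into one item per
// SELECT expression, copying the modifiers from the ALL element.
std::vector<OrderByItem> expandOrderByAllSketch(
    const std::vector<std::string> & select_expressions,
    int direction,
    bool nulls_first)
{
    std::vector<OrderByItem> result;
    result.reserve(select_expressions.size());
    for (const auto & expr : select_expressions)
        result.push_back({expr, direction, nulls_first});
    return result;
}

int main()
{
    // SELECT a, b, c ... ORDER BY ALL DESC
    auto items = expandOrderByAllSketch({"a", "b", "c"}, /*direction=*/-1, /*nulls_first=*/false);
    for (const auto & item : items)
        std::cout << item.expression << (item.direction == -1 ? " DESC" : " ASC") << '\n';
}
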
/// Must be after 'normalizeTree' (after expanding aliases, for aliases not get lost) diff --git a/src/Interpreters/executeDDLQueryOnCluster.cpp b/src/Interpreters/executeDDLQueryOnCluster.cpp index 5d8a9e0582d..9486350a0f6 100644 --- a/src/Interpreters/executeDDLQueryOnCluster.cpp +++ b/src/Interpreters/executeDDLQueryOnCluster.cpp @@ -41,12 +41,9 @@ static ZooKeeperRetriesInfo getRetriesInfo() { const auto & config_ref = Context::getGlobalContextInstance()->getConfigRef(); return ZooKeeperRetriesInfo( - "DistributedDDL", - &Poco::Logger::get("DDLQueryStatusSource"), config_ref.getInt("distributed_ddl_keeper_max_retries", 5), config_ref.getInt("distributed_ddl_keeper_initial_backoff_ms", 100), - config_ref.getInt("distributed_ddl_keeper_max_backoff_ms", 5000) - ); + config_ref.getInt("distributed_ddl_keeper_max_backoff_ms", 5000)); } bool isSupportedAlterTypeForOnClusterDDLQuery(int type) @@ -438,8 +435,8 @@ Chunk DDLQueryStatusSource::generate() Strings tmp_active_hosts; { - auto retries_info = getRetriesInfo(); - auto retries_ctl = ZooKeeperRetriesControl("executeDDLQueryOnCluster", retries_info, context->getProcessListElement()); + auto retries_ctl = ZooKeeperRetriesControl( + "executeDDLQueryOnCluster", &Poco::Logger::get("DDLQueryStatusSource"), getRetriesInfo(), context->getProcessListElement()); retries_ctl.retryLoop([&]() { auto zookeeper = context->getZooKeeper(); @@ -478,8 +475,11 @@ Chunk DDLQueryStatusSource::generate() String status_data; bool finished_exists = false; - auto retries_info = getRetriesInfo(); - auto retries_ctl = ZooKeeperRetriesControl("executeDDLQueryOnCluster", retries_info, context->getProcessListElement()); + auto retries_ctl = ZooKeeperRetriesControl( + "executeDDLQueryOnCluster", + &Poco::Logger::get("DDLQueryStatusSource"), + getRetriesInfo(), + context->getProcessListElement()); retries_ctl.retryLoop([&]() { finished_exists = context->getZooKeeper()->tryGet(fs::path(node_path) / "finished" / host_id, status_data); diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 86c223a410b..63804d2d86f 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -36,6 +36,9 @@ #include #include #include +#include +#include +#include #include #include @@ -76,10 +79,6 @@ #include #include -#include -#include -#include - namespace ProfileEvents { extern const Event FailedQuery; @@ -299,7 +298,7 @@ QueryLogElement logQueryStart( elem.query = query_for_logging; if (settings.log_formatted_queries) elem.formatted_query = queryToString(query_ast); - elem.normalized_query_hash = normalizedQueryHash(query_for_logging); + elem.normalized_query_hash = normalizedQueryHash(query_for_logging, false); elem.query_kind = query_ast->getQueryKind(); elem.client_info = context->getClientInfo(); @@ -573,7 +572,7 @@ void logExceptionBeforeStart( elem.current_database = context->getCurrentDatabase(); elem.query = query_for_logging; - elem.normalized_query_hash = normalizedQueryHash(query_for_logging); + elem.normalized_query_hash = normalizedQueryHash(query_for_logging, false); // Log query_kind if ast is valid if (ast) diff --git a/src/Interpreters/fuzzers/execute_query_fuzzer.cpp b/src/Interpreters/fuzzers/execute_query_fuzzer.cpp index 40e2325e46e..fd023754abf 100644 --- a/src/Interpreters/fuzzers/execute_query_fuzzer.cpp +++ b/src/Interpreters/fuzzers/execute_query_fuzzer.cpp @@ -2,6 +2,7 @@ #include #include "Processors/Executors/PullingPipelineExecutor.h" +#include #include #include #include @@ -31,6 +32,7 @@ extern 
"C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) registerFunctions(); registerAggregateFunctions(); registerTableFunctions(); + registerDatabases(); registerStorages(); registerDictionaries(); registerDisks(/* global_skip_access_check= */ true); diff --git a/src/Interpreters/loadMetadata.cpp b/src/Interpreters/loadMetadata.cpp index 541f9c6ee89..b2fd43c178c 100644 --- a/src/Interpreters/loadMetadata.cpp +++ b/src/Interpreters/loadMetadata.cpp @@ -1,3 +1,4 @@ +#include #include #include diff --git a/src/Interpreters/removeOnClusterClauseIfNeeded.cpp b/src/Interpreters/removeOnClusterClauseIfNeeded.cpp index 7dc452a0fcb..f8df03ed830 100644 --- a/src/Interpreters/removeOnClusterClauseIfNeeded.cpp +++ b/src/Interpreters/removeOnClusterClauseIfNeeded.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include #include @@ -14,6 +14,7 @@ #include #include #include +#include namespace DB @@ -33,7 +34,8 @@ static bool isAccessControlQuery(const ASTPtr & query) || query->as() || query->as() || query->as() - || query->as(); + || query->as() + || query->as(); } ASTPtr removeOnClusterClauseIfNeeded(const ASTPtr & query, ContextPtr context, const WithoutOnClusterASTRewriteParams & params) @@ -45,7 +47,7 @@ ASTPtr removeOnClusterClauseIfNeeded(const ASTPtr & query, ContextPtr context, c if ((isUserDefinedFunctionQuery(query) && context->getSettings().ignore_on_cluster_for_replicated_udf_queries - && context->getUserDefinedSQLObjectsLoader().isReplicated()) + && context->getUserDefinedSQLObjectsStorage().isReplicated()) || (isAccessControlQuery(query) && context->getSettings().ignore_on_cluster_for_replicated_access_entities_queries && context->getAccessControl().containsStorage(ReplicatedAccessStorage::STORAGE_TYPE))) diff --git a/src/Interpreters/tests/gtest_lru_file_cache.cpp b/src/Interpreters/tests/gtest_filecache.cpp similarity index 79% rename from src/Interpreters/tests/gtest_lru_file_cache.cpp rename to src/Interpreters/tests/gtest_filecache.cpp index 04a3877844b..1005e6090b8 100644 --- a/src/Interpreters/tests/gtest_lru_file_cache.cpp +++ b/src/Interpreters/tests/gtest_filecache.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -83,11 +84,12 @@ using HolderPtr = FileSegmentsHolderPtr; fs::path caches_dir = fs::current_path() / "lru_cache_test"; std::string cache_base_path = caches_dir / "cache1" / ""; +std::string cache_base_path2 = caches_dir / "cache2" / ""; void assertEqual(const FileSegmentsHolderPtr & file_segments, const Ranges & expected_ranges, const States & expected_states = {}) { - std::cerr << "File segments: "; + std::cerr << "\nFile segments: "; for (const auto & file_segment : *file_segments) std::cerr << file_segment->range().toString() << ", "; @@ -115,9 +117,12 @@ void assertEqual(const FileSegmentsHolderPtr & file_segments, const Ranges & exp void assertEqual(const std::vector & file_segments, const Ranges & expected_ranges, const States & expected_states = {}) { - std::cerr << "File segments: "; + std::cerr << "\nFile segments: "; for (const auto & file_segment : file_segments) std::cerr << FileSegment::Range(file_segment.range_left, file_segment.range_right).toString() << ", "; + std::cerr << "\nExpected: "; + for (const auto & r : expected_ranges) + std::cerr << r.toString() << ", "; ASSERT_EQ(file_segments.size(), expected_ranges.size()); @@ -141,6 +146,49 @@ void assertEqual(const std::vector & file_segments, const Ran } } +void assertProtectedOrProbationary(const std::vector & file_segments, 
const Ranges & expected, bool assert_protected) +{ + std::cerr << "\nFile segments: "; + std::vector res; + for (const auto & f : file_segments) + { + auto range = FileSegment::Range(f.range_left, f.range_right); + bool is_protected = (f.queue_entry_type == FileCacheQueueEntryType::SLRU_Protected); + bool is_probationary = (f.queue_entry_type == FileCacheQueueEntryType::SLRU_Probationary); + ASSERT_TRUE(is_probationary || is_protected); + + std::cerr << fmt::format("{} (protected: {})", range.toString(), is_protected) << ", "; + + if ((is_protected && assert_protected) || (!is_protected && !assert_protected)) + { + res.push_back(range); + } + } + std::cerr << "\nExpected: "; + for (const auto & range : expected) + { + std::cerr << range.toString() << ", "; + } + + ASSERT_EQ(res.size(), expected.size()); + for (size_t i = 0; i < res.size(); ++i) + { + ASSERT_EQ(res[i], expected[i]); + } +} + +void assertProtected(const std::vector & file_segments, const Ranges & expected) +{ + std::cerr << "\nAssert protected"; + assertProtectedOrProbationary(file_segments, expected, true); +} + +void assertProbationary(const std::vector & file_segments, const Ranges & expected) +{ + std::cerr << "\nAssert probationary"; + assertProtectedOrProbationary(file_segments, expected, false); +} + FileSegment & get(const HolderPtr & holder, int i) { auto it = std::next(holder->begin(), i); @@ -151,7 +199,7 @@ FileSegment & get(const HolderPtr & holder, int i) void download(FileSegment & file_segment) { - std::cerr << "Downloading range " << file_segment.range().toString() << "\n"; + std::cerr << "\nDownloading range " << file_segment.range().toString() << "\n"; ASSERT_EQ(file_segment.getOrSetDownloader(), FileSegment::getCallerId()); ASSERT_EQ(file_segment.state(), State::DOWNLOADING); @@ -184,7 +232,14 @@ void download(const HolderPtr & holder) void increasePriority(const HolderPtr & holder) { for (auto & it : *holder) - it->use(); + it->increasePriority(); +} + +void increasePriority(const HolderPtr & holder, size_t pos) +{ + FileSegments::iterator it = holder->begin(); + std::advance(it, pos); + (*it)->increasePriority(); } class FileCacheTest : public ::testing::Test @@ -221,7 +276,10 @@ public: if (fs::exists(cache_base_path)) fs::remove_all(cache_base_path); + if (fs::exists(cache_base_path2)) + fs::remove_all(cache_base_path2); fs::create_directories(cache_base_path); + fs::create_directories(cache_base_path2); } void TearDown() override @@ -233,7 +291,7 @@ public: pcg64 rng; }; -TEST_F(FileCacheTest, get) +TEST_F(FileCacheTest, LRUPolicy) { DB::ThreadStatus thread_status; @@ -1057,3 +1115,206 @@ TEST_F(FileCacheTest, TemporaryDataReadBufferSize) ASSERT_EQ(stream.getSize(), 62); } } + +TEST_F(FileCacheTest, SLRUPolicy) +{ + DB::ThreadStatus thread_status; + std::string query_id = "query_id"; /// To work with cache need query_id and query context. 
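
For readers unfamiliar with the SLRU policy exercised by this new test: a segmented LRU keeps a probationary queue for first-time entries and a protected queue for entries that were hit again, which is exactly what the assertProbationary/assertProtected helpers check. Below is a minimal, generic SLRU sketch showing the promotion/demotion flow; TinySLRU is invented for the example, counts elements rather than bytes, and has no relation to the FileCache classes.

#include <iostream>
#include <list>
#include <string>
#include <unordered_map>

// Minimal segmented LRU over keys: new keys enter the probationary segment;
// a second access promotes a key to the protected segment; protected overflow
// demotes the coldest protected key back to probationary.
class TinySLRU
{
public:
    TinySLRU(size_t probationary_capacity, size_t protected_capacity)
        : probationary_cap(probationary_capacity), protected_cap(protected_capacity) {}

    void access(const std::string & key)
    {
        if (erase(protected_queue, key))
        {
            protected_queue.push_front(key); // already hot: refresh position
        }
        else if (erase(probationary_queue, key))
        {
            protected_queue.push_front(key); // second hit: promote
            if (protected_queue.size() > protected_cap)
            {
                probationary_queue.push_front(protected_queue.back()); // demote coldest
                protected_queue.pop_back();
            }
        }
        else
        {
            probationary_queue.push_front(key); // first hit: probationary
        }
        while (probationary_queue.size() > probationary_cap)
            probationary_queue.pop_back(); // evictions happen from probationary only
    }

    void dump() const
    {
        std::cout << "probationary:";
        for (const auto & k : probationary_queue) std::cout << ' ' << k;
        std::cout << "\nprotected:";
        for (const auto & k : protected_queue) std::cout << ' ' << k;
        std::cout << '\n';
    }

private:
    static bool erase(std::list<std::string> & queue, const std::string & key)
    {
        for (auto it = queue.begin(); it != queue.end(); ++it)
            if (*it == key) { queue.erase(it); return true; }
        return false;
    }

    std::list<std::string> probationary_queue;
    std::list<std::string> protected_queue;
    size_t probationary_cap;
    size_t protected_cap;
};

int main()
{
    TinySLRU cache(/*probationary*/ 3, /*protected*/ 3);
    for (const char * k : {"a", "b", "a", "c", "d", "c"})
        cache.access(k);
    cache.dump(); // "a" and "c" end up protected; "b" and "d" stay probationary
}
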
+ + Poco::XML::DOMParser dom_parser; + std::string xml(R"CONFIG( +)CONFIG"); + Poco::AutoPtr document = dom_parser.parseString(xml); + Poco::AutoPtr config = new Poco::Util::XMLConfiguration(document); + getMutableContext().context->setConfig(config); + + auto query_context = DB::Context::createCopy(getContext().context); + query_context->makeQueryContext(); + query_context->setCurrentQueryId(query_id); + chassert(&DB::CurrentThread::get() == &thread_status); + DB::CurrentThread::QueryScope query_scope_holder(query_context); + + DB::FileCacheSettings settings; + settings.base_path = cache_base_path; + settings.max_size = 40; + settings.max_elements = 6; + settings.boundary_alignment = 1; + + settings.cache_policy = "SLRU"; + settings.slru_size_ratio = 0.5; + + const size_t file_size = -1; // the value doesn't really matter because boundary_alignment == 1. + size_t file_cache_name = 0; + + { + auto cache = DB::FileCache(std::to_string(++file_cache_name), settings); + cache.initialize(); + auto key = cache.createKeyForPath("key1"); + + auto add_range = [&](size_t offset, size_t size) + { + std::cerr << "Add [" << offset << ", " << offset + size - 1 << "]" << std::endl; + + auto holder = cache.getOrSet(key, offset, size, file_size, {}); + assertEqual(holder, { Range(offset, offset + size - 1) }, { State::EMPTY }); + download(holder->front()); + assertEqual(holder, { Range(offset, offset + size - 1) }, { State::DOWNLOADED }); + }; + + auto check_covering_range = [&](size_t offset, size_t size, Ranges covering_ranges) + { + auto holder = cache.getOrSet(key, offset, size, file_size, {}); + std::vector states(covering_ranges.size(), State::DOWNLOADED); + assertEqual(holder, covering_ranges, states); + increasePriority(holder); + }; + + add_range(0, 10); + add_range(10, 5); + + assertEqual(cache.getFileSegmentInfos(key), { Range(0, 9), Range(10, 14) }); + assertEqual(cache.dumpQueue(), { Range(0, 9), Range(10, 14) }); + + ASSERT_EQ(cache.getFileSegmentsNum(), 2); + ASSERT_EQ(cache.getUsedCacheSize(), 15); + + assertProbationary(cache.dumpQueue(), { Range(0, 9), Range(10, 14) }); + assertProtected(cache.dumpQueue(), Ranges{}); + + check_covering_range(9, 1, { Range(0, 9) }); + assertEqual(cache.dumpQueue(), { Range(10, 14), Range(0, 9) }); + + check_covering_range(10, 1, { Range(10, 14) }); + assertEqual(cache.dumpQueue(), { Range(0, 9), Range(10, 14) }); + + assertProbationary(cache.dumpQueue(), Ranges{}); + assertProtected(cache.dumpQueue(), { Range(0, 9), Range(10, 14) }); + + add_range(17, 4); + assertEqual(cache.dumpQueue(), { Range(17, 20), Range(0, 9), Range(10, 14) }); + + add_range(24, 3); + assertEqual(cache.dumpQueue(), { Range(17, 20), Range(24, 26), Range(0, 9), Range(10, 14) }); + + add_range(27, 1); + assertEqual(cache.dumpQueue(), { Range(17, 20), Range(24, 26), Range(27, 27), Range(0, 9), Range(10, 14) }); + + assertProbationary(cache.dumpQueue(), { Range(17, 20), Range(24, 26), Range(27, 27) }); + assertProtected(cache.dumpQueue(), { Range(0, 9), Range(10, 14) }); + + assertEqual(cache.getFileSegmentInfos(key), { Range(0, 9), Range(10, 14), Range(17, 20), Range(24, 26), Range(27, 27) }); + ASSERT_EQ(cache.getFileSegmentsNum(), 5); + ASSERT_EQ(cache.getUsedCacheSize(), 23); + + add_range(28, 3); + assertEqual(cache.dumpQueue(), { Range(24, 26), Range(27, 27), Range(28, 30), Range(0, 9), Range(10, 14) }); + + assertProbationary(cache.dumpQueue(), { Range(24, 26), Range(27, 27), Range(28, 30) }); + assertProtected(cache.dumpQueue(), { Range(0, 9), Range(10, 14) }); + + 
check_covering_range(4, 1, { Range(0, 9) }); + + assertProbationary(cache.dumpQueue(), { Range(24, 26), Range(27, 27), Range(28, 30) }); + assertProtected(cache.dumpQueue(), { Range(10, 14), Range(0, 9) }); + + check_covering_range(27, 3, { Range(27, 27), Range(28, 30) }); + + assertProbationary(cache.dumpQueue(), { Range(24, 26), Range(10, 14) }); + assertProtected(cache.dumpQueue(), { Range(0, 9), Range(27, 27), Range(28, 30) }); + + assertEqual(cache.getFileSegmentInfos(key), { Range(0, 9), Range(10, 14), Range(24, 26), Range(27, 27), Range(28, 30) }); + ASSERT_EQ(cache.getFileSegmentsNum(), 5); + ASSERT_EQ(cache.getUsedCacheSize(), 22); + } + + { + ReadSettings read_settings; + read_settings.enable_filesystem_cache = true; + read_settings.local_fs_method = LocalFSReadMethod::pread; + + auto write_file = [](const std::string & filename, const std::string & s) + { + std::string file_path = fs::current_path() / filename; + auto wb = std::make_unique(file_path, DBMS_DEFAULT_BUFFER_SIZE); + wb->write(s.data(), s.size()); + wb->next(); + wb->finalize(); + return file_path; + }; + + DB::FileCacheSettings settings2; + settings2.base_path = cache_base_path2; + settings2.max_file_segment_size = 5; + settings2.max_size = 30; + settings2.max_elements = 6; + settings2.boundary_alignment = 1; + settings2.cache_policy = "SLRU"; + settings2.slru_size_ratio = 0.5; + + auto cache = std::make_shared("slru_2", settings2); + cache->initialize(); + + auto read_and_check = [&](const std::string & file, const FileCacheKey & key, const std::string & expect_result) + { + auto read_buffer_creator = [&]() + { + return createReadBufferFromFileBase(file, read_settings, std::nullopt, std::nullopt); + }; + + auto cached_buffer = std::make_shared( + file, key, cache, read_buffer_creator, read_settings, "test", expect_result.size(), false, false, std::nullopt, nullptr); + + WriteBufferFromOwnString result; + copyData(*cached_buffer, result); + ASSERT_EQ(result.str(), expect_result); + }; + + std::string data1(15, '*'); + auto file1 = write_file("test1", data1); + auto key1 = cache->createKeyForPath(file1); + + read_and_check(file1, key1, data1); + + assertEqual(cache->dumpQueue(), { Range(0, 4), Range(5, 9), Range(10, 14) }); + assertProbationary(cache->dumpQueue(), { Range(0, 4), Range(5, 9), Range(10, 14) }); + assertProtected(cache->dumpQueue(), Ranges{}); + + read_and_check(file1, key1, data1); + + assertEqual(cache->dumpQueue(), { Range(0, 4), Range(5, 9), Range(10, 14) }); + assertProbationary(cache->dumpQueue(), Ranges{}); + assertProtected(cache->dumpQueue(), { Range(0, 4), Range(5, 9), Range(10, 14) }); + + std::string data2(10, '*'); + auto file2 = write_file("test2", data2); + auto key2 = cache->createKeyForPath(file2); + + read_and_check(file2, key2, data2); + + auto dump = cache->dumpQueue(); + assertEqual(dump, { Range(0, 4), Range(5, 9), Range(0, 4), Range(5, 9), Range(10, 14) }); + + ASSERT_EQ(dump[0].key, key2); + ASSERT_EQ(dump[1].key, key2); + ASSERT_EQ(dump[2].key, key1); + ASSERT_EQ(dump[3].key, key1); + ASSERT_EQ(dump[4].key, key1); + + assertProbationary(cache->dumpQueue(), { Range(0, 4), Range(5, 9) }); + assertProtected(cache->dumpQueue(), { Range(0, 4), Range(5, 9), Range(10, 14) }); + + read_and_check(file2, key2, data2); + + dump = cache->dumpQueue(); + assertEqual(dump, { Range(0, 4), Range(5, 9), Range(10, 14), Range(0, 4), Range(5, 9) }); + + ASSERT_EQ(dump[0].key, key1); + ASSERT_EQ(dump[1].key, key1); + ASSERT_EQ(dump[2].key, key1); + ASSERT_EQ(dump[3].key, key2); + 
ASSERT_EQ(dump[4].key, key2); + + assertProbationary(cache->dumpQueue(), { Range(0, 4), Range(5, 9) }); + assertProtected(cache->dumpQueue(), { Range(10, 14), Range(0, 4), Range(5, 9) }); + } +} diff --git a/src/Loggers/ExtendedLogChannel.cpp b/src/Loggers/ExtendedLogChannel.cpp index 116892b9030..634ae489dc1 100644 --- a/src/Loggers/ExtendedLogChannel.cpp +++ b/src/Loggers/ExtendedLogChannel.cpp @@ -19,7 +19,7 @@ ExtendedLogMessage ExtendedLogMessage::getFrom(const Poco::Message & base) ::timeval tv; if (0 != gettimeofday(&tv, nullptr)) - DB::throwFromErrno("Cannot gettimeofday", ErrorCodes::CANNOT_GETTIMEOFDAY); + throw ErrnoException(ErrorCodes::CANNOT_GETTIMEOFDAY, "Cannot gettimeofday"); msg_ext.time_seconds = static_cast(tv.tv_sec); msg_ext.time_microseconds = static_cast(tv.tv_usec); diff --git a/src/Parsers/ASTAlterQuery.cpp b/src/Parsers/ASTAlterQuery.cpp index 84893011222..84355817b2c 100644 --- a/src/Parsers/ASTAlterQuery.cpp +++ b/src/Parsers/ASTAlterQuery.cpp @@ -453,6 +453,12 @@ void ASTAlterCommand::formatImpl(const FormatSettings & settings, FormatState & << (settings.hilite ? hilite_none : ""); select->formatImpl(settings, state, frame); } + else if (type == ASTAlterCommand::MODIFY_REFRESH) + { + settings.ostr << (settings.hilite ? hilite_keyword : "") << "MODIFY REFRESH " << settings.nl_or_ws + << (settings.hilite ? hilite_none : ""); + refresh->formatImpl(settings, state, frame); + } else if (type == ASTAlterCommand::LIVE_VIEW_REFRESH) { settings.ostr << (settings.hilite ? hilite_keyword : "") << "REFRESH " << (settings.hilite ? hilite_none : ""); @@ -466,6 +472,16 @@ void ASTAlterCommand::formatImpl(const FormatSettings & settings, FormatState & settings.ostr << (settings.hilite ? hilite_keyword : "") << " TO "; rename_to->formatImpl(settings, state, frame); } + else if (type == ASTAlterCommand::APPLY_DELETED_MASK) + { + settings.ostr << (settings.hilite ? hilite_keyword : "") << "APPLY DELETED MASK" << (settings.hilite ? hilite_none : ""); + + if (partition) + { + settings.ostr << (settings.hilite ? hilite_keyword : "") << " IN PARTITION " << (settings.hilite ? 
hilite_none : ""); + partition->formatImpl(settings, state, frame); + } + } else throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected type of ALTER"); } diff --git a/src/Parsers/ASTAlterQuery.h b/src/Parsers/ASTAlterQuery.h index e601739595f..0b115537a6d 100644 --- a/src/Parsers/ASTAlterQuery.h +++ b/src/Parsers/ASTAlterQuery.h @@ -40,6 +40,7 @@ public: MODIFY_SETTING, RESET_SETTING, MODIFY_QUERY, + MODIFY_REFRESH, REMOVE_TTL, REMOVE_SAMPLE_BY, @@ -71,6 +72,7 @@ public: DELETE, UPDATE, + APPLY_DELETED_MASK, NO_TYPE, @@ -165,6 +167,9 @@ public: */ ASTPtr values; + /// For MODIFY REFRESH + ASTPtr refresh; + bool detach = false; /// true for DETACH PARTITION bool part = false; /// true for ATTACH PART, DROP DETACHED PART and MOVE diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp index 1562586bd93..9d5f0bcddbd 100644 --- a/src/Parsers/ASTCreateQuery.cpp +++ b/src/Parsers/ASTCreateQuery.cpp @@ -2,7 +2,6 @@ #include #include #include -#include #include #include #include @@ -340,6 +339,12 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat formatOnCluster(settings); } + if (refresh_strategy) + { + settings.ostr << settings.nl_or_ws; + refresh_strategy->formatImpl(settings, state, frame); + } + if (to_table_id) { assert((is_materialized_view || is_window_view) && to_inner_uuid == UUIDHelpers::Nil); diff --git a/src/Parsers/ASTCreateQuery.h b/src/Parsers/ASTCreateQuery.h index 28f5e05802b..49a0140625c 100644 --- a/src/Parsers/ASTCreateQuery.h +++ b/src/Parsers/ASTCreateQuery.h @@ -5,6 +5,7 @@ #include #include #include +#include #include namespace DB @@ -116,6 +117,7 @@ public: ASTExpressionList * dictionary_attributes_list = nullptr; /// attributes of ASTDictionary * dictionary = nullptr; /// dictionary definition (layout, primary key, etc.) + ASTRefreshStrategy * refresh_strategy = nullptr; // For CREATE MATERIALIZED VIEW ... REFRESH ... std::optional live_view_periodic_refresh; /// For CREATE LIVE VIEW ... WITH [PERIODIC] REFRESH ... bool is_watermark_strictly_ascending{false}; /// STRICTLY ASCENDING WATERMARK STRATEGY FOR WINDOW VIEW diff --git a/src/Parsers/ASTRefreshStrategy.cpp b/src/Parsers/ASTRefreshStrategy.cpp new file mode 100644 index 00000000000..2e0c6ee4638 --- /dev/null +++ b/src/Parsers/ASTRefreshStrategy.cpp @@ -0,0 +1,71 @@ +#include + +#include + +namespace DB +{ + +ASTPtr ASTRefreshStrategy::clone() const +{ + auto res = std::make_shared(*this); + res->children.clear(); + + if (period) + res->set(res->period, period->clone()); + if (offset) + res->set(res->offset, offset->clone()); + if (spread) + res->set(res->spread, spread->clone()); + if (settings) + res->set(res->settings, settings->clone()); + if (dependencies) + res->set(res->dependencies, dependencies->clone()); + res->schedule_kind = schedule_kind; + return res; +} + +void ASTRefreshStrategy::formatImpl( + const IAST::FormatSettings & f_settings, IAST::FormatState & state, IAST::FormatStateStacked frame) const +{ + frame.need_parens = false; + + f_settings.ostr << (f_settings.hilite ? hilite_keyword : "") << "REFRESH " << (f_settings.hilite ? hilite_none : ""); + using enum RefreshScheduleKind; + switch (schedule_kind) + { + case AFTER: + f_settings.ostr << "AFTER " << (f_settings.hilite ? hilite_none : ""); + period->formatImpl(f_settings, state, frame); + break; + case EVERY: + f_settings.ostr << "EVERY " << (f_settings.hilite ? 
hilite_none : ""); + period->formatImpl(f_settings, state, frame); + if (offset) + { + f_settings.ostr << (f_settings.hilite ? hilite_keyword : "") << " OFFSET " << (f_settings.hilite ? hilite_none : ""); + offset->formatImpl(f_settings, state, frame); + } + break; + default: + f_settings.ostr << (f_settings.hilite ? hilite_none : ""); + break; + } + + if (spread) + { + f_settings.ostr << (f_settings.hilite ? hilite_keyword : "") << " RANDOMIZE FOR " << (f_settings.hilite ? hilite_none : ""); + spread->formatImpl(f_settings, state, frame); + } + if (dependencies) + { + f_settings.ostr << (f_settings.hilite ? hilite_keyword : "") << " DEPENDS ON " << (f_settings.hilite ? hilite_none : ""); + dependencies->formatImpl(f_settings, state, frame); + } + if (settings) + { + f_settings.ostr << (f_settings.hilite ? hilite_keyword : "") << " SETTINGS " << (f_settings.hilite ? hilite_none : ""); + settings->formatImpl(f_settings, state, frame); + } +} + +} diff --git a/src/Parsers/ASTRefreshStrategy.h b/src/Parsers/ASTRefreshStrategy.h new file mode 100644 index 00000000000..ca248b76b40 --- /dev/null +++ b/src/Parsers/ASTRefreshStrategy.h @@ -0,0 +1,35 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +enum class RefreshScheduleKind : UInt8 +{ + UNKNOWN = 0, + AFTER, + EVERY +}; + +/// Strategy for MATERIALIZED VIEW ... REFRESH .. +class ASTRefreshStrategy : public IAST +{ +public: + ASTSetQuery * settings = nullptr; + ASTExpressionList * dependencies = nullptr; + ASTTimeInterval * period = nullptr; + ASTTimeInterval * offset = nullptr; + ASTTimeInterval * spread = nullptr; + RefreshScheduleKind schedule_kind{RefreshScheduleKind::UNKNOWN}; + + String getID(char) const override { return "Refresh strategy definition"; } + + ASTPtr clone() const override; + + void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override; +}; + +} diff --git a/src/Parsers/ASTSelectQuery.cpp b/src/Parsers/ASTSelectQuery.cpp index 7c96db006c4..2115de1c124 100644 --- a/src/Parsers/ASTSelectQuery.cpp +++ b/src/Parsers/ASTSelectQuery.cpp @@ -144,7 +144,7 @@ void ASTSelectQuery::formatImpl(const FormatSettings & s, FormatState & state, F window()->as().formatImplMultiline(s, state, frame); } - if (orderBy()) + if (!order_by_all && orderBy()) { s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << "ORDER BY" << (s.hilite ? hilite_none : ""); s.one_line @@ -163,6 +163,24 @@ void ASTSelectQuery::formatImpl(const FormatSettings & s, FormatState & state, F } } + if (order_by_all) + { + s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << "ORDER BY ALL" << (s.hilite ? hilite_none : ""); + + auto * elem = orderBy()->children[0]->as(); + s.ostr << (s.hilite ? hilite_keyword : "") + << (elem->direction == -1 ? " DESC" : " ASC") + << (s.hilite ? hilite_none : ""); + + if (elem->nulls_direction_was_explicitly_specified) + { + s.ostr << (s.hilite ? hilite_keyword : "") + << " NULLS " + << (elem->nulls_direction == elem->direction ? "LAST" : "FIRST") + << (s.hilite ? hilite_none : ""); + } + } + if (limitByLength()) { s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << "LIMIT " << (s.hilite ? 
hilite_none : ""); diff --git a/src/Parsers/ASTSelectQuery.h b/src/Parsers/ASTSelectQuery.h index 57f45a8aacd..eb171dc00ee 100644 --- a/src/Parsers/ASTSelectQuery.h +++ b/src/Parsers/ASTSelectQuery.h @@ -87,6 +87,7 @@ public: bool group_by_with_cube = false; bool group_by_with_constant_keys = false; bool group_by_with_grouping_sets = false; + bool order_by_all = false; bool limit_with_ties = false; ASTPtr & refSelect() { return getExpression(Expression::SELECT); } diff --git a/src/Parsers/ASTSystemQuery.cpp b/src/Parsers/ASTSystemQuery.cpp index 77235dfb6c2..c005d49a93d 100644 --- a/src/Parsers/ASTSystemQuery.cpp +++ b/src/Parsers/ASTSystemQuery.cpp @@ -179,7 +179,8 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &, || type == Type::RELOAD_DICTIONARY || type == Type::RELOAD_MODEL || type == Type::RELOAD_FUNCTION - || type == Type::RESTART_DISK) + || type == Type::RESTART_DISK + || type == Type::DROP_DISK_METADATA_CACHE) { if (table) { diff --git a/src/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h index 5f7ba5be330..fc26f5dee1c 100644 --- a/src/Parsers/ASTSystemQuery.h +++ b/src/Parsers/ASTSystemQuery.h @@ -32,6 +32,7 @@ public: DROP_COMPILED_EXPRESSION_CACHE, #endif DROP_FILESYSTEM_CACHE, + DROP_DISK_METADATA_CACHE, DROP_SCHEMA_CACHE, DROP_FORMAT_SCHEMA_CACHE, #if USE_AWS_S3 @@ -49,6 +50,8 @@ public: SYNC_DATABASE_REPLICA, SYNC_TRANSACTION_LOG, SYNC_FILE_CACHE, + REPLICA_READY, + REPLICA_UNREADY, RELOAD_DICTIONARY, RELOAD_DICTIONARIES, RELOAD_MODEL, @@ -87,6 +90,13 @@ public: STOP_CLEANUP, START_CLEANUP, RESET_COVERAGE, + REFRESH_VIEW, + START_VIEW, + START_VIEWS, + STOP_VIEW, + STOP_VIEWS, + CANCEL_VIEW, + TEST_VIEW, END }; @@ -130,6 +140,10 @@ public: ServerType server_type; + /// For SYSTEM TEST VIEW (SET FAKE TIME

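For orientation, the SQL surface implied by the parser and AST changes above (ASTRefreshStrategy, the new order_by_all flag in ASTSelectQuery, and the added SYSTEM ... VIEW query types) looks roughly like the sketch below. It only mirrors the clause order that formatImpl emits — REFRESH AFTER|EVERY <interval> [OFFSET ...] [RANDOMIZE FOR ...] [DEPENDS ON ...] [SETTINGS ...] printed before TO, and ORDER BY ALL followed by an optional direction and NULLS modifier. The object names are hypothetical, and this is a sketch inferred from the formatters, not a test taken from the patch.

# Illustrative sketch only; assumes `node` is a started ClickHouseCluster
# instance, as in the integration tests further below, and that the
# refreshable-view feature is enabled on the server.
node.query(
    """
    CREATE MATERIALIZED VIEW refreshable_mv
    REFRESH EVERY 1 HOUR OFFSET 10 MINUTE RANDOMIZE FOR 5 MINUTE DEPENDS ON src_mv
    TO dst
    AS SELECT key, count() AS cnt FROM src GROUP BY key
    """
)

# The new SYSTEM query types suggest manual control over refreshable views;
# the statement names below follow the enum values added above and are an
# assumption, not verified syntax from the patch.
node.query("SYSTEM REFRESH VIEW refreshable_mv")
node.query("SYSTEM STOP VIEW refreshable_mv")

# ORDER BY ALL orders by every column of the SELECT list; the formatter prints
# the optional direction and NULLS modifier right after the ALL keyword.
node.query("SELECT key, value FROM src ORDER BY ALL DESC NULLS FIRST")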
+ disk_s3_plain_readonly +
+ + + + + diff --git a/tests/integration/test_attach_table_from_s3_plain_readonly/configs/settings.xml b/tests/integration/test_attach_table_from_s3_plain_readonly/configs/settings.xml new file mode 100644 index 00000000000..3e6d615557d --- /dev/null +++ b/tests/integration/test_attach_table_from_s3_plain_readonly/configs/settings.xml @@ -0,0 +1,12 @@ + + + + 1 + + + + + default + + + diff --git a/tests/integration/test_attach_table_from_s3_plain_readonly/test.py b/tests/integration/test_attach_table_from_s3_plain_readonly/test.py new file mode 100644 index 00000000000..15ba934e621 --- /dev/null +++ b/tests/integration/test_attach_table_from_s3_plain_readonly/test.py @@ -0,0 +1,112 @@ +import re +import os +import logging +import pytest + +from helpers.cluster import ClickHouseCluster +from minio.error import S3Error +from pathlib import Path + +cluster = ClickHouseCluster(__file__) + +node1 = cluster.add_instance( + "node1", + main_configs=["configs/config.xml"], + user_configs=["configs/settings.xml"], + with_zookeeper=True, + with_minio=True, + stay_alive=True, + macros={"shard": 1, "replica": 1}, +) + +node2 = cluster.add_instance( + "node2", + main_configs=["configs/config.xml"], + user_configs=["configs/settings.xml"], + with_zookeeper=True, + with_minio=True, + stay_alive=True, + macros={"shard": 1, "replica": 2}, +) + +uuid_regex = re.compile("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}") + + +def upload_to_minio(minio_client, bucket_name, local_path, minio_path=""): + local_path = Path(local_path) + for root, _, files in os.walk(local_path): + for file in files: + local_file_path = Path(root) / file + minio_object_name = minio_path + str( + local_file_path.relative_to(local_path) + ) + + try: + with open(local_file_path, "rb") as data: + file_stat = os.stat(local_file_path) + minio_client.put_object( + bucket_name, minio_object_name, data, file_stat.st_size + ) + logging.info(f"Uploaded {local_file_path} to {minio_object_name}") + except S3Error as e: + logging.error(f"Error uploading {local_file_path}: {e}") + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def test_attach_table_from_s3_plain_readonly(started_cluster): + # Create an atomic DB with mergetree sample data + node1.query( + """ + create database local_db; + + create table local_db.test_table (num UInt32) engine=MergeTree() order by num; + + insert into local_db.test_table (*) Values (5) + """ + ) + + assert int(node1.query("select num from local_db.test_table limit 1")) == 5 + + # Copy local MergeTree data into minio bucket + table_data_path = os.path.join(node1.path, f"database/store") + minio = cluster.minio_client + upload_to_minio( + minio, cluster.minio_bucket, table_data_path, "data/disks/disk_s3_plain/store/" + ) + + # Drop the non-replicated table, we don't need it anymore + table_uuid = node1.query( + "SELECT uuid FROM system.tables WHERE database='local_db' AND table='test_table'" + ).strip() + node1.query("drop table local_db.test_table SYNC;") + + # Create a replicated database + node1.query( + "create database s3_plain_test_db ENGINE = Replicated('/test/s3_plain_test_db', 'shard1', 'replica1');" + ) + node2.query( + "create database s3_plain_test_db ENGINE = Replicated('/test/s3_plain_test_db', 'shard1', 'replica2');" + ) + + # Create a MergeTree table at one node, by attaching the merge tree data + node1.query( + f""" + attach table s3_plain_test_db.test_table UUID '{table_uuid}' (num UInt32) + 
engine=MergeTree() + order by num + settings storage_policy = 's3_plain_readonly' + """ + ) + + # Check that both nodes can query and get result. + assert int(node1.query("select num from s3_plain_test_db.test_table limit 1")) == 5 + assert int(node2.query("select num from s3_plain_test_db.test_table limit 1")) == 5 diff --git a/tests/integration/test_backup_restore_s3/configs/disk_s3.xml b/tests/integration/test_backup_restore_s3/configs/disk_s3.xml index d635e39e13f..45a1e17b039 100644 --- a/tests/integration/test_backup_restore_s3/configs/disk_s3.xml +++ b/tests/integration/test_backup_restore_s3/configs/disk_s3.xml @@ -58,5 +58,6 @@ disk_s3 disk_s3_plain disk_s3_cache + disk_s3_other_bucket
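The test_attach_table_from_s3_plain_readonly test above boils down to re-attaching an existing MergeTree table from an s3_plain_readonly disk: the table's parts are mirrored into the bucket prefix backing that disk, the table is attached under its original UUID with a storage policy pointing at the disk, and the Replicated database makes it visible on every replica. A condensed sketch of those server-side steps, reusing the names from the test (not an additional test case):

# Condensed sketch of the flow exercised by the test above; `table_uuid` is the
# UUID captured from system.tables before the source table was dropped, and the
# bucket already holds its parts under data/disks/disk_s3_plain/store/.
node1.query(
    f"""
    ATTACH TABLE s3_plain_test_db.test_table UUID '{table_uuid}' (num UInt32)
    ENGINE = MergeTree()
    ORDER BY num
    SETTINGS storage_policy = 's3_plain_readonly'
    """
)

# The Replicated database engine propagates the table metadata, so the second
# replica can read the same parts from the read-only S3 disk without holding
# any local data.
assert int(node2.query("SELECT num FROM s3_plain_test_db.test_table LIMIT 1")) == 5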
diff --git a/tests/integration/test_backup_restore_s3/test.py b/tests/integration/test_backup_restore_s3/test.py index 55d40b14ea7..cd8f70b3239 100644 --- a/tests/integration/test_backup_restore_s3/test.py +++ b/tests/integration/test_backup_restore_s3/test.py @@ -184,6 +184,32 @@ def test_backup_to_disk(storage_policy, to_disk): check_backup_and_restore(storage_policy, backup_destination) +@pytest.mark.parametrize( + "storage_policy, to_disk", + [ + pytest.param( + "policy_s3", + "disk_s3_other_bucket", + id="from_s3_to_s3", + ), + pytest.param( + "policy_s3_other_bucket", + "disk_s3", + id="from_s3_to_s3_other_bucket", + ), + ], +) +def test_backup_from_s3_to_s3_disk_native_copy(storage_policy, to_disk): + backup_name = new_backup_name() + backup_destination = f"Disk('{to_disk}', '{backup_name}')" + (backup_events, restore_events) = check_backup_and_restore( + storage_policy, backup_destination + ) + + assert backup_events["S3CopyObject"] > 0 + assert restore_events["S3CopyObject"] > 0 + + def test_backup_to_s3(): storage_policy = "default" backup_name = new_backup_name() @@ -419,3 +445,10 @@ def test_backup_with_fs_cache( # see MergeTreeData::initializeDirectoriesAndFormatVersion() if "CachedWriteBufferCacheWriteBytes" in restore_events: assert restore_events["CachedWriteBufferCacheWriteBytes"] <= 1 + + +def test_backup_to_zip(): + storage_policy = "default" + backup_name = new_backup_name() + backup_destination = f"S3('http://minio1:9001/root/data/backups/{backup_name}.zip', 'minio', 'minio123')" + check_backup_and_restore(storage_policy, backup_destination) diff --git a/tests/integration/test_config_substitutions/configs/000-config_with_env_subst.xml b/tests/integration/test_config_substitutions/configs/000-config_with_env_subst.xml index ffa26488874..b029dd3bd2e 100644 --- a/tests/integration/test_config_substitutions/configs/000-config_with_env_subst.xml +++ b/tests/integration/test_config_substitutions/configs/000-config_with_env_subst.xml @@ -2,6 +2,7 @@ + 1 diff --git a/tests/integration/test_config_substitutions/test.py b/tests/integration/test_config_substitutions/test.py index 46961e5da71..564985b2f50 100644 --- a/tests/integration/test_config_substitutions/test.py +++ b/tests/integration/test_config_substitutions/test.py @@ -1,6 +1,7 @@ import pytest from helpers.cluster import ClickHouseCluster +from helpers.client import QueryRuntimeException cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( @@ -36,9 +37,13 @@ node7 = cluster.add_instance( "configs/000-config_with_env_subst.xml", "configs/010-env_subst_override.xml", ], - env_variables={"MAX_QUERY_SIZE": "121212"}, + env_variables={ + # overridden with 424242 + "MAX_QUERY_SIZE": "121212", + "MAX_THREADS": "2", + }, instance_env_variables=True, -) # overridden with 424242 +) @pytest.fixture(scope="module") @@ -91,6 +96,65 @@ def test_config(start_cluster): node7.query("select value from system.settings where name = 'max_query_size'") == "424242\n" ) + assert ( + node7.query("select value from system.settings where name = 'max_threads'") + == "2\n" + ) + + +def test_config_invalid_overrides(start_cluster): + node7.replace_config( + "/etc/clickhouse-server/users.d/000-config_with_env_subst.xml", + """ + + + + + 100 + + + + + + default + default + + + + + + +""", + ) + with pytest.raises( + QueryRuntimeException, + match="Failed to preprocess config '/etc/clickhouse-server/users.xml': Exception: Element has value and does not have 'replace' attribute, can't process from_env substitution", + ): + 
node7.query("SYSTEM RELOAD CONFIG") + node7.replace_config( + "/etc/clickhouse-server/users.d/000-config_with_env_subst.xml", + """ + + + + + 1 + + + + + + default + default + + + + + + +""", + ) + node7.query("SYSTEM RELOAD CONFIG") def test_include_config(start_cluster): diff --git a/tests/integration/test_filesystem_cache/config.d/storage_conf_2.xml b/tests/integration/test_filesystem_cache/config.d/storage_conf_2.xml new file mode 100644 index 00000000000..a068d7b954c --- /dev/null +++ b/tests/integration/test_filesystem_cache/config.d/storage_conf_2.xml @@ -0,0 +1,24 @@ + + + + + local_blob_storage + / + + + cache + hdd_blob + /cache1/ + 1Mi + 1 + + + cache + hdd_blob + /cache1/ + 1Mi + 1 + + + + diff --git a/tests/integration/test_filesystem_cache/test.py b/tests/integration/test_filesystem_cache/test.py index 3a6a1ef76eb..ab1bc4e4344 100644 --- a/tests/integration/test_filesystem_cache/test.py +++ b/tests/integration/test_filesystem_cache/test.py @@ -21,6 +21,12 @@ def cluster(): ], stay_alive=True, ) + cluster.add_instance( + "node_caches_with_same_path", + main_configs=[ + "config.d/storage_conf_2.xml", + ], + ) logging.info("Starting cluster...") cluster.start() @@ -87,3 +93,104 @@ def test_parallel_cache_loading_on_startup(cluster, node_name): ) node.query("SELECT * FROM test FORMAT Null") assert count == int(node.query("SELECT count() FROM test")) + + +@pytest.mark.parametrize("node_name", ["node"]) +def test_caches_with_the_same_configuration(cluster, node_name): + node = cluster.instances[node_name] + cache_path = "cache1" + + node.query(f"SYSTEM DROP FILESYSTEM CACHE;") + for table in ["test", "test2"]: + node.query( + f""" + DROP TABLE IF EXISTS {table} SYNC; + + CREATE TABLE {table} (key UInt32, value String) + Engine=MergeTree() + ORDER BY value + SETTINGS disk = disk( + type = cache, + name = {table}, + path = '{cache_path}', + disk = 'hdd_blob', + max_file_segment_size = '1Ki', + boundary_alignment = '1Ki', + cache_on_write_operations=1, + max_size = '1Mi'); + + SET enable_filesystem_cache_on_write_operations=1; + INSERT INTO {table} SELECT * FROM generateRandom('a Int32, b String') + LIMIT 1000; + """ + ) + + size = int( + node.query( + "SELECT value FROM system.metrics WHERE name = 'FilesystemCacheSize'" + ) + ) + assert ( + node.query( + "SELECT cache_name, sum(size) FROM system.filesystem_cache GROUP BY cache_name ORDER BY cache_name" + ).strip() + == f"test\t{size}\ntest2\t{size}" + ) + + table = "test3" + assert ( + "Found more than one cache configuration with the same path, but with different cache settings" + in node.query_and_get_error( + f""" + DROP TABLE IF EXISTS {table} SYNC; + + CREATE TABLE {table} (key UInt32, value String) + Engine=MergeTree() + ORDER BY value + SETTINGS disk = disk( + type = cache, + name = {table}, + path = '{cache_path}', + disk = 'hdd_blob', + max_file_segment_size = '1Ki', + boundary_alignment = '1Ki', + cache_on_write_operations=0, + max_size = '2Mi'); + """ + ) + ) + + +@pytest.mark.parametrize("node_name", ["node_caches_with_same_path"]) +def test_caches_with_the_same_configuration_2(cluster, node_name): + node = cluster.instances[node_name] + cache_path = "cache1" + + node.query(f"SYSTEM DROP FILESYSTEM CACHE;") + for table in ["cache1", "cache2"]: + node.query( + f""" + DROP TABLE IF EXISTS {table} SYNC; + + CREATE TABLE {table} (key UInt32, value String) + Engine=MergeTree() + ORDER BY value + SETTINGS disk = '{table}'; + + SET enable_filesystem_cache_on_write_operations=1; + INSERT INTO {table} SELECT * FROM 
generateRandom('a Int32, b String') + LIMIT 1000; + """ + ) + + size = int( + node.query( + "SELECT value FROM system.metrics WHERE name = 'FilesystemCacheSize'" + ) + ) + assert ( + node.query( + "SELECT cache_name, sum(size) FROM system.filesystem_cache GROUP BY cache_name ORDER BY cache_name" + ).strip() + == f"cache1\t{size}\ncache2\t{size}" + ) diff --git a/tests/integration/test_grant_and_revoke/test.py b/tests/integration/test_grant_and_revoke/test.py index c8a0ee541e2..a86a1208f49 100644 --- a/tests/integration/test_grant_and_revoke/test.py +++ b/tests/integration/test_grant_and_revoke/test.py @@ -188,7 +188,7 @@ def test_grant_all_on_table(): instance.query("SHOW GRANTS FOR B") == "GRANT SHOW TABLES, SHOW COLUMNS, SHOW DICTIONARIES, SELECT, INSERT, ALTER TABLE, ALTER VIEW, CREATE TABLE, CREATE VIEW, CREATE DICTIONARY, " "DROP TABLE, DROP VIEW, DROP DICTIONARY, UNDROP TABLE, TRUNCATE, OPTIMIZE, BACKUP, CREATE ROW POLICY, ALTER ROW POLICY, DROP ROW POLICY, SHOW ROW POLICIES, " - "SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, SYSTEM MOVES, SYSTEM PULLING REPLICATION LOG, SYSTEM CLEANUP, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, " + "SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, SYSTEM MOVES, SYSTEM PULLING REPLICATION LOG, SYSTEM CLEANUP, SYSTEM VIEWS, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, " "SYSTEM RESTART REPLICA, SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM FLUSH DISTRIBUTED, dictGet ON test.table TO B\n" ) instance.query("REVOKE ALL ON test.table FROM B", user="A") diff --git a/tests/integration/test_reverse_dns_query/__init__.py b/tests/integration/test_keeper_http_control/__init__.py similarity index 100% rename from tests/integration/test_reverse_dns_query/__init__.py rename to tests/integration/test_keeper_http_control/__init__.py diff --git a/tests/integration/test_keeper_http_control/configs/enable_keeper1.xml b/tests/integration/test_keeper_http_control/configs/enable_keeper1.xml new file mode 100644 index 00000000000..20e3c307f31 --- /dev/null +++ b/tests/integration/test_keeper_http_control/configs/enable_keeper1.xml @@ -0,0 +1,37 @@ + + + 9181 + 1 + /var/lib/clickhouse/coordination/log + /var/lib/clickhouse/coordination/snapshots + + + 5000 + 10000 + trace + + + + + 1 + node1 + 9234 + + + 2 + node2 + 9234 + true + + + 3 + node3 + 9234 + true + + + + 9182 + + + diff --git a/tests/integration/test_keeper_http_control/configs/enable_keeper2.xml b/tests/integration/test_keeper_http_control/configs/enable_keeper2.xml new file mode 100644 index 00000000000..b9002eb2436 --- /dev/null +++ b/tests/integration/test_keeper_http_control/configs/enable_keeper2.xml @@ -0,0 +1,37 @@ + + + 9181 + 2 + /var/lib/clickhouse/coordination/log + /var/lib/clickhouse/coordination/snapshots + + + 5000 + 10000 + trace + + + + + 1 + node1 + 9234 + + + 2 + node2 + 9234 + true + + + 3 + node3 + 9234 + true + + + + 9182 + + + diff --git a/tests/integration/test_keeper_http_control/configs/enable_keeper3.xml b/tests/integration/test_keeper_http_control/configs/enable_keeper3.xml new file mode 100644 index 00000000000..6e4e17399f7 --- /dev/null +++ b/tests/integration/test_keeper_http_control/configs/enable_keeper3.xml @@ -0,0 +1,37 @@ + + + 9181 + 3 + /var/lib/clickhouse/coordination/log + /var/lib/clickhouse/coordination/snapshots + + + 5000 + 10000 + trace + + + + + 1 + node1 + 9234 + + + 2 + node2 + 9234 + true + + + 3 + node3 + 9234 + true + + + + 9182 + + + diff --git 
a/tests/integration/test_keeper_http_control/test.py b/tests/integration/test_keeper_http_control/test.py new file mode 100644 index 00000000000..65dc5bea909 --- /dev/null +++ b/tests/integration/test_keeper_http_control/test.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 + +import os +import pytest +import requests + +from helpers.cluster import ClickHouseCluster +from helpers.network import PartitionManager +import helpers.keeper_utils as keeper_utils + +cluster = ClickHouseCluster(__file__) +CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs") + +node1 = cluster.add_instance( + "node1", main_configs=["configs/enable_keeper1.xml"], stay_alive=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/enable_keeper2.xml"], stay_alive=True +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/enable_keeper3.xml"], stay_alive=True +) + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def test_http_readiness_basic_responses(started_cluster): + leader = keeper_utils.get_leader(cluster, [node1, node2, node3]) + response = requests.get( + "http://{host}:{port}/ready".format(host=leader.ip_address, port=9182) + ) + assert response.status_code == 200 + + readiness_data = response.json() + assert readiness_data["status"] == "ok" + assert readiness_data["details"]["role"] == "leader" + + follower = keeper_utils.get_any_follower(cluster, [node1, node2, node3]) + response = requests.get( + "http://{host}:{port}/ready".format(host=follower.ip_address, port=9182) + ) + assert response.status_code == 200 + + readiness_data = response.json() + assert readiness_data["status"] == "ok" + assert readiness_data["details"]["role"] == "follower" + assert readiness_data["details"]["hasLeader"] == True + + +def test_http_readiness_partitioned_cluster(started_cluster): + with PartitionManager() as pm: + leader = keeper_utils.get_leader(cluster, [node1, node2, node3]) + follower = keeper_utils.get_any_follower(cluster, [node1, node2, node3]) + + pm.partition_instances(leader, follower) + keeper_utils.wait_until_quorum_lost(cluster, follower) + + response = requests.get( + "http://{host}:{port}/ready".format(host=follower.ip_address, port=9182) + ) + print(response.json()) + assert response.status_code == 503 + + readiness_data = response.json() + assert readiness_data["status"] == "fail" + assert readiness_data["details"]["role"] == "follower" + assert readiness_data["details"]["hasLeader"] == False diff --git a/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config1.xml b/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config1.xml index fe45d09d915..642cf16414e 100644 --- a/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config1.xml +++ b/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config1.xml @@ -15,6 +15,7 @@ az-zoo1 1 + 200000000 10000 @@ -23,7 +24,6 @@ false 2000 4000 - 200000000 1 diff --git a/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config2.xml b/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config2.xml index f7f6a5718b5..25ececea3e8 100644 --- a/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config2.xml +++ b/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config2.xml @@ -16,6 +16,7 @@ az-zoo2 1 + 20000000 10000 @@ -24,7 +25,6 @@ false 2000 4000 - 20000000 1 diff --git 
a/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config3.xml b/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config3.xml index 82345aebc46..81e343b77c9 100644 --- a/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config3.xml +++ b/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config3.xml @@ -13,6 +13,8 @@ 2181 3 + 20000000 + 10000 15000 @@ -20,7 +22,6 @@ false 2000 4000 - 20000000 1 diff --git a/tests/integration/test_max_http_connections_for_replication/configs/remote_servers.xml b/tests/integration/test_max_http_connections_for_replication/configs/remote_servers.xml deleted file mode 100644 index e62425fe1bb..00000000000 --- a/tests/integration/test_max_http_connections_for_replication/configs/remote_servers.xml +++ /dev/null @@ -1,40 +0,0 @@ - - - - - true - - test - node1 - 9000 - - - test - node2 - 9000 - - - - - - true - - test - node3 - 9000 - - - test - node4 - 9000 - - - test - node5 - 9000 - - - - - - diff --git a/tests/integration/test_max_http_connections_for_replication/test.py b/tests/integration/test_max_http_connections_for_replication/test.py deleted file mode 100644 index bcb779ee913..00000000000 --- a/tests/integration/test_max_http_connections_for_replication/test.py +++ /dev/null @@ -1,157 +0,0 @@ -import time -from multiprocessing.dummy import Pool - -import pytest -from helpers.cluster import ClickHouseCluster -from helpers.test_tools import assert_eq_with_retry - - -def _fill_nodes(nodes, shard, connections_count): - for node in nodes: - node.query( - """ - CREATE DATABASE test; - - CREATE TABLE test_table(date Date, id UInt32, dummy UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}') - PARTITION BY date - ORDER BY id - SETTINGS - replicated_max_parallel_fetches_for_host={connections}, - index_granularity=8192; - """.format( - shard=shard, replica=node.name, connections=connections_count - ) - ) - - -cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance( - "node1", - user_configs=[], - main_configs=["configs/remote_servers.xml"], - with_zookeeper=True, -) -node2 = cluster.add_instance( - "node2", - user_configs=[], - main_configs=["configs/remote_servers.xml"], - with_zookeeper=True, -) - - -@pytest.fixture(scope="module") -def start_small_cluster(): - try: - cluster.start() - - _fill_nodes([node1, node2], 1, 1) - - yield cluster - - finally: - cluster.shutdown() - - -def test_single_endpoint_connections_count(start_small_cluster): - node1.query("TRUNCATE TABLE test_table") - node2.query("SYSTEM SYNC REPLICA test_table") - - def task(count): - print(("Inserting ten times from {}".format(count))) - for i in range(count, count + 10): - node1.query("insert into test_table values ('2017-06-16', {}, 0)".format(i)) - - p = Pool(10) - p.map(task, range(0, 100, 10)) - - assert_eq_with_retry(node1, "select count() from test_table", "100") - assert_eq_with_retry(node2, "select count() from test_table", "100") - - assert ( - node2.query( - "SELECT value FROM system.events where event='CreatedHTTPConnections'" - ) - == "1\n" - ) - - -def test_keepalive_timeout(start_small_cluster): - node1.query("TRUNCATE TABLE test_table") - node2.query("SYSTEM SYNC REPLICA test_table") - - node1.query("insert into test_table values ('2017-06-16', 777, 0)") - assert_eq_with_retry(node2, "select count() from test_table", str(1)) - # Server keepAliveTimeout is 3 seconds, default client session timeout is 8 - # lets sleep in that interval - time.sleep(4) - - 
node1.query("insert into test_table values ('2017-06-16', 888, 0)") - - time.sleep(3) - - assert_eq_with_retry(node2, "select count() from test_table", str(2)) - - assert not node2.contains_in_log( - "No message received" - ), "Found 'No message received' in clickhouse-server.log" - - -node3 = cluster.add_instance( - "node3", - user_configs=[], - main_configs=["configs/remote_servers.xml"], - with_zookeeper=True, -) -node4 = cluster.add_instance( - "node4", - user_configs=[], - main_configs=["configs/remote_servers.xml"], - with_zookeeper=True, -) -node5 = cluster.add_instance( - "node5", - user_configs=[], - main_configs=["configs/remote_servers.xml"], - with_zookeeper=True, -) - - -@pytest.fixture(scope="module") -def start_big_cluster(): - try: - cluster.start() - - _fill_nodes([node3, node4, node5], 2, 2) - - yield cluster - - finally: - cluster.shutdown() - - -def test_multiple_endpoint_connections_count(start_big_cluster): - def task(count): - print(("Inserting ten times from {}".format(count))) - if (count / 10) % 2 == 1: - node = node3 - else: - node = node4 - - for i in range(count, count + 10): - node.query("insert into test_table values ('2017-06-16', {}, 0)".format(i)) - - p = Pool(10) - p.map(task, range(0, 100, 10)) - - assert_eq_with_retry(node3, "select count() from test_table", "100") - assert_eq_with_retry(node4, "select count() from test_table", "100") - assert_eq_with_retry(node5, "select count() from test_table", "100") - - # Two per each host or sometimes less, if fetches are not performed in parallel. But not more. - assert ( - node5.query( - "SELECT value FROM system.events where event='CreatedHTTPConnections'" - ) - <= "4\n" - ) diff --git a/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml b/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml index c12bdf064ce..7087c348072 100644 --- a/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml +++ b/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml @@ -156,6 +156,10 @@ 0 1.0 + 0 + 0 + 0 + 0 0 diff --git a/tests/integration/test_parallel_replicas_distributed_read_from_all/test.py b/tests/integration/test_parallel_replicas_distributed_read_from_all/test.py index fa1dfbefe52..8af7bb12595 100644 --- a/tests/integration/test_parallel_replicas_distributed_read_from_all/test.py +++ b/tests/integration/test_parallel_replicas_distributed_read_from_all/test.py @@ -119,7 +119,6 @@ def test_read_equally_from_each_replica(start_cluster, prefer_localhost_replica) "allow_experimental_parallel_reading_from_replicas": 2, "prefer_localhost_replica": prefer_localhost_replica, "max_parallel_replicas": 3, - "use_hedged_requests": 0, }, ) == expected_result @@ -143,9 +142,15 @@ def test_read_equally_from_each_replica(start_cluster, prefer_localhost_replica) nodes[0].query(f"system start fetches {table_name}") nodes[1].query(f"system start fetches {table_name}") nodes[2].query(f"system start fetches {table_name}") + # ensure that replica in sync before querying it to get stable result + nodes[0].query(f"system start merges {table_name}") + nodes[0].query(f"system sync replica {table_name}") assert ( nodes[0].query( - f"SELECT count(), min(key), max(key), sum(key) FROM {table_name}_d" + f"SELECT count(), min(key), max(key), sum(key) FROM {table_name}_d", + settings={ + "allow_experimental_parallel_reading_from_replicas": 0, + }, ) == expected_result ) diff --git a/tests/integration/test_parallel_replicas_distributed_skip_shards/test.py 
b/tests/integration/test_parallel_replicas_distributed_skip_shards/test.py index 315a9781c8b..af114ade2d7 100644 --- a/tests/integration/test_parallel_replicas_distributed_skip_shards/test.py +++ b/tests/integration/test_parallel_replicas_distributed_skip_shards/test.py @@ -84,7 +84,6 @@ def test_skip_unavailable_shards(start_cluster, prefer_localhost_replica): settings={ "allow_experimental_parallel_reading_from_replicas": 2, "max_parallel_replicas": 3, - "use_hedged_requests": 0, "prefer_localhost_replica": prefer_localhost_replica, "skip_unavailable_shards": 1, "connections_with_failover_max_tries": 0, # just don't wait for unavailable replicas @@ -119,7 +118,6 @@ def test_error_on_unavailable_shards(start_cluster, prefer_localhost_replica): settings={ "allow_experimental_parallel_reading_from_replicas": 2, "max_parallel_replicas": 3, - "use_hedged_requests": 0, "prefer_localhost_replica": prefer_localhost_replica, "skip_unavailable_shards": 0, }, @@ -155,7 +153,6 @@ def test_no_unavailable_shards(start_cluster, skip_unavailable_shards): settings={ "allow_experimental_parallel_reading_from_replicas": 2, "max_parallel_replicas": 3, - "use_hedged_requests": 0, "prefer_localhost_replica": 0, "skip_unavailable_shards": skip_unavailable_shards, }, diff --git a/tests/integration/test_parallel_replicas_over_distributed/test.py b/tests/integration/test_parallel_replicas_over_distributed/test.py index ecfc2ddea63..3d35ec3fa8e 100644 --- a/tests/integration/test_parallel_replicas_over_distributed/test.py +++ b/tests/integration/test_parallel_replicas_over_distributed/test.py @@ -129,6 +129,9 @@ def test_parallel_replicas_over_distributed( node = nodes[0] expected_result = f"6003\t-1999\t1999\t3\n" + # sync all replicas to get consistent result + node.query(f"SYSTEM SYNC REPLICA ON CLUSTER {cluster} {table_name}") + # parallel replicas assert ( node.query( @@ -137,17 +140,18 @@ def test_parallel_replicas_over_distributed( "allow_experimental_parallel_reading_from_replicas": 2, "prefer_localhost_replica": prefer_localhost_replica, "max_parallel_replicas": max_parallel_replicas, - "use_hedged_requests": 0, }, ) == expected_result ) - # sync all replicas to get consistent result by next distributed query - node.query(f"SYSTEM SYNC REPLICA ON CLUSTER {cluster} {table_name}") - # w/o parallel replicas assert ( - node.query(f"SELECT count(), min(key), max(key), sum(key) FROM {table_name}_d") + node.query( + f"SELECT count(), min(key), max(key), sum(key) FROM {table_name}_d", + settings={ + "allow_experimental_parallel_reading_from_replicas": 0, + }, + ) == expected_result ) diff --git a/tests/integration/test_parallel_replicas_skip_shards/test.py b/tests/integration/test_parallel_replicas_skip_shards/test.py index 3df80ba061e..a18c82a53a9 100644 --- a/tests/integration/test_parallel_replicas_skip_shards/test.py +++ b/tests/integration/test_parallel_replicas_skip_shards/test.py @@ -38,7 +38,6 @@ def test_skip_unavailable_shards(start_cluster): settings={ "allow_experimental_parallel_reading_from_replicas": 2, "max_parallel_replicas": 3, - "use_hedged_requests": 0, "skip_unavailable_shards": 1, # "async_socket_for_remote" : 0, # "async_query_sending_for_remote" : 0, @@ -65,7 +64,6 @@ def test_error_on_unavailable_shards(start_cluster): settings={ "allow_experimental_parallel_reading_from_replicas": 2, "max_parallel_replicas": 3, - "use_hedged_requests": 0, "skip_unavailable_shards": 0, }, ) diff --git a/tests/queries/0_stateless/01658_substring_ubsan.reference 
b/tests/integration/test_parallel_replicas_working_set/__init__.py similarity index 100% rename from tests/queries/0_stateless/01658_substring_ubsan.reference rename to tests/integration/test_parallel_replicas_working_set/__init__.py diff --git a/tests/integration/test_parallel_replicas_working_set/configs/remote_servers.xml b/tests/integration/test_parallel_replicas_working_set/configs/remote_servers.xml new file mode 100644 index 00000000000..02a315479f8 --- /dev/null +++ b/tests/integration/test_parallel_replicas_working_set/configs/remote_servers.xml @@ -0,0 +1,22 @@ + + + + + true + + n1 + 9000 + + + n2 + 9000 + + + n3 + 9000 + + + + + + diff --git a/tests/integration/test_parallel_replicas_working_set/test.py b/tests/integration/test_parallel_replicas_working_set/test.py new file mode 100644 index 00000000000..0ede9d9b1a5 --- /dev/null +++ b/tests/integration/test_parallel_replicas_working_set/test.py @@ -0,0 +1,140 @@ +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) + +nodes = [ + cluster.add_instance( + f"n{i}", main_configs=["configs/remote_servers.xml"], with_zookeeper=True + ) + for i in (1, 2, 3) +] + + +@pytest.fixture(scope="module", autouse=True) +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def create_tables(cluster, table_name, node_with_covering_part): + # create replicated tables + for node in nodes: + node.query(f"DROP TABLE IF EXISTS {table_name} SYNC") + + nodes[0].query( + f"""CREATE TABLE IF NOT EXISTS {table_name} (key Int64, value String) Engine=ReplicatedMergeTree('/test_parallel_replicas/shard1/{table_name}', 'r1') + ORDER BY (key)""" + ) + nodes[1].query( + f"""CREATE TABLE IF NOT EXISTS {table_name} (key Int64, value String) Engine=ReplicatedMergeTree('/test_parallel_replicas/shard1/{table_name}', 'r2') + ORDER BY (key)""" + ) + nodes[2].query( + f"""CREATE TABLE IF NOT EXISTS {table_name} (key Int64, value String) Engine=ReplicatedMergeTree('/test_parallel_replicas/shard1/{table_name}', 'r3') + ORDER BY (key)""" + ) + # stop merges to keep original parts + # stop fetches to keep only parts created on the nodes + for i in (0, 1, 2): + if i != node_with_covering_part: + nodes[i].query(f"system stop fetches {table_name}") + nodes[i].query(f"system stop merges {table_name}") + + # populate data, equal number of rows for each replica + nodes[0].query( + f"INSERT INTO {table_name} SELECT number, number FROM numbers(10)", + ) + nodes[0].query( + f"INSERT INTO {table_name} SELECT number, number FROM numbers(10, 10)" + ) + nodes[1].query( + f"INSERT INTO {table_name} SELECT number, number FROM numbers(20, 10)" + ) + nodes[1].query( + f"INSERT INTO {table_name} SELECT number, number FROM numbers(30, 10)" + ) + nodes[2].query( + f"INSERT INTO {table_name} SELECT number, number FROM numbers(40, 10)" + ) + nodes[2].query( + f"INSERT INTO {table_name} SELECT number, number FROM numbers(50, 10)" + ) + nodes[node_with_covering_part].query(f"system sync replica {table_name}") + nodes[node_with_covering_part].query(f"optimize table {table_name}") + + # check we have expected set of parts + expected_active_parts = "" + if node_with_covering_part == 0: + expected_active_parts = ( + "all_0_5_1\nall_2_2_0\nall_3_3_0\nall_4_4_0\nall_5_5_0\n" + ) + + if node_with_covering_part == 1: + expected_active_parts = ( + "all_0_0_0\nall_0_5_1\nall_1_1_0\nall_4_4_0\nall_5_5_0\n" + ) + + if node_with_covering_part == 2: + expected_active_parts = ( + 
"all_0_0_0\nall_0_5_1\nall_1_1_0\nall_2_2_0\nall_3_3_0\n" + ) + + assert ( + nodes[0].query( + f"select distinct name from clusterAllReplicas({cluster}, system.parts) where table='{table_name}' and active order by name" + ) + == expected_active_parts + ) + + +@pytest.mark.parametrize("node_with_covering_part", [0, 1, 2]) +def test_covering_part_in_announcement(start_cluster, node_with_covering_part): + """create and populate table in special way (see create_table()), + node_with_covering_part contains all parts merged into one, + other nodes contain only parts which are result of insert via the node + """ + + cluster = "test_single_shard_multiple_replicas" + table_name = "test_table" + create_tables(cluster, table_name, node_with_covering_part) + + # query result can be one of the following outcomes + # (1) query result if parallel replicas working set contains all_0_5_1 + expected_full_result = "60\t0\t59\t1770\n" + expected_results = {expected_full_result} + + # (2) query result if parallel replicas working set DOESN'T contain all_0_5_1 + if node_with_covering_part == 0: + expected_results.add("40\t20\t59\t1580\n") + if node_with_covering_part == 1: + expected_results.add("40\t0\t59\t1180\n") + if node_with_covering_part == 2: + expected_results.add("40\t0\t39\t780\n") + + # parallel replicas + result = nodes[0].query( + f"SELECT count(), min(key), max(key), sum(key) FROM {table_name}", + settings={ + "allow_experimental_parallel_reading_from_replicas": 2, + "prefer_localhost_replica": 0, + "max_parallel_replicas": 3, + "use_hedged_requests": 0, + "cluster_for_parallel_replicas": cluster, + }, + ) + assert result in expected_results + + # w/o parallel replicas + assert ( + nodes[node_with_covering_part].query( + f"SELECT count(), min(key), max(key), sum(key) FROM {table_name}", + settings={ + "allow_experimental_parallel_reading_from_replicas": 0, + }, + ) + == expected_full_result + ) diff --git a/tests/integration/test_postgresql_replica_database_engine_1/test.py b/tests/integration/test_postgresql_replica_database_engine_1/test.py index c118080a572..f04425d83d4 100644 --- a/tests/integration/test_postgresql_replica_database_engine_1/test.py +++ b/tests/integration/test_postgresql_replica_database_engine_1/test.py @@ -393,18 +393,19 @@ def test_table_schema_changes(started_cluster): def test_many_concurrent_queries(started_cluster): + table = "test_many_conc" query_pool = [ - "DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;", - "UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;", - "DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;", - "UPDATE postgresql_replica_{} SET value = value*5 WHERE key % 2 = 1;", - "DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;", - "UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key % 5 = 0;", - "DELETE FROM postgresql_replica_{} WHERE value % 3 = 0;", - "UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 = 0;", - "DELETE FROM postgresql_replica_{} WHERE value % 9 = 2;", - "UPDATE postgresql_replica_{} SET value = value + 2 WHERE key % 3 = 1;", - "DELETE FROM postgresql_replica_{} WHERE value%5 = 0;", + "DELETE FROM {} WHERE (value*value) % 3 = 0;", + "UPDATE {} SET value = value - 125 WHERE key % 2 = 0;", + "DELETE FROM {} WHERE key % 10 = 0;", + "UPDATE {} SET value = value*5 WHERE key % 2 = 1;", + "DELETE FROM {} WHERE value % 2 = 0;", + "UPDATE {} SET value = value + 2000 WHERE key % 5 = 0;", + "DELETE FROM {} WHERE value % 3 = 0;", + "UPDATE {} SET value = value * 2 WHERE key % 3 
= 0;", + "DELETE FROM {} WHERE value % 9 = 2;", + "UPDATE {} SET value = value + 2 WHERE key % 3 = 1;", + "DELETE FROM {} WHERE value%5 = 0;", ] NUM_TABLES = 5 @@ -415,7 +416,9 @@ def test_many_concurrent_queries(started_cluster): database=True, ) cursor = conn.cursor() - pg_manager.create_and_fill_postgres_tables(NUM_TABLES, numbers=10000) + pg_manager.create_and_fill_postgres_tables( + NUM_TABLES, numbers=10000, table_name_base=table + ) def attack(thread_id): print("thread {}".format(thread_id)) @@ -423,17 +426,23 @@ def test_many_concurrent_queries(started_cluster): for i in range(20): query_id = random.randrange(0, len(query_pool) - 1) table_id = random.randrange(0, 5) # num tables + random_table_name = f"{table}_{table_id}" + table_name = f"{table}_{thread_id}" # random update / delete query - cursor.execute(query_pool[query_id].format(table_id)) - print("table {} query {} ok".format(table_id, query_id)) + cursor.execute(query_pool[query_id].format(random_table_name)) + print( + "Executing for table {} query: {}".format( + random_table_name, query_pool[query_id] + ) + ) # allow some thread to do inserts (not to violate key constraints) if thread_id < 5: print("try insert table {}".format(thread_id)) instance.query( - "INSERT INTO postgres_database.postgresql_replica_{} SELECT {}*10000*({} + number), number from numbers(1000)".format( - i, thread_id, k + "INSERT INTO postgres_database.{} SELECT {}*10000*({} + number), number from numbers(1000)".format( + table_name, thread_id, k ) ) k += 1 @@ -443,8 +452,8 @@ def test_many_concurrent_queries(started_cluster): # also change primary key value print("try update primary key {}".format(thread_id)) cursor.execute( - "UPDATE postgresql_replica_{} SET key=key%100000+100000*{} WHERE key%{}=0".format( - thread_id, i + 1, i + 1 + "UPDATE {table}_{} SET key=key%100000+100000*{} WHERE key%{}=0".format( + table_name, i + 1, i + 1 ) ) print("update primary key {} ok".format(thread_id)) @@ -467,25 +476,25 @@ def test_many_concurrent_queries(started_cluster): n[0] = 50000 for table_id in range(NUM_TABLES): n[0] += 1 + table_name = f"{table}_{table_id}" instance.query( - "INSERT INTO postgres_database.postgresql_replica_{} SELECT {} + number, number from numbers(5000)".format( - table_id, n[0] + "INSERT INTO postgres_database.{} SELECT {} + number, number from numbers(5000)".format( + table_name, n[0] ) ) - # cursor.execute("UPDATE postgresql_replica_{} SET key=key%100000+100000*{} WHERE key%{}=0".format(table_id, table_id+1, table_id+1)) + # cursor.execute("UPDATE {table}_{} SET key=key%100000+100000*{} WHERE key%{}=0".format(table_id, table_id+1, table_id+1)) for thread in threads: thread.join() for i in range(NUM_TABLES): - check_tables_are_synchronized(instance, "postgresql_replica_{}".format(i)) + table_name = f"{table}_{i}" + check_tables_are_synchronized(instance, table_name) count1 = instance.query( - "SELECT count() FROM postgres_database.postgresql_replica_{}".format(i) + "SELECT count() FROM postgres_database.{}".format(table_name) ) count2 = instance.query( - "SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{})".format( - i - ) + "SELECT count() FROM (SELECT * FROM test_database.{})".format(table_name) ) assert int(count1) == int(count2) print(count1, count2) diff --git a/tests/integration/test_postgresql_replica_database_engine_2/configs/log_conf.xml b/tests/integration/test_postgresql_replica_database_engine_2/configs/log_conf.xml index 6cc1128e130..c9f6195a014 100644 --- 
a/tests/integration/test_postgresql_replica_database_engine_2/configs/log_conf.xml +++ b/tests/integration/test_postgresql_replica_database_engine_2/configs/log_conf.xml @@ -24,4 +24,10 @@ postgres_database + + system +
text_log
+ 7500 + Test + diff --git a/tests/integration/test_postgresql_replica_database_engine_2/test.py b/tests/integration/test_postgresql_replica_database_engine_2/test.py index df72a2f705c..5553f400c0d 100644 --- a/tests/integration/test_postgresql_replica_database_engine_2/test.py +++ b/tests/integration/test_postgresql_replica_database_engine_2/test.py @@ -944,6 +944,156 @@ def test_symbols_in_publication_name(started_cluster): ) +def test_generated_columns(started_cluster): + table = "test_generated_columns" + + pg_manager.create_postgres_table( + table, + "", + f"""CREATE TABLE {table} ( + key integer PRIMARY KEY, + x integer, + y integer GENERATED ALWAYS AS (x*2) STORED, + z text); + """, + ) + + pg_manager.execute(f"insert into {table} (key, x, z) values (1,1,'1');") + pg_manager.execute(f"insert into {table} (key, x, z) values (2,2,'2');") + + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=[ + f"materialized_postgresql_tables_list = '{table}'", + "materialized_postgresql_backoff_min_ms = 100", + "materialized_postgresql_backoff_max_ms = 100", + ], + ) + + check_tables_are_synchronized( + instance, table, postgres_database=pg_manager.get_default_database() + ) + + pg_manager.execute(f"insert into {table} (key, x, z) values (3,3,'3');") + pg_manager.execute(f"insert into {table} (key, x, z) values (4,4,'4');") + + check_tables_are_synchronized( + instance, table, postgres_database=pg_manager.get_default_database() + ) + + pg_manager.execute(f"insert into {table} (key, x, z) values (5,5,'5');") + pg_manager.execute(f"insert into {table} (key, x, z) values (6,6,'6');") + + check_tables_are_synchronized( + instance, table, postgres_database=pg_manager.get_default_database() + ) + + +def test_default_columns(started_cluster): + table = "test_default_columns" + + pg_manager.create_postgres_table( + table, + "", + f"""CREATE TABLE {table} ( + key integer PRIMARY KEY, + x integer, + y text DEFAULT 'y1', + z integer, + a text DEFAULT 'a1', + b integer); + """, + ) + + pg_manager.execute(f"insert into {table} (key, x, z, b) values (1,1,1,1);") + pg_manager.execute(f"insert into {table} (key, x, z, b) values (2,2,2,2);") + + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=[ + f"materialized_postgresql_tables_list = '{table}'", + "materialized_postgresql_backoff_min_ms = 100", + "materialized_postgresql_backoff_max_ms = 100", + ], + ) + + check_tables_are_synchronized( + instance, table, postgres_database=pg_manager.get_default_database() + ) + + pg_manager.execute(f"insert into {table} (key, x, z, b) values (3,3,3,3);") + pg_manager.execute(f"insert into {table} (key, x, z, b) values (4,4,4,4);") + + check_tables_are_synchronized( + instance, table, postgres_database=pg_manager.get_default_database() + ) + + pg_manager.execute(f"insert into {table} (key, x, z, b) values (5,5,5,5);") + pg_manager.execute(f"insert into {table} (key, x, z, b) values (6,6,6,6);") + + check_tables_are_synchronized( + instance, table, postgres_database=pg_manager.get_default_database() + ) + + +def test_dependent_loading(started_cluster): + table = "test_dependent_loading" + + pg_manager.create_postgres_table(table) + instance.query( + f"INSERT INTO postgres_database.{table} SELECT number, number from numbers(0, 50)" + ) + + instance.query( + f""" + SET allow_experimental_materialized_postgresql_table=1; + CREATE TABLE {table} (key Int32, value Int32) + 
ENGINE=MaterializedPostgreSQL('{started_cluster.postgres_ip}:{started_cluster.postgres_port}', 'postgres_database', '{table}', 'postgres', 'mysecretpassword') ORDER BY key + """ + ) + + check_tables_are_synchronized( + instance, + table, + postgres_database=pg_manager.get_default_database(), + materialized_database="default", + ) + + assert 50 == int(instance.query(f"SELECT count() FROM {table}")) + + instance.restart_clickhouse() + + check_tables_are_synchronized( + instance, + table, + postgres_database=pg_manager.get_default_database(), + materialized_database="default", + ) + + assert 50 == int(instance.query(f"SELECT count() FROM {table}")) + + uuid = instance.query( + f"SELECT uuid FROM system.tables WHERE name='{table}' and database='default' limit 1" + ).strip() + nested_table = f"default.`{uuid}_nested`" + instance.contains_in_log( + f"Table default.{table} has 1 dependencies: {nested_table} (level 1)" + ) + + instance.query("SYSTEM FLUSH LOGS") + nested_time = instance.query( + f"SELECT event_time_microseconds FROM system.text_log WHERE message like 'Loading table default.{uuid}_nested' and message not like '%like%'" + ).strip() + time = instance.query( + f"SELECT event_time_microseconds FROM system.text_log WHERE message like 'Loading table default.{table}' and message not like '%like%'" + ).strip() + instance.query( + f"SELECT toDateTime64('{nested_time}', 6) < toDateTime64('{time}', 6)" + ) + + if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") diff --git a/tests/integration/test_quorum_inserts_parallel/test.py b/tests/integration/test_quorum_inserts_parallel/test.py index 72780c16319..f30f57cc1d6 100644 --- a/tests/integration/test_quorum_inserts_parallel/test.py +++ b/tests/integration/test_quorum_inserts_parallel/test.py @@ -115,9 +115,8 @@ def test_parallel_quorum_actually_quorum(started_cluster): error = node.query_and_get_error( "INSERT INTO q VALUES(3, 'Hi')", settings=settings ) - assert "DB::Exception: Unknown status, client must retry." in error, error assert ( - "DB::Exception: Timeout while waiting for quorum. (TIMEOUT_EXCEEDED)" + "DB::Exception: Unknown quorum status. The data was inserted in the local replica but we could not verify quorum. Reason: Timeout while waiting for quorum" in error ), error diff --git a/tests/integration/test_reload_query_masking_rules/configs/changed_settings.xml b/tests/integration/test_reload_query_masking_rules/configs/changed_settings.xml deleted file mode 100644 index d681496d843..00000000000 --- a/tests/integration/test_reload_query_masking_rules/configs/changed_settings.xml +++ /dev/null @@ -1,19 +0,0 @@ - - - system - query_log
- toYYYYMM(event_date) - 7500 - 1048576 - 8192 - 524288 - false -
- - - - TOPSECRET.TOPSECRET - [hidden] - - -
diff --git a/tests/integration/test_reload_query_masking_rules/configs/empty_settings.xml b/tests/integration/test_reload_query_masking_rules/configs/empty_settings.xml deleted file mode 100644 index 82647ff82b5..00000000000 --- a/tests/integration/test_reload_query_masking_rules/configs/empty_settings.xml +++ /dev/null @@ -1,12 +0,0 @@ - - - system - query_log
- toYYYYMM(event_date) - 7500 - 1048576 - 8192 - 524288 - false -
-
diff --git a/tests/integration/test_reload_query_masking_rules/test.py b/tests/integration/test_reload_query_masking_rules/test.py deleted file mode 100644 index f269aefbacb..00000000000 --- a/tests/integration/test_reload_query_masking_rules/test.py +++ /dev/null @@ -1,57 +0,0 @@ -import pytest -import os -from helpers.cluster import ClickHouseCluster -from helpers.test_tools import assert_eq_with_retry, assert_logs_contain_with_retry - -SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node", user_configs=["configs/empty_settings.xml"]) - - -@pytest.fixture(scope="module", autouse=True) -def started_cluster(): - try: - cluster.start() - yield cluster - finally: - cluster.shutdown() - - -@pytest.fixture(autouse=True) -def reset_to_normal_settings_after_test(): - try: - node.copy_file_to_container( - os.path.join(SCRIPT_DIR, "configs/empty_settings.xml"), - "/etc/clickhouse-server/config.d/z.xml", - ) - node.query("SYSTEM RELOAD CONFIG") - yield - finally: - pass - - -# @pytest.mark.parametrize("reload_strategy", ["force", "timeout"]) -def test_reload_query_masking_rules(): - # At first, empty configuration is fed to ClickHouse. The query - # "SELECT 'TOPSECRET.TOPSECRET'" will not be redacted, and the new masking - # event will not be registered - node.query("SELECT 'TOPSECRET.TOPSECRET'") - assert_logs_contain_with_retry(node, "SELECT 'TOPSECRET.TOPSECRET'") - assert not node.contains_in_log(r"SELECT '\[hidden\]'") - node.rotate_logs() - - node.copy_file_to_container( - os.path.join(SCRIPT_DIR, "configs/changed_settings.xml"), - "/etc/clickhouse-server/config.d/z.xml", - ) - - node.query("SYSTEM RELOAD CONFIG") - - # Now the same query will be redacted in the logs and the counter of events - # will be incremented - node.query("SELECT 'TOPSECRET.TOPSECRET'") - - assert_logs_contain_with_retry(node, r"SELECT '\[hidden\]'") - assert not node.contains_in_log("SELECT 'TOPSECRET.TOPSECRET'") - - node.rotate_logs() diff --git a/tests/integration/test_replicated_fetches_timeouts/configs/timeouts_for_fetches.xml b/tests/integration/test_replicated_fetches_timeouts/configs/timeouts_for_fetches.xml new file mode 100644 index 00000000000..b163c6f54a1 --- /dev/null +++ b/tests/integration/test_replicated_fetches_timeouts/configs/timeouts_for_fetches.xml @@ -0,0 +1 @@ + diff --git a/tests/integration/test_replicated_fetches_timeouts/test.py b/tests/integration/test_replicated_fetches_timeouts/test.py index 7d5da55549c..55fa4b909ba 100644 --- a/tests/integration/test_replicated_fetches_timeouts/test.py +++ b/tests/integration/test_replicated_fetches_timeouts/test.py @@ -10,13 +10,25 @@ from helpers.network import PartitionManager cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( - "node1", with_zookeeper=True, main_configs=["configs/server.xml"] + "node1", + with_zookeeper=True, + main_configs=["configs/server.xml", "configs/timeouts_for_fetches.xml"], ) node2 = cluster.add_instance( - "node2", with_zookeeper=True, main_configs=["configs/server.xml"] + "node2", + with_zookeeper=True, + stay_alive=True, + main_configs=["configs/server.xml", "configs/timeouts_for_fetches.xml"], ) +config = """ + + 30 + 1 + +""" + @pytest.fixture(scope="module") def started_cluster(): @@ -49,14 +61,10 @@ def test_no_stall(started_cluster): node2.query("SYSTEM STOP FETCHES t") node1.query( - "INSERT INTO t SELECT 1, '{}' FROM numbers(500)".format( - get_random_string(104857) - ) + f"INSERT INTO t SELECT 1, 
'{get_random_string(104857)}' FROM numbers(500)" ) node1.query( - "INSERT INTO t SELECT 2, '{}' FROM numbers(500)".format( - get_random_string(104857) - ) + f"INSERT INTO t SELECT 2, '{get_random_string(104857)}' FROM numbers(500)" ) with PartitionManager() as pm: @@ -82,14 +90,12 @@ def test_no_stall(started_cluster): print("Connection timeouts tested!") - # Increase connection timeout and wait for receive timeouts. - node2.query( - """ - ALTER TABLE t - MODIFY SETTING replicated_fetches_http_connection_timeout = 30, - replicated_fetches_http_receive_timeout = 1""" + node2.replace_config( + "/etc/clickhouse-server/config.d/timeouts_for_fetches.xml", config ) + node2.restart_clickhouse() + while True: timeout_exceptions = int( node2.query( diff --git a/tests/integration/test_replicated_user_defined_functions/test.py b/tests/integration/test_replicated_user_defined_functions/test.py index f54be21c4c0..e5f6683b90b 100644 --- a/tests/integration/test_replicated_user_defined_functions/test.py +++ b/tests/integration/test_replicated_user_defined_functions/test.py @@ -116,7 +116,7 @@ def test_create_and_replace(): node1.query("CREATE FUNCTION f1 AS (x, y) -> x + y") assert node1.query("SELECT f1(12, 3)") == "15\n" - expected_error = "User-defined function 'f1' already exists" + expected_error = "User-defined object 'f1' already exists" assert expected_error in node1.query_and_get_error( "CREATE FUNCTION f1 AS (x, y) -> x + 2 * y" ) @@ -135,7 +135,7 @@ def test_drop_if_exists(): node1.query("DROP FUNCTION IF EXISTS f1") node1.query("DROP FUNCTION IF EXISTS f1") - expected_error = "User-defined function 'f1' doesn't exist" + expected_error = "User-defined object 'f1' doesn't exist" assert expected_error in node1.query_and_get_error("DROP FUNCTION f1") diff --git a/tests/integration/test_replicated_users/test.py b/tests/integration/test_replicated_users/test.py index 489724ed4fb..e34495a0071 100644 --- a/tests/integration/test_replicated_users/test.py +++ b/tests/integration/test_replicated_users/test.py @@ -114,6 +114,41 @@ def test_create_replicated_on_cluster_ignore(started_cluster, entity): node1.query(f"DROP {entity.keyword} {entity.name} {entity.options}") +@pytest.mark.parametrize( + "use_on_cluster", + [ + pytest.param(False, id="Without_on_cluster"), + pytest.param(True, id="With_ignored_on_cluster"), + ], +) +def test_grant_revoke_replicated(started_cluster, use_on_cluster: bool): + node1.replace_config( + "/etc/clickhouse-server/users.d/users.xml", + inspect.cleandoc( + f""" + + + + {int(use_on_cluster)} + + + + """ + ), + ) + node1.query("SYSTEM RELOAD CONFIG") + on_cluster = "ON CLUSTER default" if use_on_cluster else "" + + node1.query(f"CREATE USER theuser {on_cluster}") + + assert node1.query(f"GRANT {on_cluster} SELECT ON *.* to theuser") == "" + + assert node2.query(f"SHOW GRANTS FOR theuser") == "GRANT SELECT ON *.* TO theuser\n" + + assert node1.query(f"REVOKE {on_cluster} SELECT ON *.* from theuser") == "" + node1.query(f"DROP USER theuser {on_cluster}") + + @pytest.mark.parametrize("entity", entities, ids=get_entity_id) def test_create_replicated_if_not_exists_on_cluster(started_cluster, entity): node1.query( diff --git a/tests/integration/test_reverse_dns_query/configs/config.xml b/tests/integration/test_reverse_dns_query/configs/config.xml deleted file mode 100644 index 5ce55afa2a7..00000000000 --- a/tests/integration/test_reverse_dns_query/configs/config.xml +++ /dev/null @@ -1,3 +0,0 @@ - - 1 - diff --git a/tests/integration/test_reverse_dns_query/configs/listen_host.xml 
b/tests/integration/test_reverse_dns_query/configs/listen_host.xml deleted file mode 100644 index 9c27c612f63..00000000000 --- a/tests/integration/test_reverse_dns_query/configs/listen_host.xml +++ /dev/null @@ -1,5 +0,0 @@ - - :: - 0.0.0.0 - 1 - diff --git a/tests/integration/test_reverse_dns_query/configs/reverse_dns_function.xml b/tests/integration/test_reverse_dns_query/configs/reverse_dns_function.xml deleted file mode 100644 index 35d0a07c6a6..00000000000 --- a/tests/integration/test_reverse_dns_query/configs/reverse_dns_function.xml +++ /dev/null @@ -1,3 +0,0 @@ - - 1 - diff --git a/tests/integration/test_reverse_dns_query/coredns_config/Corefile b/tests/integration/test_reverse_dns_query/coredns_config/Corefile deleted file mode 100644 index 3edf37dafa5..00000000000 --- a/tests/integration/test_reverse_dns_query/coredns_config/Corefile +++ /dev/null @@ -1,8 +0,0 @@ -. { - hosts /example.com { - reload "20ms" - fallthrough - } - forward . 127.0.0.11 - log -} diff --git a/tests/integration/test_reverse_dns_query/coredns_config/example.com b/tests/integration/test_reverse_dns_query/coredns_config/example.com deleted file mode 100644 index 6c6e4cbee2e..00000000000 --- a/tests/integration/test_reverse_dns_query/coredns_config/example.com +++ /dev/null @@ -1 +0,0 @@ -filled in runtime, but needs to exist in order to be volume mapped in docker diff --git a/tests/integration/test_reverse_dns_query/test.py b/tests/integration/test_reverse_dns_query/test.py deleted file mode 100644 index 00c3956f74f..00000000000 --- a/tests/integration/test_reverse_dns_query/test.py +++ /dev/null @@ -1,74 +0,0 @@ -import pytest -import socket -from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check -from time import sleep -import os - -DOCKER_COMPOSE_PATH = get_docker_compose_path() -SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) - -cluster = ClickHouseCluster(__file__) - -ch_server = cluster.add_instance( - "clickhouse-server", - with_coredns=True, - main_configs=[ - "configs/config.xml", - "configs/reverse_dns_function.xml", - "configs/listen_host.xml", - ], -) - - -@pytest.fixture(scope="module") -def started_cluster(): - global cluster - try: - cluster.start() - yield cluster - - finally: - cluster.shutdown() - - -def check_ptr_record(ip, hostname): - try: - host, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) - if hostname.lower() == host.lower(): - return True - except socket.herror: - pass - return False - - -def setup_dns_server(ip): - domains_string = "test.example.com" - example_file_path = f'{ch_server.env_variables["COREDNS_CONFIG_DIR"]}/example.com' - run_and_check(f"echo '{ip} {domains_string}' > {example_file_path}", shell=True) - - # DNS server takes time to reload the configuration. 
- for try_num in range(10): - if all(check_ptr_record(ip, host) for host in domains_string.split()): - break - sleep(1) - - -def setup_ch_server(dns_server_ip): - ch_server.exec_in_container( - (["bash", "-c", f"echo 'nameserver {dns_server_ip}' > /etc/resolv.conf"]) - ) - ch_server.exec_in_container( - (["bash", "-c", "echo 'options ndots:0' >> /etc/resolv.conf"]) - ) - ch_server.query("SYSTEM DROP DNS CACHE") - - -def test_reverse_dns_query(started_cluster): - dns_server_ip = cluster.get_instance_ip(cluster.coredns_host) - random_ipv6 = "4ae8:fa0f:ee1d:68c5:0b76:1b79:7ae6:1549" # https://commentpicker.com/ip-address-generator.php - setup_dns_server(random_ipv6) - setup_ch_server(dns_server_ip) - - for _ in range(0, 200): - response = ch_server.query(f"select reverseDNSQuery('{random_ipv6}')") - assert response == "['test.example.com']\n" diff --git a/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml b/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml index 7cb7f50582c..8df9e8e8c26 100644 --- a/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml +++ b/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml @@ -69,6 +69,9 @@ 1024 1 + 0 + 0 + 0 true 1.0 diff --git a/tests/integration/test_storage_azure_blob_storage/test.py b/tests/integration/test_storage_azure_blob_storage/test.py index 96fff6b891f..3cccd07c134 100644 --- a/tests/integration/test_storage_azure_blob_storage/test.py +++ b/tests/integration/test_storage_azure_blob_storage/test.py @@ -910,6 +910,66 @@ def check_cache(instance, expected_files): ) +def test_union_schema_inference_mode(cluster): + node = cluster.instances["node"] + storage_account_url = cluster.env_variables["AZURITE_STORAGE_ACCOUNT_URL"] + account_name = "devstoreaccount1" + account_key = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference1.jsonl', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'a UInt32') VALUES (1)", + ) + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference2.jsonl', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'b UInt32') VALUES (2)", + ) + + node.query("system drop schema cache for azure") + + result = azure_query( + node, + f"desc azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference*.jsonl', '{account_name}', '{account_key}', 'auto', 'auto', 'auto') settings schema_inference_mode='union', describe_compact_output=1 format TSV", + ) + assert result == "a\tNullable(Int64)\nb\tNullable(Int64)\n" + + result = node.query( + "select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache where source like '%test_union_schema_inference%' order by file format TSV" + ) + assert ( + result == "UNION\ttest_union_schema_inference1.jsonl\ta Nullable(Int64)\n" + "UNION\ttest_union_schema_inference2.jsonl\tb Nullable(Int64)\n" + ) + result = azure_query( + node, + f"select * from azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference*.jsonl', '{account_name}', '{account_key}', 'auto', 'auto', 'auto') order by tuple(*) settings schema_inference_mode='union' format TSV", + ) + assert result == "1\t\\N\n" "\\N\t2\n" + node.query(f"system drop schema cache for hdfs") + result = azure_query( + node, + f"desc 
azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference2.jsonl', '{account_name}', '{account_key}', 'auto', 'auto', 'auto') settings schema_inference_mode='union', describe_compact_output=1 format TSV", + ) + assert result == "b\tNullable(Int64)\n" + + result = azure_query( + node, + f"desc azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference*.jsonl', '{account_name}', '{account_key}', 'auto', 'auto', 'auto') settings schema_inference_mode='union', describe_compact_output=1 format TSV", + ) + assert result == "a\tNullable(Int64)\n" "b\tNullable(Int64)\n" + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference3.jsonl', '{account_name}', '{account_key}', 'CSV', 'auto', 's String') VALUES ('Error')", + ) + + error = azure_query( + node, + f"desc azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference*.jsonl', '{account_name}', '{account_key}', 'auto', 'auto', 'auto') settings schema_inference_mode='union', describe_compact_output=1 format TSV", + expect_error="true", + ) + assert "Cannot extract table structure" in error + + def test_schema_inference_cache(cluster): node = cluster.instances["node"] connection_string = cluster.env_variables["AZURITE_CONNECTION_STRING"] diff --git a/tests/integration/test_storage_azure_blob_storage/test_cluster.py b/tests/integration/test_storage_azure_blob_storage/test_cluster.py index afcab6c216e..2bd3f24d25f 100644 --- a/tests/integration/test_storage_azure_blob_storage/test_cluster.py +++ b/tests/integration/test_storage_azure_blob_storage/test_cluster.py @@ -203,7 +203,7 @@ def test_unset_skip_unavailable_shards(cluster): ) result = azure_query( node, - f"SELECT count(*) from azureBlobStorageCluster('cluster_non_existent_port','{storage_account_url}', 'cont', 'test_skip_unavailable.csv', " + f"SELECT count(*) from azureBlobStorageCluster('cluster_non_existent_port','{storage_account_url}', 'cont', 'test_unset_skip_unavailable.csv', " f"'devstoreaccount1','Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==')", ) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index eacb5295079..8ed1e4b6c0e 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -998,6 +998,55 @@ def test_read_subcolumns(started_cluster): ) +def test_union_schema_inference_mode(started_cluster): + node = started_cluster.instances["node1"] + + node.query( + "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference1.jsonl') select 1 as a" + ) + + node.query( + "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference2.jsonl') select 2 as b" + ) + + node.query("system drop schema cache for hdfs") + + result = node.query( + "desc hdfs('hdfs://hdfs1:9000/test_union_schema_inference*.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + ) + assert result == "a\tNullable(Int64)\nb\tNullable(Int64)\n" + + result = node.query( + "select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache where source like '%test_union_schema_inference%' order by file format TSV" + ) + assert ( + result == "UNION\ttest_union_schema_inference1.jsonl\ta Nullable(Int64)\n" + "UNION\ttest_union_schema_inference2.jsonl\tb Nullable(Int64)\n" + ) + result = node.query( + "select * from 
hdfs('hdfs://hdfs1:9000/test_union_schema_inference*.jsonl') order by tuple(*) settings schema_inference_mode='union', describe_compact_output=1 format TSV" + ) + assert result == "1\t\\N\n" "\\N\t2\n" + node.query(f"system drop schema cache for hdfs") + result = node.query( + "desc hdfs('hdfs://hdfs1:9000/test_union_schema_inference2.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + ) + assert result == "b\tNullable(Int64)\n" + + result = node.query( + "desc hdfs('hdfs://hdfs1:9000/test_union_schema_inference*.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + ) + assert result == "a\tNullable(Int64)\n" "b\tNullable(Int64)\n" + node.query( + f"insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference3.jsonl', TSV) select 'Error'" + ) + + error = node.query_and_get_error( + "desc hdfs('hdfs://hdfs1:9000/test_union_schema_inference*.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + ) + assert "Cannot extract table structure" in error + + if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") diff --git a/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/key_value_message.capnp b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/key_value_message.capnp new file mode 100644 index 00000000000..19b7029dba3 --- /dev/null +++ b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/key_value_message.capnp @@ -0,0 +1,7 @@ +@0x99f75f775fe63dae; + +struct Message +{ + key @0 : UInt64; + value @1 : UInt64; +} \ No newline at end of file diff --git a/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/key_value_message.proto b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/key_value_message.proto new file mode 100644 index 00000000000..7c9d4ad0850 --- /dev/null +++ b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/key_value_message.proto @@ -0,0 +1,6 @@ +syntax = "proto3"; + +message Message { + uint64 key = 1; + uint64 value = 1; +} diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index b1191af60b7..2176b0151ff 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -4834,6 +4834,103 @@ JSONExtractString(rdkafka_stat, 'type'): consumer kafka_delete_topic(admin_client, topic) +def test_formats_errors(kafka_cluster): + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) + + for format_name in [ + "Template", + "Regexp", + "TSV", + "TSVWithNamesAndTypes", + "TSKV", + "CSV", + "CSVWithNames", + "CSVWithNamesAndTypes", + "CustomSeparated", + "CustomSeparatedWithNames", + "CustomSeparatedWithNamesAndTypes", + "Values", + "JSON", + "JSONEachRow", + "JSONStringsEachRow", + "JSONCompactEachRow", + "JSONCompactEachRowWithNamesAndTypes", + "JSONObjectEachRow", + "Avro", + "RowBinary", + "RowBinaryWithNamesAndTypes", + "MsgPack", + "JSONColumns", + "JSONCompactColumns", + "JSONColumnsWithMetadata", + "BSONEachRow", + "Native", + "Arrow", + "Parquet", + "ORC", + "JSONCompactColumns", + "Npy", + "ParquetMetadata", + "CapnProto", + "Protobuf", + "ProtobufSingle", + "ProtobufList", + "DWARF", + "HiveText", + "MySQLDump", + ]: + kafka_create_topic(admin_client, format_name) + table_name = f"kafka_{format_name}" + + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE 
IF EXISTS test.{table_name}; + + CREATE TABLE test.{table_name} (key UInt64, value UInt64) + ENGINE = Kafka + SETTINGS kafka_broker_list = 'kafka1:19092', + kafka_topic_list = '{format_name}', + kafka_group_name = '{format_name}', + kafka_format = '{format_name}', + kafka_max_rows_per_message = 5, + format_template_row='template_row.format', + format_regexp='id: (.+?)', + input_format_with_names_use_header=0, + format_schema='key_value_message:Message'; + + CREATE MATERIALIZED VIEW test.view Engine=Log AS + SELECT key, value FROM test.{table_name}; + """ + ) + + kafka_produce( + kafka_cluster, + format_name, + ["Broken message\nBroken message\nBroken message\n"], + ) + + attempt = 0 + num_errors = 0 + while attempt < 200: + num_errors = int( + instance.query( + f"SELECT length(exceptions.text) from system.kafka_consumers where database = 'test' and table = '{table_name}'" + ) + ) + if num_errors > 0: + break + attempt += 1 + + assert num_errors > 0 + + kafka_delete_topic(admin_client, format_name) + instance.query(f"DROP TABLE test.{table_name}") + instance.query("DROP TABLE test.view") + + if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") diff --git a/tests/integration/test_storage_postgresql/configs/named_collections.xml b/tests/integration/test_storage_postgresql/configs/named_collections.xml index 129225f36b9..4923c21d0a6 100644 --- a/tests/integration/test_storage_postgresql/configs/named_collections.xml +++ b/tests/integration/test_storage_postgresql/configs/named_collections.xml @@ -29,5 +29,12 @@ postgres test_replicas
+        <postgres5>
+            <user>postgres</user>
+            <password>mysecretpassword</password>
+            <addresses_expr>postgres1:5432</addresses_expr>
+            <database>postgres</database>
+            <table>test_table</table>
+        </postgres5>
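A brief note on the collection added above: `postgres5` supplies the connection through a single `addresses_expr` value instead of separate host/port keys, so a query only needs the collection name. A minimal usage sketch follows (illustrative only, reusing the `node1` instance from these tests; the inline `table=` override is an assumption about how named-collection keys are normally overridden, not something this diff adds):

    # Hypothetical usage sketch: reference the named collection as-is,
    # or override a single key inline (here: point the same credentials
    # at another table). Not part of the diff itself.
    node1.query("SELECT count() FROM postgresql(`postgres5`)")
    node1.query("SELECT count() FROM postgresql(`postgres5`, table='test_replicas')")
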
diff --git a/tests/integration/test_storage_postgresql/test.py b/tests/integration/test_storage_postgresql/test.py index 11729a5ab18..d9f3a9917ab 100644 --- a/tests/integration/test_storage_postgresql/test.py +++ b/tests/integration/test_storage_postgresql/test.py @@ -82,6 +82,30 @@ def test_postgres_select_insert(started_cluster): cursor.execute(f"DROP TABLE {table_name} ") +def test_postgres_addresses_expr(started_cluster): + cursor = started_cluster.postgres_conn.cursor() + table_name = "test_table" + table = f"""postgresql(`postgres5`)""" + cursor.execute(f"DROP TABLE IF EXISTS {table_name}") + cursor.execute(f"CREATE TABLE {table_name} (a integer, b text, c integer)") + + node1.query( + f""" + INSERT INTO TABLE FUNCTION {table} + SELECT number, concat('name_', toString(number)), 3 from numbers(10000)""" + ) + check1 = f"SELECT count() FROM {table}" + check2 = f"SELECT Sum(c) FROM {table}" + check3 = f"SELECT count(c) FROM {table} WHERE a % 2 == 0" + check4 = f"SELECT count() FROM {table} WHERE b LIKE concat('name_', toString(1))" + assert (node1.query(check1)).rstrip() == "10000" + assert (node1.query(check2)).rstrip() == "30000" + assert (node1.query(check3)).rstrip() == "5000" + assert (node1.query(check4)).rstrip() == "1" + + cursor.execute(f"DROP TABLE {table_name} ") + + def test_postgres_conversions(started_cluster): cursor = started_cluster.postgres_conn.cursor() cursor.execute(f"DROP TABLE IF EXISTS test_types") @@ -90,20 +114,20 @@ def test_postgres_conversions(started_cluster): cursor.execute( """CREATE TABLE test_types ( a smallint, b integer, c bigint, d real, e double precision, f serial, g bigserial, - h timestamp, i date, j decimal(5, 3), k numeric, l boolean)""" + h timestamp, i date, j decimal(5, 3), k numeric, l boolean, "M" integer)""" ) node1.query( """ INSERT INTO TABLE FUNCTION postgresql('postgres1:5432', 'postgres', 'test_types', 'postgres', 'mysecretpassword') VALUES - (-32768, -2147483648, -9223372036854775808, 1.12345, 1.1234567890, 2147483647, 9223372036854775807, '2000-05-12 12:12:12.012345', '2000-05-12', 22.222, 22.222, 1)""" + (-32768, -2147483648, -9223372036854775808, 1.12345, 1.1234567890, 2147483647, 9223372036854775807, '2000-05-12 12:12:12.012345', '2000-05-12', 22.222, 22.222, 1, 42)""" ) result = node1.query( """ - SELECT a, b, c, d, e, f, g, h, i, j, toDecimal128(k, 3), l FROM postgresql('postgres1:5432', 'postgres', 'test_types', 'postgres', 'mysecretpassword')""" + SELECT a, b, c, d, e, f, g, h, i, j, toDecimal128(k, 3), l, "M" FROM postgresql('postgres1:5432', 'postgres', 'test_types', 'postgres', 'mysecretpassword')""" ) assert ( result - == "-32768\t-2147483648\t-9223372036854775808\t1.12345\t1.123456789\t2147483647\t9223372036854775807\t2000-05-12 12:12:12.012345\t2000-05-12\t22.222\t22.222\t1\n" + == "-32768\t-2147483648\t-9223372036854775808\t1.12345\t1.123456789\t2147483647\t9223372036854775807\t2000-05-12 12:12:12.012345\t2000-05-12\t22.222\t22.222\t1\t42\n" ) cursor.execute( @@ -132,7 +156,8 @@ def test_postgres_conversions(started_cluster): i Char(2)[][][][], -- Nullable(String) j Char(2)[], -- Nullable(String) k UUID[], -- Nullable(UUID) - l UUID[][] -- Nullable(UUID) + l UUID[][], -- Nullable(UUID) + "M" integer[] NOT NULL -- Int32 (mixed-case identifier) )""" ) @@ -152,7 +177,8 @@ def test_postgres_conversions(started_cluster): "i\tArray(Array(Array(Array(Nullable(String)))))\t\t\t\t\t\n" "j\tArray(Nullable(String))\t\t\t\t\t\n" "k\tArray(Nullable(UUID))\t\t\t\t\t\n" - "l\tArray(Array(Nullable(UUID)))" + 
"l\tArray(Array(Nullable(UUID)))\t\t\t\t\t\n" + "M\tArray(Int32)" "" ) assert result.rstrip() == expected @@ -171,7 +197,8 @@ def test_postgres_conversions(started_cluster): "[[[[NULL]]]], " "[], " "['2a0c0bfc-4fec-4e32-ae3a-7fc8eea6626a', '42209d53-d641-4d73-a8b6-c038db1e75d6', NULL], " - "[[NULL, '42209d53-d641-4d73-a8b6-c038db1e75d6'], ['2a0c0bfc-4fec-4e32-ae3a-7fc8eea6626a', NULL], [NULL, NULL]]" + "[[NULL, '42209d53-d641-4d73-a8b6-c038db1e75d6'], ['2a0c0bfc-4fec-4e32-ae3a-7fc8eea6626a', NULL], [NULL, NULL]]," + "[42, 42, 42]" ")" ) @@ -191,7 +218,8 @@ def test_postgres_conversions(started_cluster): "[[[[NULL]]]]\t" "[]\t" "['2a0c0bfc-4fec-4e32-ae3a-7fc8eea6626a','42209d53-d641-4d73-a8b6-c038db1e75d6',NULL]\t" - "[[NULL,'42209d53-d641-4d73-a8b6-c038db1e75d6'],['2a0c0bfc-4fec-4e32-ae3a-7fc8eea6626a',NULL],[NULL,NULL]]\n" + "[[NULL,'42209d53-d641-4d73-a8b6-c038db1e75d6'],['2a0c0bfc-4fec-4e32-ae3a-7fc8eea6626a',NULL],[NULL,NULL]]\t" + "[42,42,42]\n" ) assert result == expected @@ -199,6 +227,66 @@ def test_postgres_conversions(started_cluster): cursor.execute(f"DROP TABLE test_array_dimensions") +def test_postgres_array_ndim_error_messges(started_cluster): + cursor = started_cluster.postgres_conn.cursor() + + # cleanup + cursor.execute("DROP VIEW IF EXISTS array_ndim_view;") + cursor.execute("DROP TABLE IF EXISTS array_ndim_table;") + + # setup + cursor.execute( + 'CREATE TABLE array_ndim_table (x INTEGER, "Mixed-case with spaces" INTEGER[]);' + ) + cursor.execute("CREATE VIEW array_ndim_view AS SELECT * FROM array_ndim_table;") + describe_table = """ + DESCRIBE TABLE postgresql( + 'postgres1:5432', 'postgres', 'array_ndim_view', + 'postgres', 'mysecretpassword' + ) + """ + + # View with array column cannot be empty. Should throw a useful error message. + # (Cannot infer array dimension.) + try: + node1.query(describe_table) + assert False + except Exception as error: + assert ( + "PostgreSQL relation containing arrays cannot be empty: array_ndim_view" + in str(error) + ) + + # View cannot have empty array. Should throw useful error message. + # (Cannot infer array dimension.) + cursor.execute("TRUNCATE array_ndim_table;") + cursor.execute("INSERT INTO array_ndim_table VALUES (1234, '{}');") + try: + node1.query(describe_table) + assert False + except Exception as error: + assert ( + 'PostgreSQL cannot infer dimensions of an empty array: array_ndim_view."Mixed-case with spaces"' + in str(error) + ) + + # View cannot have NULL array value. Should throw useful error message. 
+ cursor.execute("TRUNCATE array_ndim_table;") + cursor.execute("INSERT INTO array_ndim_table VALUES (1234, NULL);") + try: + node1.query(describe_table) + assert False + except Exception as error: + assert ( + 'PostgreSQL array cannot be NULL: array_ndim_view."Mixed-case with spaces"' + in str(error) + ) + + # cleanup + cursor.execute("DROP VIEW IF EXISTS array_ndim_view;") + cursor.execute("DROP TABLE IF EXISTS array_ndim_table;") + + def test_non_default_schema(started_cluster): node1.query("DROP TABLE IF EXISTS test_pg_table_schema") node1.query("DROP TABLE IF EXISTS test_pg_table_schema_with_dots") diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index f5c6f54a1ea..2549cb0d473 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -626,7 +626,7 @@ def test_wrong_s3_syntax(started_cluster): instance = started_cluster.instances["dummy"] # type: ClickHouseInstance expected_err_msg = "Code: 42" # NUMBER_OF_ARGUMENTS_DOESNT_MATCH - query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3('', '', '', '', '', '')" + query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3('', '', '', '', '', '', '')" assert expected_err_msg in instance.query_and_get_error(query) expected_err_msg = "Code: 36" # BAD_ARGUMENTS @@ -1395,6 +1395,7 @@ def test_schema_inference_from_globs(started_cluster): def test_signatures(started_cluster): + session_token = "session token that will not be checked by MiniIO" bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] @@ -1417,6 +1418,11 @@ def test_signatures(started_cluster): ) assert int(result) == 1 + result = instance.query( + f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123', '{session_token}')" + ) + assert int(result) == 1 + result = instance.query( f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'Arrow', 'x UInt64', 'auto')" ) @@ -1427,6 +1433,21 @@ def test_signatures(started_cluster): ) assert int(result) == 1 + result = instance.query( + f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123', '{session_token}', 'Arrow')" + ) + assert int(result) == 1 + + lt = instance.query( + f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123', '{session_token}', 'Arrow', 'x UInt64')" + ) + assert int(result) == 1 + + lt = instance.query( + f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123', '{session_token}', 'Arrow', 'x UInt64', 'auto')" + ) + assert int(result) == 1 + def test_select_columns(started_cluster): bucket = started_cluster.minio_bucket @@ -2111,3 +2132,65 @@ def test_filtering_by_file_or_path(started_cluster): ) assert int(result) == 1 + + +def test_union_schema_inference_mode(started_cluster): + bucket = started_cluster.minio_bucket + instance = started_cluster.instances["s3_non_default"] + + instance.query( + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference1.jsonl') select 1 as a" + ) + + instance.query( + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference2.jsonl') select 2 as b" + ) + + 
instance.query( + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference3.jsonl') select 2 as c" + ) + + instance.query( + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference4.jsonl', TSV) select 'Error'" + ) + + for engine in ["s3", "url"]: + instance.query("system drop schema cache for s3") + + result = instance.query( + f"desc {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference{{1,2,3}}.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + ) + assert result == "a\tNullable(Int64)\nb\tNullable(Int64)\nc\tNullable(Int64)\n" + + result = instance.query( + "select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache where source like '%test_union_schema_inference%' order by file format TSV" + ) + assert ( + result == "UNION\ttest_union_schema_inference1.jsonl\ta Nullable(Int64)\n" + "UNION\ttest_union_schema_inference2.jsonl\tb Nullable(Int64)\n" + "UNION\ttest_union_schema_inference3.jsonl\tc Nullable(Int64)\n" + ) + result = instance.query( + f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference{{1,2,3}}.jsonl') order by tuple(*) settings schema_inference_mode='union', describe_compact_output=1 format TSV" + ) + assert result == "1\t\\N\t\\N\n" "\\N\t2\t\\N\n" "\\N\t\\N\t2\n" + + instance.query(f"system drop schema cache for {engine}") + result = instance.query( + f"desc {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference2.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + ) + assert result == "b\tNullable(Int64)\n" + + result = instance.query( + f"desc {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference{{1,2,3}}.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + ) + assert ( + result == "a\tNullable(Int64)\n" + "b\tNullable(Int64)\n" + "c\tNullable(Int64)\n" + ) + + error = instance.query_and_get_error( + f"desc {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference{{1,2,3,4}}.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + ) + assert "Cannot extract table structure" in error diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index b1163a549b1..b83c095a7a6 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -919,4 +919,6 @@ def test_drop_table(started_cluster): node.query(f"DROP TABLE {table_name} SYNC") assert node.contains_in_log( f"StorageS3Queue ({table_name}): Table is being dropped" + ) or node.contains_in_log( + f"StorageS3Queue ({table_name}): Shutdown was called, stopping sync" ) diff --git a/tests/integration/test_user_valid_until/test.py b/tests/integration/test_user_valid_until/test.py index e34771e55a9..d6d5bf8b18e 100644 --- a/tests/integration/test_user_valid_until/test.py +++ b/tests/integration/test_user_valid_until/test.py @@ -78,7 +78,9 @@ def test_details(started_cluster): # 2. 
Time only is not supported node.query("CREATE USER user_details_time_only VALID UNTIL '22:03:40'") + until_year = datetime.today().strftime("%Y") + assert ( node.query("SHOW CREATE USER user_details_time_only") - == "CREATE USER user_details_time_only VALID UNTIL \\'2023-01-01 22:03:40\\'\n" + == f"CREATE USER user_details_time_only VALID UNTIL \\'{until_year}-01-01 22:03:40\\'\n" ) diff --git a/tests/integration/test_wrong_db_or_table_name/test.py b/tests/integration/test_wrong_db_or_table_name/test.py index 68af383b6c3..641501eac84 100644 --- a/tests/integration/test_wrong_db_or_table_name/test.py +++ b/tests/integration/test_wrong_db_or_table_name/test.py @@ -61,6 +61,7 @@ def test_wrong_table_name(start): node.query( """ CREATE DATABASE test; + CREATE DATABASE test2; CREATE TABLE test.table_test (i Int64) ENGINE=Memory; CREATE TABLE test.table_test2 (i Int64) ENGINE=Memory; INSERT INTO test.table_test SELECT 1; @@ -68,7 +69,7 @@ def test_wrong_table_name(start): ) with pytest.raises( QueryRuntimeException, - match="DB::Exception: Table test.table_test1 does not exist. Maybe you meant table_test?.", + match="DB::Exception: Table test.table_test1 does not exist. Maybe you meant test.table_test?.", ): node.query( """ @@ -76,11 +77,23 @@ def test_wrong_table_name(start): """ ) assert int(node.query("SELECT count() FROM test.table_test;")) == 1 + + with pytest.raises( + QueryRuntimeException, + match="DB::Exception: Table test2.table_test1 does not exist. Maybe you meant test.table_test?.", + ): + node.query( + """ + SELECT * FROM test2.table_test1 LIMIT 1; + """ + ) + node.query( """ DROP TABLE test.table_test; DROP TABLE test.table_test2; DROP DATABASE test; + DROP DATABASE test2; """ ) @@ -89,6 +102,7 @@ def test_drop_wrong_table_name(start): node.query( """ CREATE DATABASE test; + CREATE DATABASE test2; CREATE TABLE test.table_test (i Int64) ENGINE=Memory; INSERT INTO test.table_test SELECT 1; """ @@ -96,13 +110,21 @@ def test_drop_wrong_table_name(start): with pytest.raises( QueryRuntimeException, - match="DB::Exception: Table test.table_tes does not exist. Maybe you meant table_test?.", + match="DB::Exception: Table test.table_test1 does not exist. Maybe you meant test.table_test?.", ): - node.query("DROP TABLE test.table_tes;") + node.query("DROP TABLE test.table_test1;") assert int(node.query("SELECT count() FROM test.table_test;")) == 1 + + with pytest.raises( + QueryRuntimeException, + match="DB::Exception: Table test2.table_test does not exist. 
Maybe you meant test.table_test?.", + ): + node.query("DROP TABLE test2.table_test;") + node.query( """ DROP TABLE test.table_test; DROP DATABASE test; + DROP DATABASE test2; """ ) diff --git a/tests/performance/README.md b/tests/performance/README.md index f554e96203b..289ecaba034 100644 --- a/tests/performance/README.md +++ b/tests/performance/README.md @@ -18,5 +18,5 @@ TODO @akuzm ``` pip3 install clickhouse_driver scipy -../../docker/test/performance-comparison/perf.py --runs 1 insert_parallel.xml +../../tests/performance/scripts/perf.py --runs 1 insert_parallel.xml ``` diff --git a/tests/performance/group_by_consecutive_keys.xml b/tests/performance/group_by_consecutive_keys.xml new file mode 100644 index 00000000000..c5c885d2bb6 --- /dev/null +++ b/tests/performance/group_by_consecutive_keys.xml @@ -0,0 +1,8 @@ + + SELECT toUInt64(intDiv(number, 1000000)) AS n, count(), sum(number) FROM numbers(10000000) GROUP BY n FORMAT Null + SELECT toString(intDiv(number, 1000000)) AS n, count(), sum(number) FROM numbers(10000000) GROUP BY n FORMAT Null + SELECT toUInt64(intDiv(number, 1000000)) AS n, count(), uniq(number) FROM numbers(10000000) GROUP BY n FORMAT Null + SELECT toUInt64(intDiv(number, 100000)) AS n, count(), sum(number) FROM numbers(10000000) GROUP BY n FORMAT Null + SELECT toUInt64(intDiv(number, 100)) AS n, count(), sum(number) FROM numbers(10000000) GROUP BY n FORMAT Null + SELECT toUInt64(intDiv(number, 10)) AS n, count(), sum(number) FROM numbers(10000000) GROUP BY n FORMAT Null + diff --git a/tests/performance/hashjoin_with_large_output.xml b/tests/performance/hashjoin_with_large_output.xml new file mode 100644 index 00000000000..f4b61c15f82 --- /dev/null +++ b/tests/performance/hashjoin_with_large_output.xml @@ -0,0 +1,64 @@ + + + 16 + 10G + + + + + settings + + join_algorithm='hash' + join_algorithm='grace_hash' + + + + + + create table test_left + ( + k1 String, + v1 String + ) + engine = Memory(); + + + create table test_right + ( + k1 String, + v1 String, + v2 String, + v3 String, + v4 String, + v5 String, + v6 String, + v7 String, + v8 String, + v9 String + ) + engine = Memory(); + + insert into test_left SELECT toString(number % 20), toString(number) from system.numbers limit 10000; + + insert into test_right + SELECT + toString(number % 20), + toString(number * 10000), + toString(number * 10000 + 1), + toString(number * 10000 + 2), + toString(number * 10000 + 3), + toString(number * 10000 + 4), + toString(number * 10000 + 5), + toString(number * 10000 + 6), + toString(number * 10000 + 7), + toString(number * 10000 + 8) + from system.numbers limit 10000; + + + + select * from test_left all inner join test_right on test_left.k1 = test_right.k1 SETTINGS {settings} format Null + + + DROP TABLE IF EXISTS test_left + DROP TABLE IF EXISTS test_right + diff --git a/tests/performance/if.xml b/tests/performance/if.xml new file mode 100644 index 00000000000..f4d0e8f9773 --- /dev/null +++ b/tests/performance/if.xml @@ -0,0 +1,12 @@ + + + 42949673, zero + 1, zero + 2)) ]]> + + + + + + + + + diff --git a/docker/test/performance-comparison/README.md b/tests/performance/scripts/README.md similarity index 93% rename from docker/test/performance-comparison/README.md rename to tests/performance/scripts/README.md index fd9001e23c7..0a0580c62a0 100644 --- a/docker/test/performance-comparison/README.md +++ b/tests/performance/scripts/README.md @@ -25,7 +25,7 @@ The check status summarizes the report in a short text message like `1 faster, 1 * `1 unstable` -- how many queries have unstable 
results, * `1 errors` -- how many errors there are in total. Action is required for every error, this number must be zero. The number of errors includes slower tests, tests that are too long, errors while running the tests and building reports, etc. Please look at the main report page to investigate these errors. -The report page itself constists of a several tables. Some of them always signify errors, e.g. "Run errors" -- the very presence of this table indicates that there were errors during the test, that are not normal and must be fixed. Some tables are mostly informational, e.g. "Test times" -- they reflect normal test results. But if a cell in such table is marked in red, this also means an error, e.g., a test is taking too long to run. +The report page itself consists of a several tables. Some of them always signify errors, e.g. "Run errors" -- the very presence of this table indicates that there were errors during the test, that are not normal and must be fixed. Some tables are mostly informational, e.g. "Test times" -- they reflect normal test results. But if a cell in such table is marked in red, this also means an error, e.g., a test is taking too long to run. #### Tested Commits Informational, no action required. Log messages for the commits that are tested. Note that for the right commit, we show nominal tested commit `pull/*/head` and real tested commit `pull/*/merge`, which is generated by GitHub by merging latest master to the `pull/*/head` and which we actually build and test in CI. @@ -33,12 +33,12 @@ Informational, no action required. Log messages for the commits that are tested. #### Error Summary Action required for every item. -This table summarizes all errors that ocurred during the test. Click the links to go to the description of a particular error. +This table summarizes all errors that occurred during the test. Click the links to go to the description of a particular error. #### Run Errors Action required for every item -- these are errors that must be fixed. -The errors that ocurred when running some test queries. For more information about the error, download test output archive and see `test-name-err.log`. To reproduce, see 'How to run' below. +The errors that occurred when running some test queries. For more information about the error, download test output archive and see `test-name-err.log`. To reproduce, see 'How to run' below. #### Slow on Client Action required for every item -- these are errors that must be fixed. @@ -65,7 +65,7 @@ You can find flame graphs for queries with performance changes in the test outpu #### Unstable Queries Action required for the cells marked in red. -These are the queries for which we did not observe a statistically significant change in performance, but for which the variance in query performance is very high. This means that we are likely to observe big changes in performance even in the absence of real changes, e.g. when comparing the server to itself. Such queries are going to have bad sensitivity as performance tests -- if a query has, say, 50% expected variability, this means we are going to see changes in performance up to 50%, even when there were no real changes in the code. And because of this, we won't be able to detect changes less than 50% with such a query, which is pretty bad. The reasons for the high variability must be investigated and fixed; ideally, the variability should be brought under 5-10%. 
+These are the queries for which we did not observe a statistically significant change in performance, but for which the variance in query performance is very high. This means that we are likely to observe big changes in performance even in the absence of real changes, e.g. when comparing the server to itself. Such queries are going to have bad sensitivity as performance tests -- if a query has, say, 50% expected variability, this means we are going to see changes in performance up to 50%, even when there were no real changes in the code. And because of this, we won't be able to detect changes less than 50% with such a query, which is pretty bad. The reasons for the high variability must be investigated and fixed; ideally, the variability should be brought under 5-10%. The most frequent reason for instability is that the query is just too short -- e.g. below 0.1 seconds. Bringing query time to 0.2 seconds or above usually helps. Other reasons may include: @@ -88,7 +88,7 @@ This table summarizes the changes in performance of queries in each test -- how Action required for the cells marked in red. This table shows the run times for all the tests. You may have to fix two kinds of errors in this table: -1) Average query run time is too long -- probalby means that the preparatory steps such as creating the table and filling them with data are taking too long. Try to make them faster. +1) Average query run time is too long -- probably means that the preparatory steps such as creating the table and filling them with data are taking too long. Try to make them faster. 2) Longest query run time is too long -- some particular queries are taking too long, try to make them faster. The ideal query run time is between 0.1 and 1 s. #### Metric Changes @@ -186,4 +186,4 @@ analytically, but I don't know enough math to do it. It would be something close to Wilcoxon test distribution. ### References -1\. Box, Hunter, Hunter "Statictics for exprerimenters", p. 78: "A Randomized Design Used in the Comparison of Standard and Modified Fertilizer Mixtures for Tomato Plants." +1\. Box, Hunter, Hunter "Statistics for exprerimenters", p. 78: "A Randomized Design Used in the Comparison of Standard and Modified Fertilizer Mixtures for Tomato Plants." diff --git a/docker/test/performance-comparison/compare-releases.sh b/tests/performance/scripts/compare-releases.sh similarity index 99% rename from docker/test/performance-comparison/compare-releases.sh rename to tests/performance/scripts/compare-releases.sh index dc7681815d4..6e982168fb1 100755 --- a/docker/test/performance-comparison/compare-releases.sh +++ b/tests/performance/scripts/compare-releases.sh @@ -79,4 +79,3 @@ run rm output.7z 7z a output.7z ./*.{log,tsv,html,txt,rep,svg} {right,left}/{performance,db/preprocessed_configs} - diff --git a/docker/test/performance-comparison/compare.sh b/tests/performance/scripts/compare.sh similarity index 98% rename from docker/test/performance-comparison/compare.sh rename to tests/performance/scripts/compare.sh index f10236b7135..6d1a271355e 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/tests/performance/scripts/compare.sh @@ -236,7 +236,7 @@ function run_tests fi fi - # For PRs w/o changes in test definitons, test only a subset of queries, + # For PRs w/o changes in test definitions, test only a subset of queries, # and run them less times. If the corresponding environment variables are # already set, keep those values. 
# @@ -1220,15 +1220,23 @@ create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv') 0 test_duration_ms, 'https://s3.amazonaws.com/clickhouse-test-reports/$PR_TO_TEST/$SHA_TO_TEST/${CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME_PREFIX}/report.html#fail1' report_url union all - select test || ' #' || toString(query_index), 'slower' test_status, 0 test_duration_ms, - 'https://s3.amazonaws.com/clickhouse-test-reports/$PR_TO_TEST/$SHA_TO_TEST/${CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME_PREFIX}/report.html#changes-in-performance.' - || test || '.' || toString(query_index) report_url - from queries where changed_fail != 0 and diff > 0 + select + test || ' #' || toString(query_index) || '::' || test_desc_.1 test_name, + 'slower' test_status, + test_desc_.2 test_duration_ms, + 'https://s3.amazonaws.com/clickhouse-test-reports/$PR_TO_TEST/$SHA_TO_TEST/${CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME_PREFIX}/report.html#changes-in-performance.' || test || '.' || toString(query_index) report_url + from queries + array join map('old', left, 'new', right) as test_desc_ + where changed_fail != 0 and diff > 0 union all - select test || ' #' || toString(query_index), 'unstable' test_status, 0 test_duration_ms, - 'https://s3.amazonaws.com/clickhouse-test-reports/$PR_TO_TEST/$SHA_TO_TEST/${CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME_PREFIX}/report.html#unstable-queries.' - || test || '.' || toString(query_index) report_url - from queries where unstable_fail != 0 + select + test || ' #' || toString(query_index) || '::' || test_desc_.1 test_name, + 'unstable' test_status, + test_desc_.2 test_duration_ms, + 'https://s3.amazonaws.com/clickhouse-test-reports/$PR_TO_TEST/$SHA_TO_TEST/${CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME_PREFIX}/report.html#unstable-queries.' || test || '.' 
|| toString(query_index) report_url + from queries + array join map('old', left, 'new', right) as test_desc_ + where unstable_fail != 0 ) ; " diff --git a/docker/test/performance-comparison/config/client_config.xml b/tests/performance/scripts/config/client_config.xml similarity index 100% rename from docker/test/performance-comparison/config/client_config.xml rename to tests/performance/scripts/config/client_config.xml diff --git a/docker/test/performance-comparison/config/config.d/top_level_domains_lists.xml b/tests/performance/scripts/config/config.d/top_level_domains_lists.xml similarity index 100% rename from docker/test/performance-comparison/config/config.d/top_level_domains_lists.xml rename to tests/performance/scripts/config/config.d/top_level_domains_lists.xml diff --git a/docker/test/performance-comparison/config/config.d/user_files.xml b/tests/performance/scripts/config/config.d/user_files.xml similarity index 100% rename from docker/test/performance-comparison/config/config.d/user_files.xml rename to tests/performance/scripts/config/config.d/user_files.xml diff --git a/docker/test/performance-comparison/config/config.d/zzz-perf-comparison-tweaks-config.xml b/tests/performance/scripts/config/config.d/zzz-perf-comparison-tweaks-config.xml similarity index 100% rename from docker/test/performance-comparison/config/config.d/zzz-perf-comparison-tweaks-config.xml rename to tests/performance/scripts/config/config.d/zzz-perf-comparison-tweaks-config.xml diff --git a/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml b/tests/performance/scripts/config/users.d/perf-comparison-tweaks-users.xml similarity index 100% rename from docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml rename to tests/performance/scripts/config/users.d/perf-comparison-tweaks-users.xml diff --git a/docker/test/performance-comparison/download.sh b/tests/performance/scripts/download.sh similarity index 100% rename from docker/test/performance-comparison/download.sh rename to tests/performance/scripts/download.sh diff --git a/docker/test/performance-comparison/entrypoint.sh b/tests/performance/scripts/entrypoint.sh similarity index 89% rename from docker/test/performance-comparison/entrypoint.sh rename to tests/performance/scripts/entrypoint.sh index fb5e6bd2a7a..95ffe44b654 100755 --- a/docker/test/performance-comparison/entrypoint.sh +++ b/tests/performance/scripts/entrypoint.sh @@ -7,8 +7,9 @@ export CHPC_CHECK_START_TIMESTAMP S3_URL=${S3_URL:="https://clickhouse-builds.s3.amazonaws.com"} BUILD_NAME=${BUILD_NAME:-package_release} export S3_URL BUILD_NAME +SCRIPT_DIR="$(dirname "$(readlink -f "$0")")" -# Sometimes AWS responde with DNS error and it's impossible to retry it with +# Sometimes AWS responds with DNS error and it's impossible to retry it with # current curl version options. function curl_with_retry { @@ -88,19 +89,9 @@ chmod 777 workspace output cd workspace -# Download the package for the version we are going to test. -# A temporary solution for migrating into PRs directory -for prefix in "$S3_URL/PRs" "$S3_URL"; -do - if curl_with_retry "$prefix/$PR_TO_TEST/$SHA_TO_TEST/$BUILD_NAME/performance.tar.zst" - then - right_path="$prefix/$PR_TO_TEST/$SHA_TO_TEST/$BUILD_NAME/performance.tar.zst" - break - fi -done - -mkdir right -wget -nv -nd -c "$right_path" -O- | tar -C right --no-same-owner --strip-components=1 --zstd --extract --verbose +[ ! 
-e "/artifacts/performance.tar.zst" ] && echo "ERROR: performance.tar.zst not found" && exit 1 +mkdir -p right +tar -xf "/artifacts/performance.tar.zst" -C right --no-same-owner --strip-components=1 --zstd --extract --verbose # Find reference revision if not specified explicitly if [ "$REF_SHA" == "" ]; then find_reference_sha; fi @@ -158,7 +149,7 @@ cat /proc/sys/kernel/core_pattern # Start the main comparison script. { - time ../download.sh "$REF_PR" "$REF_SHA" "$PR_TO_TEST" "$SHA_TO_TEST" && \ + time $SCRIPT_DIR/download.sh "$REF_PR" "$REF_SHA" "$PR_TO_TEST" "$SHA_TO_TEST" && \ time stage=configure "$script_path"/compare.sh ; \ } 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee compare.log diff --git a/docker/test/performance-comparison/eqmed.sql b/tests/performance/scripts/eqmed.sql similarity index 97% rename from docker/test/performance-comparison/eqmed.sql rename to tests/performance/scripts/eqmed.sql index d0111550ee6..94e6733a3d7 100644 --- a/docker/test/performance-comparison/eqmed.sql +++ b/tests/performance/scripts/eqmed.sql @@ -12,7 +12,7 @@ from -- quantiles of randomization distributions -- note that for small number of runs, the exact quantile might not make -- sense, because the last possible value of randomization distribution - -- might take a larger percentage of distirbution (i.e. the distribution + -- might take a larger percentage of distribution (i.e. the distribution -- actually has discrete values, and the last step can be large). select quantileExactForEach(0.99)( arrayMap(x, y -> abs(x - y), metrics_by_label[1], metrics_by_label[2]) as d @@ -44,7 +44,7 @@ from -- for each virtual run, randomly reorder measurements order by virtual_run, rand() ) virtual_runs - ) relabeled + ) relabeled group by virtual_run, random_label ) virtual_medians group by virtual_run -- aggregate by random_label diff --git a/docker/test/performance-comparison/manual-run.sh b/tests/performance/scripts/manual-run.sh similarity index 99% rename from docker/test/performance-comparison/manual-run.sh rename to tests/performance/scripts/manual-run.sh index 2cc40bf4648..82609489d72 100755 --- a/docker/test/performance-comparison/manual-run.sh +++ b/tests/performance/scripts/manual-run.sh @@ -51,4 +51,3 @@ run rm output.7z 7z a output.7z ./*.{log,tsv,html,txt,rep,svg} {right,left}/{performance,db/preprocessed_configs} - diff --git a/docker/test/performance-comparison/perf.py b/tests/performance/scripts/perf.py similarity index 99% rename from docker/test/performance-comparison/perf.py rename to tests/performance/scripts/perf.py index d23a9ac61c1..e98c158249a 100755 --- a/docker/test/performance-comparison/perf.py +++ b/tests/performance/scripts/perf.py @@ -357,7 +357,7 @@ for query_index in queries_to_run: prewarm_id = f"{query_prefix}.prewarm0" try: - # During the warmup runs, we will also: + # During the warm-up runs, we will also: # * detect queries that are exceedingly long, to fail fast, # * collect profiler traces, which might be helpful for analyzing # test coverage. We disable profiler for normal runs because @@ -390,7 +390,7 @@ for query_index in queries_to_run: query_error_on_connection[conn_index] = traceback.format_exc() continue - # Report all errors that ocurred during prewarm and decide what to do next. + # Report all errors that occurred during prewarm and decide what to do next. # If prewarm fails for the query on all servers -- skip the query and # continue testing the next query. # If prewarm fails on one of the servers, run the query on the rest of them. 
diff --git a/docker/test/performance-comparison/report.py b/tests/performance/scripts/report.py similarity index 100% rename from docker/test/performance-comparison/report.py rename to tests/performance/scripts/report.py diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference index cd9f0142d45..d8c0db3b996 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference @@ -1,15 +1,15 @@ -runtime messages 0.001 -runtime exceptions 0.05 -unknown runtime exceptions 0.01 -messages shorter than 10 1 -messages shorter than 16 3 -exceptions shorter than 30 3 [] -noisy messages 0.3 -noisy Trace messages 0.16 -noisy Debug messages 0.09 -noisy Info messages 0.05 -noisy Warning messages 0.01 -noisy Error messages 0.02 +runtime messages 0.001 [] +runtime exceptions 0.05 [] +unknown runtime exceptions 0.01 [] +messages shorter than 10 1 [] +messages shorter than 16 1 [] +exceptions shorter than 30 1 [] +noisy messages 0.3 +noisy Trace messages 0.16 +noisy Debug messages 0.09 +noisy Info messages 0.05 +noisy Warning messages 0.01 +noisy Error messages 0.03 no Fatal messages 0 number of too noisy messages 3 number of noisy messages 10 diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql index 062806baae9..3a83126ea11 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql @@ -9,57 +9,174 @@ create view logs as select * from system.text_log where now() - toIntervalMinute -- Check that we don't have too many messages formatted with fmt::runtime or strings concatenation. -- 0.001 threshold should be always enough, the value was about 0.00025 -select 'runtime messages', greatest(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0), 0.001) from logs - where message not like '% Received from %clickhouse-staging.com:9440%'; +WITH 0.001 AS threshold +SELECT + 'runtime messages', + greatest(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0) as v, threshold), + v <= threshold ? [] : + (SELECT groupArray((message, c)) FROM ( + SELECT message, count() as c FROM logs + WHERE + length(message_format_string) = 0 + AND message not like '% Received from %clickhouse-staging.com:9440%' + AND source_file not like '%/AWSLogger.cpp%' + GROUP BY message ORDER BY c LIMIT 10 + )) +FROM logs +WHERE + message NOT LIKE '% Received from %clickhouse-staging.com:9440%' + AND source_file not like '%/AWSLogger.cpp%'; -- Check the same for exceptions. The value was 0.03 -select 'runtime exceptions', greatest(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0), 0.05) from logs - where (message like '%DB::Exception%' or message like '%Coordination::Exception%') - and message not like '% Received from %clickhouse-staging.com:9440%'; +WITH 0.05 AS threshold +SELECT + 'runtime exceptions', + greatest(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0) as v, threshold), + v <= threshold ? 
[] : + (SELECT groupArray((message, c)) FROM ( + SELECT message, count() as c FROM logs + WHERE + length(message_format_string) = 0 + AND (message like '%DB::Exception%' or message like '%Coordination::Exception%') + AND message not like '% Received from %clickhouse-staging.com:9440%' + GROUP BY message ORDER BY c LIMIT 10 + )) +FROM logs +WHERE + message NOT LIKE '% Received from %clickhouse-staging.com:9440%' + AND (message like '%DB::Exception%' or message like '%Coordination::Exception%'); + +WITH 0.01 AS threshold +SELECT + 'unknown runtime exceptions', + greatest(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0) as v, threshold), + v <= threshold ? [] : + (SELECT groupArray((message, c)) FROM ( + SELECT message, count() as c FROM logs + WHERE + length(message_format_string) = 0 + AND (message like '%DB::Exception%' or message like '%Coordination::Exception%') + AND message not like '% Received from %' and message not like '%(SYNTAX_ERROR)%' + GROUP BY message ORDER BY c LIMIT 10 + )) +FROM logs +WHERE + (message like '%DB::Exception%' or message like '%Coordination::Exception%') + AND message not like '% Received from %' and message not like '%(SYNTAX_ERROR)%'; -select 'unknown runtime exceptions', greatest(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0), 0.01) from logs where - (message like '%DB::Exception%' or message like '%Coordination::Exception%') - and message not like '% Received from %' and message not like '%(SYNTAX_ERROR)%'; -- FIXME some of the following messages are not informative and it has to be fixed -create temporary table known_short_messages (s String) as select * from (select -['', '{} ({})', '({}) Keys: {}', '({}) {}', 'Aggregating', 'Became leader', 'Cleaning queue', -'Creating set.', 'Cyclic aliases', 'Detaching {}', 'Executing {}', 'Fire events: {}', -'Found part {}', 'Loaded queue', 'No sharding key', 'No tables', 'Query: {}', 'Removed', -'Removed part {}', 'Removing parts.', 'Request URI: {}', 'Sending part {}', -'Sent handshake', 'Starting {}', 'Will mimic {}', 'Writing to {}', 'dropIfEmpty', -'loadAll {}', '{} ({}:{})', '{} -> {}', '{} {}', '{}: {}', '{}%', 'Read object: {}', -'New segment: {}', 'Convert overflow', 'Division by zero', 'Files set to {}', -'Bytes set to {}', 'Numeric overflow', 'Invalid mode: {}', -'Write file: {}', 'Unable to parse JSONPath', 'Host is empty in S3 URI.', 'Expected end of line', -'inflate failed: {}{}', 'Center is not valid', 'Column ''{}'' is ambiguous', 'Cannot parse object', 'Invalid date: {}', -'There is no cache by name: {}', 'No part {} in table', '`{}` should be a String', 'There are duplicate id {}', -'Invalid replica name: {}', 'Unexpected value {} in enum', 'Unknown BSON type: {}', 'Point is not valid', -'Invalid qualified name: {}', 'INTO OUTFILE is not allowed', 'Arguments must not be NaN', 'Cell is not valid', -'brotli decode error{}', 'Invalid H3 index: {}', 'Too large node state size', 'No additional keys found.', -'Attempt to read after EOF.', 'Replication was stopped', '{} building file infos', 'Cannot parse uuid {}', -'Query was cancelled', 'Cancelled merging parts', 'Cancelled mutating parts', 'Log pulling is cancelled', -'Transaction was cancelled', 'Could not find table: {}', 'Table {} does not exist', -'Database {} does not exist', 'Dictionary ({}) not found', 'Unknown table function {}', -'Unknown format {}', 'Unknown explain kind ''{}''', 'Unknown setting {}', 'Unknown input format {}', -'Unknown identifier: ''{}''', 'User name is empty', 'Expected function, got: 
{}', -'Attempt to read after eof', 'String size is too big ({}), maximum: {}', -'Processed: {}%', 'Creating {}: {}', 'Table {}.{} doesn''t exist', 'Invalid cache key hex: {}', -'User has been dropped', 'Illegal type {} of argument of function {}. Should be DateTime or DateTime64', -'Unknown statistic column: {}', -'Bad SSH public key provided', 'Database {} does not exist', 'Substitution {} is not set', 'Invalid cache key hex: {}' -] as arr) array join arr; +create temporary table known_short_messages (s String) as select * from (select [ + '', + '({}) Keys: {}', + '({}) {}', + 'Aggregating', + 'Attempt to read after EOF.', + 'Attempt to read after eof', + 'Bad SSH public key provided', + 'Became leader', + 'Bytes set to {}', + 'Cancelled merging parts', + 'Cancelled mutating parts', + 'Cannot parse date here: {}', + 'Cannot parse object', + 'Cannot parse uuid {}', + 'Cleaning queue', + 'Column \'{}\' is ambiguous', + 'Convert overflow', + 'Could not find table: {}', + 'Creating {}: {}', + 'Cyclic aliases', + 'Database {} does not exist', + 'Detaching {}', + 'Dictionary ({}) not found', + 'Division by zero', + 'Executing {}', + 'Expected end of line', + 'Expected function, got: {}', + 'Files set to {}', + 'Fire events: {}', + 'Found part {}', + 'Host is empty in S3 URI.', + 'INTO OUTFILE is not allowed', + 'Illegal type {} of argument of function {}. Should be DateTime or DateTime64', + 'Illegal UTF-8 sequence, while processing \'{}\'', + 'Invalid cache key hex: {}', + 'Invalid date: {}', + 'Invalid mode: {}', + 'Invalid qualified name: {}', + 'Invalid replica name: {}', + 'Loaded queue', + 'Log pulling is cancelled', + 'New segment: {}', + 'No additional keys found.', + 'No part {} in table', + 'No sharding key', + 'No tables', + 'Numeric overflow', + 'Path to archive is empty', + 'Processed: {}%', + 'Query was cancelled', + 'Query: {}', + 'Read object: {}', + 'Removed part {}', + 'Removing parts.', + 'Replication was stopped', + 'Request URI: {}', + 'Sending part {}', + 'Sent handshake', + 'Starting {}', + 'String size is too big ({}), maximum: {}', + 'Substitution {} is not set', + 'Table {} does not exist', + 'Table {}.{} doesn\'t exist', + 'There are duplicate id {}', + 'There is no cache by name: {}', + 'Too large node state size', + 'Transaction was cancelled', + 'Unable to parse JSONPath', + 'Unexpected value {} in enum', + 'Unknown BSON type: {}', + 'Unknown explain kind \'{}\'', + 'Unknown format {}', + 'Unknown identifier: \'{}\'', + 'Unknown input format {}', + 'Unknown setting {}', + 'Unknown statistic column: {}', + 'Unknown table function {}', + 'User has been dropped', + 'User name is empty', + 'Will mimic {}', + 'Write file: {}', + 'Writing to {}', + '`{}` should be a String', + 'brotli decode error{}', + 'dropIfEmpty', + 'inflate failed: {}{}', + 'loadAll {}', + '{} ({})', + '{} ({}:{})', + '{} -> {}', + '{} {}', + '{}%', + '{}: {}' + ] as arr) array join arr; -- Check that we don't have too many short meaningless message patterns. +WITH 1 AS max_messages select 'messages shorter than 10', - greatest(uniqExact(message_format_string), 1) + (uniqExact(message_format_string) as c) <= max_messages, + c <= max_messages ? [] : groupUniqArray(message_format_string) from logs where length(message_format_string) < 10 and message_format_string not in known_short_messages; -- Same as above. 
Feel free to update the threshold or remove this query if really necessary +WITH 3 AS max_messages select 'messages shorter than 16', - greatest(uniqExact(message_format_string), 3) - from logs where length(message_format_string) < 16 and message_format_string not in known_short_messages; + (uniqExact(message_format_string) as c) <= max_messages, + c <= max_messages ? [] : groupUniqArray(message_format_string) + from logs + where length(message_format_string) < 16 and message_format_string not in known_short_messages; -- Unlike above, here we look at length of the formatted message, not format string. Most short format strings are fine because they end up decorated with context from outer or inner exceptions, e.g.: -- "Expected end of line" -> "Code: 117. DB::Exception: Expected end of line: (in file/uri /var/lib/clickhouse/user_files/data_02118): (at row 1)" @@ -68,40 +185,53 @@ select 'messages shorter than 16', -- This table currently doesn't have enough information to do this reliably, so we just regex search for " (ERROR_NAME_IN_CAPS)" and hope that's good enough. -- For the "Code: 123. DB::Exception: " part, we just subtract 26 instead of searching for it. Because sometimes it's not at the start, e.g.: -- "Unexpected error, will try to restart main thread: Code: 341. DB::Exception: Unexpected error: Code: 57. DB::Exception:[...]" +WITH 3 AS max_messages select 'exceptions shorter than 30', - greatest(uniqExact(message_format_string), 3) AS c, - c = 3 ? [] : groupUniqArray(message_format_string) + (uniqExact(message_format_string) as c) <= max_messages, + c <= max_messages ? [] : groupUniqArray(message_format_string) from logs where message ilike '%DB::Exception%' and if(length(extract(message, '(.*)\\([A-Z0-9_]+\\)')) as pref > 0, pref, length(message)) < 30 + 26 and message_format_string not in known_short_messages; - -- Avoid too noisy messages: top 1 message frequency must be less than 30%. We should reduce the threshold -select 'noisy messages', - greatest((select count() from logs group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.30); +WITH 0.30 as threshold +select + 'noisy messages', + greatest(coalesce(((select message_format_string, count() from logs group by message_format_string order by count() desc limit 1) as top_message).2, 0) / (select count() from logs), threshold) as r, + r <= threshold ? '' : top_message.1; -- Same as above, but excluding Test level (actually finds top 1 Trace message) -with ('Access granted: {}{}', '{} -> {}') as frequent_in_tests -select 'noisy Trace messages', - greatest((select count() from logs where level!='Test' and message_format_string not in frequent_in_tests - group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.16); +with 0.16 as threshold +select + 'noisy Trace messages', + greatest(coalesce(((select message_format_string, count() from logs where level = 'Trace' and message_format_string not in ('Access granted: {}{}', '{} -> {}') + group by message_format_string order by count() desc limit 1) as top_message).2, 0) / (select count() from logs), threshold) as r, + r <= threshold ? 
'' : top_message.1; -- Same as above for Debug +WITH 0.09 as threshold select 'noisy Debug messages', - greatest((select count() from logs where level <= 'Debug' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.09); + greatest(coalesce(((select message_format_string, count() from logs where level = 'Debug' group by message_format_string order by count() desc limit 1) as top_message).2, 0) / (select count() from logs), threshold) as r, + r <= threshold ? '' : top_message.1; -- Same as above for Info +WITH 0.05 as threshold select 'noisy Info messages', - greatest((select count() from logs where level <= 'Information' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.05); + greatest(coalesce(((select message_format_string, count() from logs where level = 'Information' group by message_format_string order by count() desc limit 1) as top_message).2, 0) / (select count() from logs), threshold) as r, + r <= threshold ? '' : top_message.1; -- Same as above for Warning -with ('Not enabled four letter command {}') as frequent_in_tests -select 'noisy Warning messages', - greatest(coalesce((select count() from logs where level = 'Warning' and message_format_string not in frequent_in_tests - group by message_format_string order by count() desc limit 1), 0) / (select count() from logs), 0.01); +with 0.01 as threshold +select + 'noisy Warning messages', + greatest(coalesce(((select message_format_string, count() from logs where level = 'Warning' and message_format_string not in ('Not enabled four letter command {}') + group by message_format_string order by count() desc limit 1) as top_message).2, 0) / (select count() from logs), threshold) as r, + r <= threshold ? '' : top_message.1; -- Same as above for Error +WITH 0.03 as threshold select 'noisy Error messages', - greatest(coalesce((select count() from logs where level = 'Error' group by message_format_string order by count() desc limit 1), 0) / (select count() from logs), 0.02); + greatest(coalesce(((select message_format_string, count() from logs where level = 'Error' group by message_format_string order by count() desc limit 1) as top_message).2, 0) / (select count() from logs), threshold) as r, + r <= threshold ? '' : top_message.1; select 'no Fatal messages', count() from logs where level = 'Fatal'; diff --git a/tests/queries/0_stateless/00109_shard_totals_after_having.sql b/tests/queries/0_stateless/00109_shard_totals_after_having.sql index b17accc0dae..dce265e0552 100644 --- a/tests/queries/0_stateless/00109_shard_totals_after_having.sql +++ b/tests/queries/0_stateless/00109_shard_totals_after_having.sql @@ -4,6 +4,9 @@ SET max_rows_to_group_by = 100000; SET max_block_size = 100001; SET group_by_overflow_mode = 'any'; +-- Settings 'max_rows_to_group_by' and 'max_bytes_before_external_group_by' are mutually exclusive. 
+SET max_bytes_before_external_group_by = 0; + DROP TABLE IF EXISTS numbers500k; CREATE TABLE numbers500k (number UInt32) ENGINE = TinyLog; diff --git a/tests/queries/0_stateless/00119_storage_join.sql b/tests/queries/0_stateless/00119_storage_join.sql index 2569a64d2c3..cd255cdfe24 100644 --- a/tests/queries/0_stateless/00119_storage_join.sql +++ b/tests/queries/0_stateless/00119_storage_join.sql @@ -12,7 +12,7 @@ SELECT x, s, k FROM (SELECT number AS k FROM system.numbers LIMIT 10) js1 ANY LE SELECT 1, x, 2, s, 3, k, 4 FROM (SELECT number AS k FROM system.numbers LIMIT 10) js1 ANY LEFT JOIN t2 USING k; SELECT t1.k, t1.s, t2.x -FROM ( SELECT number AS k, 'a' AS s FROM numbers(2) GROUP BY number WITH TOTALS ) AS t1 +FROM ( SELECT number AS k, 'a' AS s FROM numbers(2) GROUP BY number WITH TOTALS ORDER BY number) AS t1 ANY LEFT JOIN t2 AS t2 USING(k); DROP TABLE t2; diff --git a/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh b/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh index 389a2cd9684..a42fd58190a 100755 --- a/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh +++ b/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh @@ -16,7 +16,7 @@ if [ -n "$DBMS_TESTS_UNDER_VALGRIND" ]; then fi for i in $(seq 1000000 $((20000 * $STEP_MULTIPLIER)) 10000000 && seq 10100000 $((100000 * $STEP_MULTIPLIER)) 50000000); do - $CLICKHOUSE_CLIENT --max_memory_usage="$i" --query=" + $CLICKHOUSE_CLIENT --max_memory_usage="$i" --max_bytes_before_external_group_by 0 --query=" SELECT intDiv(number, 5) AS k, max(toString(number)) FROM remote('127.0.0.{2,3}', ${CLICKHOUSE_DATABASE}.numbers_100k) GROUP BY k ORDER BY k LIMIT 1; " 2> /dev/null; CODE=$?; diff --git a/tests/queries/0_stateless/00155_long_merges.sh b/tests/queries/0_stateless/00155_long_merges.sh index 9ed0f2c6de1..8ecca0aeb42 100755 --- a/tests/queries/0_stateless/00155_long_merges.sh +++ b/tests/queries/0_stateless/00155_long_merges.sh @@ -34,32 +34,40 @@ function test { SETTINGS="--min_insert_block_size_rows=0 --min_insert_block_size_bytes=0 --max_block_size=65505" + $CLICKHOUSE_CLIENT --query="SYSTEM STOP MERGES summing_00155" $CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO summing_00155 (x) SELECT number AS x FROM system.numbers LIMIT $1" $CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO summing_00155 (x) SELECT number AS x FROM system.numbers LIMIT $2" + $CLICKHOUSE_CLIENT --query="SYSTEM STOP MERGES collapsing_00155" $CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO collapsing_00155 (x) SELECT number AS x FROM system.numbers LIMIT $1" $CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO collapsing_00155 (x) SELECT number AS x FROM system.numbers LIMIT $2" + $CLICKHOUSE_CLIENT --query="SYSTEM STOP MERGES aggregating_00155" $CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO aggregating_00155 (d, x, s) SELECT today() AS d, number AS x, sumState(materialize(toUInt64(1))) AS s FROM (SELECT number FROM system.numbers LIMIT $1) GROUP BY number" $CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO aggregating_00155 (d, x, s) SELECT today() AS d, number AS x, sumState(materialize(toUInt64(1))) AS s FROM (SELECT number FROM system.numbers LIMIT $2) GROUP BY number" + $CLICKHOUSE_CLIENT --query="SYSTEM STOP MERGES replacing_00155" $CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO replacing_00155 (x, v) SELECT number AS x, toUInt64(number % 3 == 0) FROM system.numbers LIMIT $1" $CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO 
replacing_00155 (x, v) SELECT number AS x, toUInt64(number % 3 == 1) FROM system.numbers LIMIT $2" $CLICKHOUSE_CLIENT --query="SELECT count() = $SUM, sum(s) = $SUM FROM summing_00155" + $CLICKHOUSE_CLIENT --query="SYSTEM START MERGES summing_00155" $CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE summing_00155" $CLICKHOUSE_CLIENT --query="SELECT count() = $MAX, sum(s) = $SUM FROM summing_00155" echo $CLICKHOUSE_CLIENT --query="SELECT count() = $SUM, sum(s) = $SUM FROM collapsing_00155" - $CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE collapsing_00155" --server_logs_file='/dev/null'; + $CLICKHOUSE_CLIENT --query="SYSTEM START MERGES collapsing_00155" + $CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE collapsing_00155 FINAL" --server_logs_file='/dev/null'; $CLICKHOUSE_CLIENT --query="SELECT count() = $MAX, sum(s) = $MAX FROM collapsing_00155" echo $CLICKHOUSE_CLIENT --query="SELECT count() = $SUM, sumMerge(s) = $SUM FROM aggregating_00155" - $CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE aggregating_00155" + $CLICKHOUSE_CLIENT --query="SYSTEM START MERGES aggregating_00155" + $CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE aggregating_00155 FINAL" $CLICKHOUSE_CLIENT --query="SELECT count() = $MAX, sumMerge(s) = $SUM FROM aggregating_00155" echo $CLICKHOUSE_CLIENT --query="SELECT count() = $SUM, sum(s) = $SUM FROM replacing_00155" - $CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE replacing_00155" + $CLICKHOUSE_CLIENT --query="SYSTEM START MERGES replacing_00155" + $CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE replacing_00155 FINAL" $CLICKHOUSE_CLIENT --query="SELECT count() = $MAX, sum(s) = $MAX FROM replacing_00155" $CLICKHOUSE_CLIENT --query="SELECT count() = sum(v) FROM replacing_00155 where x % 3 == 0 and x < $1" $CLICKHOUSE_CLIENT --query="SELECT count() = sum(v) FROM replacing_00155 where x % 3 == 1 and x < $2" diff --git a/tests/queries/0_stateless/00166_functions_of_aggregation_states.sql b/tests/queries/0_stateless/00166_functions_of_aggregation_states.sql index b73a04e19b9..0a5a84bbb46 100644 --- a/tests/queries/0_stateless/00166_functions_of_aggregation_states.sql +++ b/tests/queries/0_stateless/00166_functions_of_aggregation_states.sql @@ -1 +1,4 @@ +-- Disable external aggregation because the state is reset for each new block of data in 'runningAccumulate' function. 
+SET max_bytes_before_external_group_by = 0; + SELECT k, finalizeAggregation(sum_state), runningAccumulate(sum_state) FROM (SELECT intDiv(number, 50000) AS k, sumState(number) AS sum_state FROM (SELECT number FROM system.numbers LIMIT 1000000) GROUP BY k ORDER BY k); diff --git a/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference b/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference index b2b0b43e490..72828aae5a9 100644 --- a/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference +++ b/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference @@ -36,9 +36,9 @@ GROUP BY ORDER BY 1 GROUP BY w/ ALIAS 0 -1 0 1 +1 ORDER BY w/ ALIAS 0 func(aggregate function) GROUP BY diff --git a/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql b/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql index 422f4a010f1..1bd6cbe8948 100644 --- a/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql +++ b/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql @@ -34,7 +34,7 @@ SELECT uniq(number) u FROM remote('127.0.0.{2,3}', currentDatabase(), data_00184 -- cover possible tricky issues SELECT 'GROUP BY w/ ALIAS'; -SELECT n FROM remote('127.0.0.{2,3}', currentDatabase(), data_00184) GROUP BY number AS n SETTINGS distributed_group_by_no_merge=2; +SELECT n FROM remote('127.0.0.{2,3}', currentDatabase(), data_00184) GROUP BY number AS n ORDER BY n SETTINGS distributed_group_by_no_merge=2; SELECT 'ORDER BY w/ ALIAS'; SELECT n FROM remote('127.0.0.{2,3}', currentDatabase(), data_00184) ORDER BY number AS n LIMIT 1 SETTINGS distributed_group_by_no_merge=2; diff --git a/tests/queries/0_stateless/00273_quantiles.sql b/tests/queries/0_stateless/00273_quantiles.sql index 9fef1f63057..f5b739b8be1 100644 --- a/tests/queries/0_stateless/00273_quantiles.sql +++ b/tests/queries/0_stateless/00273_quantiles.sql @@ -8,4 +8,7 @@ SELECT quantilesExact(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0 SELECT quantilesTDigest(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); SELECT quantilesDeterministic(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(x, x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +-- The result slightly differs but it's ok since `quantilesDeterministic` is an approximate function. 
+SET max_bytes_before_external_group_by = 0; + SELECT round(1000000 / (number + 1)) AS k, count() AS c, arrayMap(x -> round(x, 6), quantilesDeterministic(0.1, 0.5, 0.9)(number, intHash64(number))) AS q1, quantilesExact(0.1, 0.5, 0.9)(number) AS q2 FROM (SELECT number FROM system.numbers LIMIT 1000000) GROUP BY k ORDER BY k; diff --git a/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.sql b/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.sql index 67f5cc54afd..a3abbb9fd58 100644 --- a/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.sql +++ b/tests/queries/0_stateless/00410_aggregation_combinators_with_arenas.sql @@ -7,4 +7,8 @@ DROP TABLE IF EXISTS arena; SELECT length(arrayReduce('groupUniqArray', [[1, 2], [1], emptyArrayUInt8(), [1], [1, 2]])); SELECT min(x), max(x) FROM (SELECT length(arrayReduce('groupUniqArray', [hex(number), hex(number+1), hex(number)])) AS x FROM system.numbers LIMIT 100000); + +-- Disable external aggregation because the state is reset for each new block of data in 'runningAccumulate' function. +SET max_bytes_before_external_group_by = 0; + SELECT sum(length(runningAccumulate(x))) FROM (SELECT groupUniqArrayState(toString(number % 10)) AS x, number FROM (SELECT * FROM system.numbers LIMIT 11) GROUP BY number ORDER BY number); diff --git a/tests/queries/0_stateless/00493_substring_of_fixedstring.reference b/tests/queries/0_stateless/00493_substring_of_fixedstring.reference deleted file mode 100644 index 72d55841a37..00000000000 --- a/tests/queries/0_stateless/00493_substring_of_fixedstring.reference +++ /dev/null @@ -1,52 +0,0 @@ -hello\0\0\0 -hello\0\0\0 -0\0\0\0\0\0\0\0 -1\0\0\0\0\0\0\0 -2\0\0\0\0\0\0\0 -3\0\0\0\0\0\0\0 -4\0\0\0\0\0\0\0 -5\0\0\0\0\0\0\0 -6\0\0\0\0\0\0\0 -7\0\0\0\0\0\0\0 -8\0\0\0\0\0\0\0 -9\0\0\0\0\0\0\0 -995 -996 -997 -998 -999 -100 -100 -100 -100 -100 - -9 -99 -998 -999\0 - -1 -10 -100 -1004 -995\0 -96\0 -7\0 -\0 - -1000 -001 -02 -3 - -995 -9 -7\0 -\0 - -10 -001 -0 -3 - diff --git a/tests/queries/0_stateless/00493_substring_of_fixedstring.sql b/tests/queries/0_stateless/00493_substring_of_fixedstring.sql deleted file mode 100644 index e267e1d54bb..00000000000 --- a/tests/queries/0_stateless/00493_substring_of_fixedstring.sql +++ /dev/null @@ -1,7 +0,0 @@ -SELECT substring(toFixedString('hello', 16), 1, 8); -SELECT substring(toFixedString(materialize('hello'), 16), 1, 8); -SELECT substring(toFixedString(toString(number), 16), 1, 8) FROM system.numbers LIMIT 10; -SELECT substring(toFixedString(toString(number), 4), 1, 3) FROM system.numbers LIMIT 995, 10; -SELECT substring(toFixedString(toString(number), 4), 1, number % 5) FROM system.numbers LIMIT 995, 10; -SELECT substring(toFixedString(toString(number), 4), 1 + number % 5) FROM system.numbers LIMIT 995, 10; -SELECT substring(toFixedString(toString(number), 4), 1 + number % 5, 1 + number % 3) FROM system.numbers LIMIT 995, 10; diff --git a/tests/queries/0_stateless/00496_substring_negative_offset.reference b/tests/queries/0_stateless/00496_substring_negative_offset.reference deleted file mode 100644 index b592f370dea..00000000000 --- a/tests/queries/0_stateless/00496_substring_negative_offset.reference +++ /dev/null @@ -1,40 +0,0 @@ -abc -abc -abc -bc -c - -abc -bc -c - -abc -abc -abc -bc -c - -abc -bc -c - -abc -abc -abc -bc -c - -abc -bc -c - -abc -abc -abc -bc -c - -abc -bc -c - diff --git a/tests/queries/0_stateless/00496_substring_negative_offset.sql b/tests/queries/0_stateless/00496_substring_negative_offset.sql deleted file mode 
100644 index 170af8f79b4..00000000000 --- a/tests/queries/0_stateless/00496_substring_negative_offset.sql +++ /dev/null @@ -1,4 +0,0 @@ -SELECT substring('abc', number - 5) FROM system.numbers LIMIT 10; -SELECT substring(materialize('abc'), number - 5) FROM system.numbers LIMIT 10; -SELECT substring(toFixedString('abc', 3), number - 5) FROM system.numbers LIMIT 10; -SELECT substring(materialize(toFixedString('abc', 3)), number - 5) FROM system.numbers LIMIT 10; diff --git a/tests/queries/0_stateless/00502_sum_map.reference b/tests/queries/0_stateless/00502_sum_map.reference index b1cd0303004..0c9bebefd0b 100644 --- a/tests/queries/0_stateless/00502_sum_map.reference +++ b/tests/queries/0_stateless/00502_sum_map.reference @@ -63,7 +63,7 @@ SELECT sumMap(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal; SELECT sumMapWithOverflow(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal; ([1,2,3,4,5,6,7,8],[1,2,6,8,10,12,7,8]) DROP TABLE sum_map_decimal; -CREATE TABLE sum_map_decimal_nullable (`statusMap` Array(Tuple(goal_id UInt16, revenue Nullable(Decimal(9, 5))))) engine=Log; +CREATE TABLE sum_map_decimal_nullable (`statusMap` Nested(goal_id UInt16, revenue Nullable(Decimal(9, 5)))) engine=Log; INSERT INTO sum_map_decimal_nullable VALUES ([1, 2, 3], [1.0, 2.0, 3.0]), ([3, 4, 5], [3.0, 4.0, 5.0]), ([4, 5, 6], [4.0, 5.0, 6.0]), ([6, 7, 8], [6.0, 7.0, 8.0]); SELECT sumMap(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal_nullable; ([1,2,3,4,5,6,7,8],[1,2,6,8,10,12,7,8]) diff --git a/tests/queries/0_stateless/00502_sum_map.sql b/tests/queries/0_stateless/00502_sum_map.sql index 30037d49784..7d44bde6d50 100644 --- a/tests/queries/0_stateless/00502_sum_map.sql +++ b/tests/queries/0_stateless/00502_sum_map.sql @@ -56,7 +56,7 @@ SELECT sumMapWithOverflow(statusMap.goal_id, statusMap.revenue) FROM sum_map_dec DROP TABLE sum_map_decimal; -CREATE TABLE sum_map_decimal_nullable (`statusMap` Array(Tuple(goal_id UInt16, revenue Nullable(Decimal(9, 5))))) engine=Log; +CREATE TABLE sum_map_decimal_nullable (`statusMap` Nested(goal_id UInt16, revenue Nullable(Decimal(9, 5)))) engine=Log; INSERT INTO sum_map_decimal_nullable VALUES ([1, 2, 3], [1.0, 2.0, 3.0]), ([3, 4, 5], [3.0, 4.0, 5.0]), ([4, 5, 6], [4.0, 5.0, 6.0]), ([6, 7, 8], [6.0, 7.0, 8.0]); SELECT sumMap(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal_nullable; DROP TABLE sum_map_decimal_nullable; diff --git a/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql b/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql index e3c1bb10426..871f96bb019 100644 --- a/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql +++ b/tests/queries/0_stateless/00577_replacing_merge_tree_vertical_merge.sql @@ -3,7 +3,7 @@ set optimize_on_insert = 0; drop table if exists tab_00577; create table tab_00577 (date Date, version UInt64, val UInt64) engine = ReplacingMergeTree(version) partition by date order by date settings enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 0, vertical_merge_algorithm_min_columns_to_activate = 0, min_rows_for_wide_part = 0, - min_bytes_for_wide_part = 0; + min_bytes_for_wide_part = 0, allow_experimental_replacing_merge_with_cleanup=1; insert into tab_00577 values ('2018-01-01', 2, 2), ('2018-01-01', 1, 1); insert into tab_00577 values ('2018-01-01', 0, 0); select * from tab_00577 order by version; @@ -16,7 +16,7 @@ DROP TABLE IF EXISTS testCleanupR1; CREATE TABLE testCleanupR1 (uid String, version UInt32, is_deleted 
UInt8) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/{database}/tables/test_cleanup/', 'r1', version, is_deleted) ORDER BY uid SETTINGS enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 0, vertical_merge_algorithm_min_columns_to_activate = 0, min_rows_for_wide_part = 0, - min_bytes_for_wide_part = 0; + min_bytes_for_wide_part = 0, allow_experimental_replacing_merge_with_cleanup=1; INSERT INTO testCleanupR1 (*) VALUES ('d1', 1, 0),('d2', 1, 0),('d3', 1, 0),('d4', 1, 0); INSERT INTO testCleanupR1 (*) VALUES ('d3', 2, 1); INSERT INTO testCleanupR1 (*) VALUES ('d1', 2, 1); diff --git a/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh index 11396dd34eb..1bb4dbd34de 100755 --- a/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh +++ b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -settings="--log_queries=1 --log_query_threads=1 --log_profile_events=1 --log_query_settings=1 --allow_deprecated_syntax_for_merge_tree=1" +settings="--log_queries=1 --log_query_threads=1 --log_profile_events=1 --log_query_settings=1 --allow_deprecated_syntax_for_merge_tree=1 --max_bytes_before_external_group_by 0 --max_bytes_before_external_sort 0" # Test insert logging on each block and checkPacket() method diff --git a/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper_long.sql b/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper_long.sql index 23b368549f8..bff8c7e73ee 100644 --- a/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper_long.sql +++ b/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper_long.sql @@ -20,7 +20,6 @@ SET select_sequential_consistency=1; SELECT x FROM quorum1 ORDER BY x; SELECT x FROM quorum2 ORDER BY x; -SET insert_keeper_fault_injection_probability=0; SET insert_quorum=2, insert_quorum_parallel=0; INSERT INTO quorum1 VALUES (4, '1990-11-15'); diff --git a/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long.sql b/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long.sql index 74399c9f27c..a1859220c6c 100644 --- a/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long.sql +++ b/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long.sql @@ -11,7 +11,6 @@ CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/t SET insert_quorum=2, insert_quorum_parallel=0; SET select_sequential_consistency=1; -SET insert_keeper_fault_injection_probability=0; INSERT INTO quorum1 VALUES (1, '2018-11-15'); INSERT INTO quorum1 VALUES (2, '2018-11-15'); diff --git a/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper_long.sql b/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper_long.sql index a61672249a8..61394447c3d 100644 --- a/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper_long.sql +++ b/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper_long.sql @@ -11,7 +11,6 @@ CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/t SET insert_quorum=2, insert_quorum_parallel=0; SET select_sequential_consistency=1; -SET insert_keeper_fault_injection_probability=0; 
SET insert_quorum_timeout=0; diff --git a/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long.sql b/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long.sql index e821d7587ee..e3e5aa7949f 100644 --- a/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long.sql +++ b/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long.sql @@ -17,7 +17,6 @@ SYSTEM SYNC REPLICA quorum2; SET select_sequential_consistency=1; SET insert_quorum=2, insert_quorum_parallel=0; -SET insert_keeper_fault_injection_probability=0; SET insert_quorum_timeout=0; diff --git a/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper_long.sql b/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper_long.sql index 22fb40f9f85..4eb263c75c2 100644 --- a/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper_long.sql +++ b/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper_long.sql @@ -11,7 +11,6 @@ CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/t SET insert_quorum=2, insert_quorum_parallel=0; SET select_sequential_consistency=1; -SET insert_keeper_fault_injection_probability=0; INSERT INTO quorum1 VALUES (1, '2018-11-15'); INSERT INTO quorum1 VALUES (2, '2018-11-15'); diff --git a/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper_long.sql b/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper_long.sql index a97b7438da0..7fb23936819 100644 --- a/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper_long.sql +++ b/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper_long.sql @@ -11,7 +11,6 @@ CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/t SET insert_quorum=2, insert_quorum_parallel=0; SET select_sequential_consistency=1; -SET insert_keeper_fault_injection_probability=0; INSERT INTO quorum1 VALUES (1, '2018-11-15'); INSERT INTO quorum1 VALUES (2, '2018-11-15'); diff --git a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference index 3de05d66188..dd5860ae491 100644 --- a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference +++ b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference @@ -6,7 +6,7 @@ │ name2 │ 1 │ 0 │ 0 │ 0 │ │ name3 │ 0 │ 0 │ 0 │ 0 │ └───────┴─────────────────────┴───────────────────┴───────────────────┴────────────────────┘ -231 1 +3 231 1 ┌─name────────────────┬─partition_key─┬─sorting_key───┬─primary_key─┬─sampling_key─┐ │ check_system_tables │ date │ date, version │ date │ │ └─────────────────────┴───────────────┴───────────────┴─────────────┴──────────────┘ @@ -51,3 +51,6 @@ Check total_bytes/total_rows for Set Check total_bytes/total_rows for Join 1 50 1 100 +Check total_uncompressed_bytes/total_bytes/total_rows for Materialized views +0 0 0 +1 1 1 diff --git a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql index ae9db656f00..51818228913 100644 --- a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql +++ b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql @@ -23,7 +23,7 @@ FROM 
system.columns WHERE table = 'check_system_tables' AND database = currentDa FORMAT PrettyCompactNoEscapes; INSERT INTO check_system_tables VALUES (1, 1, 1); -SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +SELECT total_bytes_uncompressed, total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); DROP TABLE IF EXISTS check_system_tables; @@ -138,3 +138,23 @@ SELECT total_bytes BETWEEN 5000 AND 15000, total_rows FROM system.tables WHERE n INSERT INTO check_system_tables SELECT number+50 FROM numbers(50); SELECT total_bytes BETWEEN 5000 AND 15000, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); DROP TABLE check_system_tables; + +-- Build MergeTree table for Materialized view +CREATE TABLE check_system_tables + ( + name1 UInt8, + name2 UInt8, + name3 UInt8 + ) ENGINE = MergeTree() + ORDER BY name1 + PARTITION BY name2 + SAMPLE BY name1 + SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key = false, ratio_of_defaults_for_sparse_serialization = 1; + +SELECT 'Check total_uncompressed_bytes/total_bytes/total_rows for Materialized views'; +CREATE MATERIALIZED VIEW check_system_tables_mv ENGINE = MergeTree() ORDER BY name2 AS SELECT name1, name2, name3 FROM check_system_tables; +SELECT total_bytes_uncompressed, total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables_mv' AND database = currentDatabase(); +INSERT INTO check_system_tables VALUES (1, 1, 1); +SELECT total_bytes_uncompressed > 0, total_bytes > 0, total_rows FROM system.tables WHERE name = 'check_system_tables_mv' AND database = currentDatabase(); +DROP TABLE check_system_tables_mv; +DROP TABLE check_system_tables; diff --git a/tests/queries/0_stateless/00808_not_optimize_predicate.sql b/tests/queries/0_stateless/00808_not_optimize_predicate.sql index ba8f5eb5753..d0dda14e026 100644 --- a/tests/queries/0_stateless/00808_not_optimize_predicate.sql +++ b/tests/queries/0_stateless/00808_not_optimize_predicate.sql @@ -48,7 +48,8 @@ SELECT intDiv(number, 25) AS n, avgState(number) AS s FROM numbers(2500) -GROUP BY n; +GROUP BY n +ORDER BY n; SET force_primary_key = 1, enable_optimize_predicate_expression = 1; @@ -60,7 +61,8 @@ FROM finalizeAggregation(s) FROM test_00808_push_down_with_finalizeAggregation ) -WHERE (n >= 2) AND (n <= 5); +WHERE (n >= 2) AND (n <= 5) +ORDER BY n; EXPLAIN SYNTAX SELECT * FROM diff --git a/tests/queries/0_stateless/00877_memory_limit_for_new_delete.sql b/tests/queries/0_stateless/00877_memory_limit_for_new_delete.sql index 3864293751f..8eb9d83b730 100644 --- a/tests/queries/0_stateless/00877_memory_limit_for_new_delete.sql +++ b/tests/queries/0_stateless/00877_memory_limit_for_new_delete.sql @@ -2,6 +2,7 @@ -- Tag no-msan: memory limits don't work correctly under msan because it replaces malloc/free SET max_memory_usage = 1000000000; +SET max_bytes_before_external_group_by = 0; SELECT sum(ignore(*)) FROM ( SELECT number, argMax(number, (number, toFixedString(toString(number), 1024))) diff --git a/tests/queries/0_stateless/00947_ml_test.sql b/tests/queries/0_stateless/00947_ml_test.sql index 94e4f3b4626..72000103a44 100644 --- a/tests/queries/0_stateless/00947_ml_test.sql +++ b/tests/queries/0_stateless/00947_ml_test.sql @@ -40,10 +40,10 @@ INSERT INTO grouptest VALUES (1, 1.732, 3.653, 11.422), (1, 2.150, 2.103, 7.609), (1, 0.061, 3.310, 7.052), (1, 1.030, 3.671, 10.075), (1, 1.879, 0.578, 
2.492), (1, 0.922, 2.552, 6.499), (1, 1.145, -0.095, -0.993), (1, 1.920, 0.373, 1.959), (1, 0.458, 0.094, -1.801), (1, -0.118, 3.273, 6.582), (1, 2.667, 1.472, 6.752), (1, -0.387, -0.529, -5.360), (1, 2.219, 1.790, 6.810), (1, -0.754, 2.139, 1.908), (1, -0.446, -0.668, -5.896), (1, 1.729, 0.914, 3.199), (1, 2.908, -0.420, 1.556), (1, 1.645, 3.581, 11.034), (1, 0.358, -0.950, -5.136), (1, -0.467, 2.339, 3.084), (1, 3.629, 2.959, 13.135), (1, 2.393, 0.926, 4.563), (1, -0.945, 0.281, -4.047), (1, 3.688, -0.570, 2.667), (1, 3.016, 1.775, 8.356), (1, 2.571, 0.139, 2.559), (1, 2.999, 0.956, 5.866), (1, 1.754, -0.809, -1.920), (1, 3.943, 0.382, 6.030), (1, -0.970, 2.315, 2.004), (1, 1.503, 0.790, 2.376), (1, -0.775, 2.563, 3.139), (1, 1.211, 0.113, -0.240), (1, 3.058, 0.977, 6.048), (1, 2.729, 1.634, 7.360), (1, 0.307, 2.759, 5.893), (1, 3.272, 0.181, 4.089), (1, 1.192, 1.963, 5.273), (1, 0.931, 1.447, 3.203), (1, 3.835, 3.447, 15.011), (1, 0.709, 0.008, -1.559), (1, 3.155, -0.676, 1.283), (1, 2.342, 1.047, 4.824), (1, 2.059, 1.262, 4.903), (1, 2.797, 0.855, 5.159), (1, 0.387, 0.645, -0.292), (1, 1.418, 0.408, 1.060), (1, 2.719, -0.826, -0.039), (1, 2.735, 3.736, 13.678), (1, 0.205, 0.777, -0.260), (1, 3.117, 2.063, 9.424), (1, 0.601, 0.178, -1.263), (1, 0.064, 0.157, -2.401), (1, 3.104, -0.455, 1.842), (1, -0.253, 0.672, -1.490), (1, 2.592, -0.408, 0.961), (1, -0.909, 1.314, -0.878), (1, 0.625, 2.594, 6.031), (1, 2.749, -0.210, 1.869), (1, -0.469, 1.532, 0.657), (1, 1.954, 1.827, 6.388), (1, -0.528, 1.136, -0.647), (1, 0.802, -0.583, -3.146), (1, -0.176, 1.584, 1.400), (1, -0.705, -0.785, -6.766), (1, 1.660, 2.365, 7.416), (1, 2.278, 3.977, 13.485), (1, 2.846, 3.845, 14.229), (1, 3.588, -0.401, 2.974), (1, 3.525, 3.831, 15.542), (1, 0.191, 3.312, 7.318), (1, 2.615, -0.287, 1.370), (1, 2.701, -0.446, 1.064), (1, 2.065, -0.556, -0.538), (1, 2.572, 3.618, 12.997), (1, 3.743, -0.708, 2.362), (1, 3.734, 2.319, 11.425), (1, 3.768, 2.777, 12.866), (1, 3.203, 0.958, 6.280), (1, 1.512, 2.635, 7.927), (1, 2.194, 2.323, 8.356), (1, -0.726, 2.729, 3.735), (1, 0.020, 1.704, 2.152), (1, 2.173, 2.856, 9.912), (1, 3.124, 1.705, 8.364), (1, -0.834, 2.142, 1.759), (1, -0.702, 3.024, 4.666), (1, 1.393, 0.583, 1.535), (1, 2.136, 3.770, 12.581), (1, -0.445, 0.991, -0.917), (1, 0.244, -0.835, -5.016), (1, 2.789, 0.691, 4.652), (1, 0.246, 2.661, 5.475), (1, 3.793, 2.671, 12.601), (1, 1.645, -0.973, -2.627), (1, 2.405, 1.842, 7.336), (1, 3.221, 3.109, 12.769), (1, -0.638, 3.220, 5.385), (1, 1.836, 3.025, 9.748), (1, -0.660, 1.818, 1.133), (1, 0.901, 0.981, 1.744), (1, -0.236, 3.087, 5.789), (1, 1.744, 3.864, 12.078), (1, -0.166, 3.186, 6.226), (1, 3.536, -0.090, 3.803), (1, 3.284, 2.026, 9.648), (1, 1.327, 2.822, 8.119), (1, -0.709, 0.105, -4.104), (1, 0.509, -0.989, -4.949), (1, 0.180, -0.934, -5.440), (1, 3.522, 1.374, 8.168), (1, 1.497, -0.764, -2.297), (1, 1.696, 2.364, 7.482), (1, -0.202, -0.032, -3.500), (1, 3.109, -0.138, 2.804), (1, -0.238, 2.992, 5.501), (1, 1.639, 1.634, 5.181), (1, 1.919, 0.341, 1.859), (1, -0.563, 1.750, 1.124), (1, 0.886, 3.589, 9.539), (1, 3.619, 3.020, 13.299), (1, 1.703, -0.493, -1.073), (1, 2.364, 3.764, 13.022), (1, 1.820, 1.854, 6.201), (1, 1.437, -0.765, -2.421), (1, 1.396, 0.959, 2.668), (1, 2.608, 2.032, 8.312), (1, 0.333, -0.040, -2.455), (1, 3.441, 0.824, 6.355), (1, 1.303, 2.767, 7.908), (1, 1.359, 2.404, 6.932), (1, 0.674, 0.241, -0.930), (1, 2.708, -0.077, 2.183), (1, 3.821, 3.215, 14.287), (1, 3.316, 1.591, 8.404), (1, -0.848, 1.145, -1.259), (1, 3.455, 3.081, 13.153), (1, 
2.568, 0.259, 2.914), (1, 2.866, 2.636, 10.642), (1, 2.776, -0.309, 1.626), (1, 2.087, 0.619, 3.031), (1, 1.682, 1.201, 3.967), (1, 3.800, 2.600, 12.399), (1, 3.344, -0.780, 1.347), (1, 1.053, -0.817, -3.346), (1, 0.805, 3.085, 7.865), (1, 0.173, 0.069, -2.449), (1, 2.018, 1.309, 4.964), (1, 3.713, 3.804, 15.838), (1, 3.805, -0.063, 4.421), (1, 3.587, 2.854, 12.738), (1, 2.426, -0.179, 1.315), (1, 0.535, 0.572, -0.213), (1, -0.558, 0.142, -3.690), (1, -0.875, 2.700, 3.349), (1, 2.405, 3.933, 13.610), (1, 1.633, 1.222, 3.934), (1, 0.049, 2.853, 5.657), (1, 1.146, 0.907, 2.015), (1, 0.300, 0.219, -1.744), (1, 2.226, 2.526, 9.029), (1, 2.545, -0.762, -0.198), (1, 2.553, 3.956, 13.974), (1, -0.898, 2.836, 3.713), (1, 3.796, -0.202, 3.985), (1, -0.810, 2.963, 4.268), (1, 0.511, 2.104, 4.334), (1, 3.527, 3.741, 15.275), (1, -0.921, 3.094, 4.440), (1, 0.856, 3.108, 8.036), (1, 0.815, 0.565, 0.323), (1, 3.717, 0.693, 6.512), (1, 3.052, 3.558, 13.778), (1, 2.942, 3.034, 11.986), (1, 0.765, 3.177, 8.061), (1, 3.175, -0.525, 1.776), (1, 0.309, 1.006, 0.638), (1, 1.922, 0.835, 3.349), (1, 3.678, 3.314, 14.297), (1, 2.840, -0.486, 1.221), (1, 1.195, 3.396, 9.578), (1, -0.157, 3.122, 6.053), (1, 2.404, 1.434, 6.110), (1, 3.108, 2.210, 9.845), (1, 2.289, 1.188, 5.142), (1, -0.319, -0.044, -3.769), (1, -0.625, 3.701, 6.854), (1, 2.269, -0.276, 0.710), (1, 0.777, 1.963, 4.442), (1, 0.411, 1.893, 3.501), (1, 1.173, 0.461, 0.728), (1, 1.767, 3.077, 9.765), (1, 0.853, 3.076, 7.933), (1, -0.013, 3.149, 6.421), (1, 3.841, 1.526, 9.260), (1, -0.950, 0.277, -4.070), (1, -0.644, -0.747, -6.527), (1, -0.923, 1.733, 0.353), (1, 0.044, 3.037, 6.201), (1, 2.074, 2.494, 8.631), (1, 0.016, 0.961, -0.085), (1, -0.780, -0.448, -5.904), (1, 0.170, 1.936, 3.148), (1, -0.420, 3.730, 7.349), (1, -0.630, 1.504, 0.254), (1, -0.006, 0.045, -2.879), (1, 1.101, -0.985, -3.753), (1, 1.618, 0.555, 1.900), (1, -0.336, 1.408, 0.552), (1, 1.086, 3.284, 9.024), (1, -0.815, 2.032, 1.466), (1, 3.144, -0.380, 2.148), (1, 2.326, 2.077, 7.883), (1, -0.571, 0.964, -1.251), (1, 2.416, 1.255, 5.595), (1, 3.964, 1.379, 9.065), (1, 3.897, 1.553, 9.455), (1, 1.806, 2.667, 8.611), (1, 0.323, 3.809, 9.073), (1, 0.501, 3.256, 7.769), (1, -0.679, 3.539, 6.259), (1, 2.825, 3.856, 14.219), (1, 0.288, -0.536, -4.032), (1, 3.009, 0.725, 5.193), (1, -0.763, 1.140, -1.105), (1, 1.124, 3.807, 10.670), (1, 2.478, 0.204, 2.570), (1, 2.825, 2.639, 10.566), (1, 1.878, -0.883, -1.892), (1, 3.380, 2.942, 12.587), (1, 2.202, 1.739, 6.621), (1, -0.711, -0.680, -6.463), (1, -0.266, 1.827, 1.951), (1, -0.846, 1.003, -1.683), (1, 3.201, 0.132, 3.798), (1, 2.797, 0.085, 2.849), (1, 1.632, 3.269, 10.072), (1, 2.410, 2.727, 10.003), (1, -0.624, 0.853, -1.690), (1, 1.314, 3.268, 9.433), (1, -0.395, 0.450, -2.440), (1, 0.992, 3.168, 8.489), (1, 3.355, 2.106, 10.028), (1, 0.509, -0.888, -4.647), (1, 1.007, 0.797, 1.405), (1, 0.045, 0.211, -2.278), (1, -0.911, 1.093, -1.544), (1, 2.409, 0.273, 2.637), (1, 2.640, 3.540, 12.899), (1, 2.668, -0.433, 1.038), (1, -0.014, 0.341, -2.005), (1, -0.525, -0.344, -5.083), (1, 2.278, 3.517, 12.105), (1, 3.712, 0.901, 7.128), (1, -0.689, 2.842, 4.149), (1, -0.467, 1.263, -0.147), (1, 0.963, -0.653, -3.034), (1, 2.559, 2.590, 9.889), (1, 1.566, 1.393, 4.312), (1, -1.000, 1.809, 0.429), (1, -0.297, 3.221, 6.070), (1, 2.199, 3.820, 12.856), (1, 3.096, 3.251, 12.944), (1, 1.479, 1.835, 5.461), (1, 0.276, 0.773, -0.130), (1, 0.607, 1.382, 2.360), (1, 1.169, -0.108, -0.985), (1, 3.429, 0.475, 5.282), (1, 2.626, 0.104, 2.563), (1, 1.156, 3.512, 
9.850), (1, 3.947, 0.796, 7.282), (1, -0.462, 2.425, 3.351), (1, 3.957, 0.366, 6.014), (1, 3.763, -0.330, 3.536), (1, 0.667, 3.361, 8.417), (1, -0.583, 0.892, -1.492), (1, -0.505, 1.344, 0.021), (1, -0.474, 2.714, 4.195), (1, 3.455, 0.014, 3.950), (1, 1.016, 1.828, 4.516), (1, 1.845, 0.193, 1.269), (1, -0.529, 3.930, 7.731), (1, 2.636, 0.045, 2.408), (1, 3.757, -0.918, 1.760), (1, -0.808, 1.160, -1.137), (1, 0.744, 1.435, 2.793), (1, 3.457, 3.566, 14.613), (1, 1.061, 3.140, 8.544), (1, 3.733, 3.368, 14.570), (1, -0.969, 0.879, -2.301), (1, 3.940, 3.136, 14.287), (1, -0.730, 2.107, 1.860), (1, 3.699, 2.820, 12.858), (1, 2.197, -0.636, -0.514), (1, 0.775, -0.979, -4.387), (1, 2.019, 2.828, 9.521), (1, 1.415, 0.113, 0.170), (1, 1.567, 3.410, 10.363), (1, 0.984, -0.960, -3.913), (1, 1.809, 2.487, 8.079), (1, 1.550, 1.130, 3.489), (1, -0.770, 3.027, 4.542), (1, -0.358, 3.326, 6.262), (1, 3.140, 0.096, 3.567), (1, -0.685, 2.213, 2.270), (1, 0.916, 0.692, 0.907), (1, 1.526, 1.159, 3.527), (1, 2.675, -0.568, 0.645), (1, 1.740, 3.019, 9.538), (1, 1.223, 2.088, 5.709), (1, 1.572, -0.125, -0.230), (1, 3.641, 0.362, 5.369), (1, 2.944, 3.897, 14.578), (1, 2.775, 2.461, 9.932), (1, -0.200, 2.492, 4.076), (1, 0.065, 2.055, 3.296), (1, 2.375, -0.639, -0.167), (1, -0.133, 1.138, 0.149), (1, -0.385, 0.163, -3.281), (1, 2.200, 0.863, 3.989), (1, -0.470, 3.492, 6.536), (1, -0.916, -0.547, -6.472), (1, 0.634, 0.927, 1.049), (1, 2.930, 2.655, 10.825), (1, 3.094, 2.802, 11.596), (1, 0.457, 0.539, -0.470), (1, 1.277, 2.229, 6.240), (1, -0.157, 1.270, 0.496), (1, 3.320, 0.640, 5.559), (1, 2.836, 1.067, 5.872), (1, 0.921, -0.716, -3.307), (1, 3.886, 1.487, 9.233), (1, 0.306, -0.142, -2.815), (1, 3.727, -0.410, 3.225), (1, 1.268, -0.801, -2.866), (1, 2.302, 2.493, 9.084), (1, 0.331, 0.373, -1.220), (1, 3.224, -0.857, 0.879), (1, 1.328, 2.786, 8.014), (1, 3.639, 1.601, 9.081), (1, 3.201, -0.484, 1.949), (1, 3.447, -0.734, 1.692), (1, 2.773, -0.143, 2.117), (1, 1.517, -0.493, -1.445), (1, 1.778, -0.428, -0.728), (1, 3.989, 0.099, 5.274), (1, 1.126, 3.985, 11.206), (1, 0.348, 0.756, -0.035), (1, 2.399, 2.576, 9.525), (1, 0.866, 1.800, 4.132), (1, 3.612, 1.598, 9.017), (1, 0.495, 2.239, 4.707), (1, 2.442, 3.712, 13.019), (1, 0.238, -0.844, -5.057), (1, 1.404, 3.095, 9.093), (1, 2.842, 2.044, 8.816), (1, 0.622, 0.322, -0.791), (1, -0.561, 1.242, -0.395), (1, 0.679, 3.822, 9.823), (1, 1.875, 3.526, 11.327), (1, 3.587, 1.050, 7.324), (1, 1.467, 0.588, 1.699), (1, 3.180, 1.571, 8.074), (1, 1.402, 0.430, 1.093), (1, 1.834, 2.209, 7.294), (1, 3.542, -0.259, 3.306), (1, -0.517, 0.174, -3.513), (1, 3.549, 2.210, 10.729), (1, 2.260, 3.393, 11.699), (1, 0.036, 1.893, 2.751), (1, 0.680, 2.815, 6.804), (1, 0.219, 0.368, -1.459), (1, -0.519, 3.987, 7.924), (1, 0.974, 0.761, 1.231), (1, 0.107, 0.620, -0.927), (1, 1.513, 1.910, 5.755), (1, 3.114, 0.894, 5.910), (1, 3.061, 3.052, 12.276), (1, 2.556, 3.779, 13.448), (1, 1.964, 2.692, 9.002), (1, 3.894, -0.032, 4.690), (1, -0.693, 0.910, -1.655), (1, 2.692, 2.908, 11.108), (1, -0.824, 1.190, -1.078), (1, 3.621, 0.918, 6.997), (1, 3.190, 2.442, 10.707), (1, 1.424, -0.546, -1.791), (1, 2.061, -0.427, -0.158), (1, 1.532, 3.158, 9.540), (1, 0.648, 3.557, 8.967), (1, 2.511, 1.665, 7.017), (1, 1.903, -0.168, 0.302), (1, -0.186, -0.718, -5.528), (1, 2.421, 3.896, 13.531), (1, 3.063, 1.841, 8.650), (1, 0.636, 1.699, 3.367), (1, 1.555, 0.688, 2.174), (1, -0.412, 0.454, -2.462), (1, 1.645, 3.207, 9.911), (1, 3.396, 3.766, 15.090), (1, 0.375, -0.256, -3.017), (1, 3.636, 0.732, 6.469), (1, 2.503, 
3.133, 11.405), (1, -0.253, 0.693, -1.429), (1, 3.178, 3.110, 12.686), (1, 3.282, -0.725, 1.388), (1, -0.297, 1.222, 0.070), (1, 1.872, 3.211, 10.377), (1, 3.471, 1.446, 8.278), (1, 2.891, 0.197, 3.374), (1, -0.896, 2.198, 1.802), (1, 1.178, -0.717, -2.796), (1, 0.650, 3.371, 8.412), (1, 0.447, 3.248, 7.637), (1, 1.616, -0.109, -0.097), (1, 1.837, 1.092, 3.951), (1, 0.767, 1.384, 2.684), (1, 3.466, -0.600, 2.133), (1, -0.800, -0.734, -6.802), (1, -0.534, 0.068, -3.865), (1, 3.416, -0.459, 2.455), (1, 0.800, -0.132, -1.795), (1, 2.150, 1.190, 4.869), (1, 0.830, 1.220, 2.319), (1, 2.656, 2.587, 10.072), (1, 0.375, -0.219, -2.906), (1, 0.582, -0.637, -3.749), (1, 0.588, -0.723, -3.992), (1, 3.875, 2.126, 11.127), (1, -0.476, 1.909, 1.775), (1, 0.963, 3.597, 9.716), (1, -0.888, 3.933, 7.021), (1, 1.711, -0.868, -2.184), (1, 3.244, 1.990, 9.460), (1, -0.057, 1.537, 1.497), (1, -0.015, 3.511, 7.504), (1, 0.280, 0.582, -0.695), (1, 2.402, 2.731, 9.998), (1, 2.053, 2.253, 7.865), (1, 1.955, 0.172, 1.424), (1, 3.746, 0.872, 7.107), (1, -0.157, 2.381, 3.829), (1, 3.548, -0.918, 1.340), (1, 2.449, 3.195, 11.482), (1, 1.582, 1.055, 3.329), (1, 1.908, -0.839, -1.700), (1, 2.341, 3.137, 11.091), (1, -0.043, 3.873, 8.532), (1, 0.528, -0.752, -4.198), (1, -0.940, 0.261, -4.098), (1, 2.609, 3.531, 12.812), (1, 2.439, 2.486, 9.336), (1, -0.659, -0.150, -4.768), (1, 2.131, 1.973, 7.181), (1, 0.253, 0.304, -1.583), (1, -0.169, 2.273, 3.480), (1, 1.855, 3.974, 12.631), (1, 0.092, 1.160, 0.666), (1, 3.990, 0.402, 6.187), (1, -0.455, 0.932, -1.113), (1, 2.365, 1.152, 5.185), (1, -0.058, 1.244, 0.618), (1, 0.674, 0.481, -0.209), (1, 3.002, 0.246, 3.743), (1, 1.804, 3.765, 11.902), (1, 3.567, -0.752, 1.876), (1, 0.098, 2.257, 3.968), (1, 0.130, -0.889, -5.409), (1, 0.633, 1.891, 3.940), (1, 0.421, 2.533, 5.440), (1, 2.252, 1.853, 7.063), (1, 3.191, -0.980, 0.443), (1, -0.776, 3.241, 5.171), (1, 0.509, 1.737, 3.229), (1, 3.583, 1.274, 7.986), (1, 1.101, 2.896, 7.891), (1, 3.072, -0.008, 3.120), (1, 2.945, -0.295, 2.006), (1, 3.621, -0.161, 3.760), (1, 1.399, 3.759, 11.075), (1, 3.783, -0.866, 1.968), (1, -0.241, 2.902, 5.225), (1, 1.323, 1.934, 5.449), (1, 1.449, 2.855, 8.464), (1, 0.088, 1.526, 1.753), (1, -1.000, 2.161, 1.485), (1, -0.214, 3.358, 6.647), (1, -0.384, 3.230, 5.921), (1, 3.146, 1.228, 6.975), (1, 1.917, 0.860, 3.415), (1, 1.982, 1.735, 6.167), (1, 1.404, 1.851, 5.360), (1, 2.428, -0.674, -0.166), (1, 2.081, -0.505, -0.352), (1, 0.914, -0.543, -2.802), (1, -0.029, -0.482, -4.506), (1, 0.671, 0.184, -1.105), (1, 1.641, -0.524, -1.292), (1, 1.005, 0.361, 0.094), (1, -0.493, 3.582, 6.760), (2, 3.876, 2.563, 21.500), (2, 0.159, -0.309, 7.986), (2, -0.496, 0.417, 12.998), (2, -0.164, -0.512, 7.092), (2, 0.632, 3.200, 28.571), (2, 3.772, 0.493, 9.188), (2, 2.430, -0.797, 2.789), (2, 3.872, -0.775, 1.475), (2, -0.031, -0.256, 8.495), (2, 2.726, 3.000, 25.271), (2, 1.116, -0.269, 7.269), (2, 0.551, 3.402, 29.860), (2, 0.820, 2.500, 24.179), (2, 1.153, -0.453, 6.131), (2, -0.717, -0.360, 8.556), (2, 0.532, 0.531, 12.654), (2, 2.096, 0.981, 13.791), (2, 0.146, -0.433, 7.259), (2, 1.000, 1.075, 15.452), (2, 2.963, -0.090, 6.495), (2, 1.047, 2.052, 21.267), (2, 0.882, 1.778, 19.785), (2, 1.380, 2.702, 24.832), (2, 1.853, 0.401, 10.554), (2, 2.004, 1.770, 18.618), (2, 3.377, 0.772, 11.253), (2, 1.227, -0.169, 7.759), (2, 0.428, 2.052, 21.885), (2, 0.070, 3.648, 31.816), (2, 0.128, -0.938, 4.244), (2, 2.061, 0.753, 12.454), (2, 1.207, -0.301, 6.989), (2, -0.168, 3.765, 32.757), (2, 3.450, 1.801, 17.353), (2, 
-0.483, 3.344, 30.547), (2, 1.847, 1.884, 19.455), (2, 3.241, 2.369, 20.975), (2, 0.628, 3.590, 30.912), (2, 2.183, 1.741, 18.263), (2, 0.774, 2.638, 25.057), (2, 3.292, 2.867, 23.912), (2, 0.056, 2.651, 25.850), (2, -0.506, 0.300, 12.308), (2, 0.524, 1.182, 16.570), (2, -0.267, 2.563, 25.647), (2, 3.953, -0.334, 4.040), (2, 2.507, 2.319, 21.408), (2, -0.770, 1.017, 16.875), (2, 0.481, 1.591, 19.062), (2, 3.243, 1.060, 13.114), (2, 2.178, -0.325, 5.873), (2, 2.510, 1.235, 14.900), (2, 2.684, 2.370, 21.535), (2, 3.466, 3.656, 28.469), (2, 2.994, 3.960, 30.764), (2, -0.363, 3.592, 31.917), (2, 1.738, 0.074, 8.708), (2, 1.462, 3.727, 30.902), (2, 0.059, 0.180, 11.021), (2, 2.980, 2.317, 20.925), (2, 1.248, 0.965, 14.545), (2, 0.776, -0.229, 7.850), (2, -0.562, 2.839, 27.598), (2, 3.581, 0.244, 7.883), (2, -0.958, 0.901, 16.362), (2, 3.257, 0.364, 8.925), (2, 1.478, 1.718, 18.827), (2, -0.121, -0.436, 7.507), (2, 0.966, 1.444, 17.697), (2, 3.631, 3.463, 27.144), (2, 0.174, -0.663, 5.848), (2, 2.783, 0.124, 7.959), (2, 1.106, -0.936, 3.276), (2, 0.186, -0.942, 4.162), (2, 3.513, 2.456, 21.222), (2, 0.339, 2.316, 23.558), (2, 0.566, 2.515, 24.523), (2, -0.134, 0.746, 14.607), (2, 1.554, 0.106, 9.084), (2, -0.846, 2.748, 27.337), (2, 3.934, 0.564, 9.451), (2, 2.840, -0.966, 1.366), (2, 1.379, 0.307, 10.463), (2, 1.065, -0.780, 4.253), (2, 3.324, 2.145, 19.546), (2, 0.974, -0.543, 5.767), (2, 2.469, 3.976, 31.385), (2, -0.434, 3.689, 32.570), (2, 0.261, 0.481, 12.624), (2, 3.786, 2.605, 21.843), (2, -0.460, -0.536, 7.243), (2, 2.576, 2.880, 24.702), (2, -0.501, 3.551, 31.810), (2, 2.946, 3.263, 26.633), (2, 2.959, -0.813, 2.162), (2, -0.749, 0.490, 13.686), (2, 2.821, 0.335, 9.187), (2, 3.964, 0.272, 7.667), (2, 0.808, -0.700, 4.994), (2, 0.415, 2.183, 22.682), (2, 2.551, 3.785, 30.156), (2, 0.821, 1.120, 15.897), (2, 1.714, 3.019, 26.400), (2, 2.265, 1.950, 19.438), (2, 1.493, 3.317, 28.409), (2, -0.445, 2.282, 24.134), (2, -0.508, 2.508, 25.553), (2, 1.017, -0.621, 5.255), (2, 1.053, 2.246, 22.422), (2, 0.441, 1.637, 19.382), (2, 3.657, 1.246, 13.816), (2, 0.756, 0.808, 14.095), (2, 1.849, 1.599, 17.742), (2, 1.782, -0.000, 8.215), (2, 1.136, 3.940, 32.506), (2, 2.814, 3.288, 26.916), (2, 3.180, 3.198, 26.008), (2, 0.728, -0.054, 8.946), (2, 0.801, 0.775, 13.852), (2, 1.399, -0.546, 5.322), (2, 1.415, 1.753, 19.103), (2, 2.860, 1.796, 17.913), (2, 0.712, 2.902, 26.699), (2, -0.389, 3.093, 28.945), (2, 3.661, 3.666, 28.333), (2, 3.944, 0.996, 12.030), (2, 1.655, 1.385, 16.657), (2, 0.122, -0.662, 5.906), (2, 3.667, 2.763, 22.912), (2, 2.606, 0.630, 11.172), (2, -0.291, 1.492, 19.242), (2, -0.787, 1.223, 18.125), (2, 2.405, 0.325, 9.545), (2, 3.129, -0.412, 4.398), (2, 0.588, 3.964, 33.194), (2, -0.177, 3.636, 31.993), (2, 2.079, 3.280, 27.603), (2, 3.055, 3.958, 30.692), (2, -0.164, 3.188, 29.292), (2, 3.803, 3.151, 25.105), (2, 3.123, -0.891, 1.531), (2, 3.070, -0.824, 1.988), (2, 3.103, -0.931, 1.309), (2, 0.589, 3.353, 29.529), (2, 1.095, 1.973, 20.744), (2, -0.557, 0.370, 12.775), (2, 1.223, 0.307, 10.620), (2, 3.255, -0.768, 2.136), (2, 0.508, 2.157, 22.435), (2, 0.373, 0.319, 11.544), (2, 1.240, 1.736, 19.177), (2, 1.846, 0.970, 13.972), (2, 3.352, -0.534, 3.445), (2, -0.352, -0.290, 8.610), (2, 0.281, 0.193, 10.880), (2, 3.450, -0.059, 6.193), (2, 0.310, 2.575, 25.140), (2, 1.791, 1.127, 14.970), (2, 1.992, 2.347, 22.087), (2, -0.288, 2.881, 27.576), (2, 3.464, 3.664, 28.518), (2, 0.573, 2.789, 26.159), (2, 2.265, 1.583, 17.233), (2, 3.203, 0.730, 11.177), (2, 3.345, 1.368, 14.862), (2, 
0.891, 3.690, 31.248), (2, 2.252, -0.311, 5.884), (2, -0.087, 0.804, 14.912), (2, 0.153, 2.510, 24.905), (2, 3.533, -0.965, 0.675), (2, 2.035, 1.953, 19.683), (2, 0.316, 2.448, 24.373), (2, 2.199, 3.858, 30.946), (2, -0.519, 3.647, 32.399), (2, 0.867, 1.961, 20.901), (2, 2.739, 2.268, 20.866), (2, 2.462, -0.664, 3.551), (2, 1.372, 3.419, 29.144), (2, -0.628, 2.723, 26.968), (2, 3.989, -0.225, 4.659), (2, 0.166, 3.190, 28.976), (2, 1.681, 2.937, 25.943), (2, 2.979, 2.263, 20.600), (2, 3.896, -0.419, 3.590), (2, 3.861, 2.224, 19.485), (2, -0.087, -0.861, 4.918), (2, 1.182, 1.886, 20.133), (2, 3.622, 2.320, 20.301), (2, 3.560, 0.008, 6.491), (2, 3.082, -0.605, 3.285), (2, 1.777, 1.324, 16.169), (2, 2.269, 2.436, 22.348), (2, 0.019, 3.074, 28.423), (2, -0.560, 3.868, 33.765), (2, 1.568, 2.886, 25.749), (2, 2.045, 0.222, 9.286), (2, 1.391, 0.352, 10.723), (2, 0.172, 1.908, 21.276), (2, 1.173, -0.726, 4.474), (2, 1.642, 2.576, 23.814), (2, 3.346, 1.377, 14.918), (2, 0.120, 0.411, 12.344), (2, 3.913, 0.820, 11.008), (2, 1.054, 3.732, 31.340), (2, 2.284, 0.108, 8.362), (2, 2.266, 0.066, 8.131), (2, 3.204, 1.156, 13.735), (2, 3.243, 2.032, 18.947), (2, 3.052, -0.121, 6.221), (2, 1.131, 2.189, 22.000), (2, 2.958, 0.658, 10.990), (2, 1.717, 3.708, 30.530), (2, 2.417, 2.070, 20.004), (2, 2.175, 0.881, 13.110), (2, 0.333, 3.494, 30.629), (2, 3.598, 3.940, 30.044), (2, 3.683, -0.110, 5.660), (2, 2.555, 1.196, 14.620), (2, 1.511, 0.453, 11.206), (2, 0.903, 1.390, 17.439), (2, -0.897, 3.303, 30.716), (2, 0.245, 2.129, 22.527), (2, 1.370, 2.715, 24.923), (2, 1.822, -0.917, 2.676), (2, 2.690, -0.109, 6.657), (2, 0.206, 1.561, 19.162), (2, 3.905, 2.710, 22.357), (2, -0.438, 3.207, 29.678), (2, 0.898, 3.445, 29.772), (2, 1.838, 2.871, 25.385), (2, 0.116, 1.401, 18.292), (2, -0.408, 2.375, 24.656), (2, 1.681, 3.338, 28.349), (2, 1.177, -0.318, 6.914), (2, 1.004, 0.626, 12.753), (2, 2.840, 2.589, 22.691), (2, 1.258, 3.993, 32.700), (2, 2.016, 3.489, 28.920), (2, -0.728, 0.164, 11.713), (2, 0.193, 1.479, 18.682), (2, 2.647, -0.969, 1.541), (2, 3.837, 2.602, 21.773), (2, 0.541, 0.205, 10.690), (2, 0.026, 2.756, 26.511), (2, 0.924, 0.909, 14.530), (2, 0.974, -0.074, 8.581), (2, 0.081, 0.005, 9.948), (2, 1.331, 2.942, 26.320), (2, 2.498, 3.405, 27.934), (2, 3.741, 1.554, 15.581), (2, 3.502, -0.089, 5.964), (2, 3.069, 1.768, 17.539), (2, 3.115, -0.008, 6.839), (2, 3.237, -0.503, 3.745), (2, 0.768, -0.135, 8.420), (2, 0.410, 3.974, 33.437), (2, 0.238, -0.700, 5.564), (2, 3.619, 0.350, 8.482), (2, 3.563, 3.059, 24.788), (2, 2.916, 3.101, 25.691), (2, 0.144, 3.282, 29.549), (2, 1.288, 2.642, 24.565), (2, -0.859, 0.229, 12.234), (2, 1.507, -0.711, 4.229), (2, -0.634, 2.608, 26.281), (2, 2.054, -0.834, 2.942), (2, 0.453, 1.072, 15.980), (2, 3.914, 1.159, 13.039), (2, 0.254, 1.835, 20.758), (2, 1.577, 0.428, 10.991), (2, 1.990, 3.569, 29.421), (2, 1.584, 1.803, 19.234), (2, 0.835, 3.603, 30.785), (2, 0.900, 3.033, 27.296), (2, 1.180, 0.280, 10.499), (2, 2.400, 2.802, 24.409), (2, 0.924, 2.462, 23.851), (2, 2.138, 0.722, 12.192), (2, -0.253, -0.809, 5.401), (2, 3.570, -0.116, 5.733), (2, 0.201, -0.182, 8.708), (2, 2.457, 0.454, 10.267), (2, -0.053, 0.443, 12.709), (2, 2.108, 2.069, 20.309), (2, -0.964, -0.441, 8.318), (2, 1.802, 0.403, 10.614), (2, 3.704, 3.902, 29.711), (2, 1.904, 2.418, 22.603), (2, 2.965, 3.429, 27.606), (2, -0.801, -0.072, 10.370), (2, 3.009, 0.491, 9.937), (2, 2.781, 1.026, 13.376), (2, -0.421, 0.744, 14.883), (2, 3.639, -0.148, 5.476), (2, 0.584, 2.041, 21.663), (2, 1.547, -0.391, 6.107), (2, -0.204, 
0.727, 14.564), (2, 0.372, 0.464, 12.410), (2, 1.185, 1.732, 19.207), (2, 3.574, 0.755, 10.954), (2, 2.164, 1.425, 16.385), (2, 1.895, 1.374, 16.351), (2, 2.352, 2.188, 20.779), (2, 0.187, 0.677, 13.874), (2, -0.589, 3.686, 32.703), (2, 3.081, 0.414, 9.403), (2, 3.341, 3.246, 26.137), (2, 0.617, -0.201, 8.174), (2, 1.518, 3.833, 31.481), (2, 2.613, -0.350, 5.286), (2, 3.426, 0.751, 11.082), (2, 2.726, 3.586, 28.787), (2, 2.834, -0.219, 5.855), (2, 1.038, 3.607, 30.605), (2, 0.479, 1.226, 16.874), (2, 1.729, 0.297, 10.053), (2, 0.050, 1.815, 20.841), (2, -0.554, 3.538, 31.782), (2, 2.773, 0.973, 13.064), (2, -0.239, 3.425, 30.786), (2, 3.611, 3.700, 28.590), (2, 1.418, 3.625, 30.332), (2, 1.599, 1.626, 18.156), (2, 1.841, 1.518, 17.269), (2, 1.119, 1.996, 20.856), (2, 2.810, 2.293, 20.947), (2, 1.174, 2.062, 21.198), (2, -0.326, -0.279, 8.655), (2, -0.365, 0.816, 15.259), (2, 1.296, -0.095, 8.132), (2, -0.263, 0.511, 13.327), (2, 1.757, 3.012, 26.314), (2, 1.849, 1.065, 14.539), (2, 1.651, 2.244, 21.814), (2, 3.942, 1.026, 12.214), (2, 2.314, 1.944, 19.353), (2, 3.055, -0.002, 6.930), (2, 0.402, 1.350, 17.698), (2, 0.004, 2.288, 23.724), (2, 3.265, 2.962, 24.509), (2, 1.044, -0.684, 4.850), (2, -0.280, 2.278, 23.948), (2, 1.216, 0.726, 13.142), (2, 3.181, 3.518, 27.925), (2, 3.199, -0.124, 6.055), (2, 0.510, -0.622, 5.755), (2, 2.920, 1.067, 13.484), (2, 2.573, 1.844, 18.492), (2, 1.155, 3.505, 29.878), (2, 2.033, 1.756, 18.502), (2, 1.312, 0.114, 9.373), (2, -0.823, 3.339, 30.854), (2, 0.287, 3.891, 33.060), (2, -0.621, -0.210, 9.363), (2, 3.734, 1.574, 15.712), (2, -0.932, 0.772, 15.561), (2, -0.719, 1.604, 20.345), (2, -0.555, 0.773, 15.190), (2, -0.744, 3.934, 34.348), (2, 1.671, -0.425, 5.778), (2, 2.754, 2.690, 23.385), (2, 1.826, 2.185, 21.283), (2, 1.970, 0.021, 8.159), (2, 2.882, 3.494, 28.081), (2, 1.668, -0.030, 8.150), (2, 0.472, 2.184, 22.633), (2, 1.656, 3.393, 28.701), (2, -0.069, 2.331, 24.057), (2, 0.075, 1.341, 17.973), (2, 1.836, 0.565, 11.554), (2, -0.235, 0.520, 13.357), (2, 3.620, 3.169, 25.393), (2, 0.401, -0.062, 9.224), (2, 1.503, 1.667, 18.501), (2, 3.727, 1.149, 13.166), (2, 2.777, -0.081, 6.737), (2, 3.914, -0.234, 4.680), (2, 1.765, 0.750, 12.737), (2, 1.746, 1.818, 19.161), (2, 0.019, 2.819, 26.893), (2, 1.068, 1.917, 20.434), (2, 3.035, 3.158, 25.915), (2, 2.012, 0.724, 12.330), (2, 2.597, 2.264, 20.986), (2, 3.428, 3.239, 26.005), (2, -0.016, -0.529, 6.842), (2, 1.314, 0.735, 13.095), (2, 2.832, -0.567, 3.768), (2, -0.296, 2.641, 26.141), (2, 2.863, 3.889, 30.470), (2, 2.849, 3.997, 31.130), (2, 1.660, 1.813, 19.216), (2, 2.798, 0.977, 13.062), (2, 3.935, 0.549, 9.359), (2, 1.002, 3.557, 30.342), (2, 3.052, 2.207, 20.193), (2, 3.455, 0.458, 9.294), (2, 3.312, 2.138, 19.515), (2, 0.292, 0.058, 10.056), (2, 0.050, -0.211, 8.682), (2, -0.215, 1.108, 16.866), (2, -0.169, 0.647, 14.048), (2, 2.546, 0.876, 12.709), (2, -0.911, -0.209, 9.659), (2, 0.950, 2.894, 26.413), (2, -0.512, -0.167, 9.508), (2, 1.821, -0.747, 3.696), (2, 2.257, 3.945, 31.415), (2, 2.398, -0.586, 4.087), (2, 3.051, 0.815, 11.836), (2, 3.399, 2.131, 19.389), (2, 2.982, 1.549, 16.314), (2, -0.790, -0.329, 8.819), (2, 3.797, 0.327, 8.167), (2, 1.838, 0.290, 9.902), (2, 1.906, 1.782, 18.785), (2, 1.330, -0.208, 7.422), (2, -0.217, 0.854, 15.344), (2, 3.310, 1.582, 16.180), (2, 2.965, 0.917, 12.537), (2, 3.558, -0.164, 5.460), (2, -0.841, 2.060, 23.203), (2, 2.892, 2.621, 22.834), (2, -0.011, -0.198, 8.821), (2, -0.430, 2.999, 28.424), (2, -0.584, 0.894, 15.946), (2, 0.033, 1.310, 17.829), (2, 
3.044, 0.410, 9.418), (2, 3.932, 0.295, 7.836), (2, 0.394, 1.315, 17.494), (2, 1.424, -0.167, 7.573), (2, 1.676, 1.118, 15.031), (2, 1.821, 0.714, 12.462), (2, 2.688, 1.497, 16.292), (2, 3.960, 2.344, 20.103), (2, -0.787, -0.161, 9.819), (2, 3.538, 3.651, 28.366), (2, -0.338, 0.458, 13.088), (2, -0.146, 3.162, 29.120), (2, 3.124, 3.352, 26.989), (2, -0.189, 3.685, 32.301), (2, 0.396, 1.004, 15.626), (2, -0.171, 2.114, 22.858), (2, 3.736, 0.732, 10.659), (2, 1.259, 2.564, 24.127), (2, -0.263, 2.426, 24.820), (2, 1.558, -0.858, 3.292), (2, 2.882, 1.110, 13.776), (2, 0.039, 1.284, 17.666), (2, 3.074, 2.379, 21.201), (2, -0.523, 0.303, 12.344), (2, 0.363, 1.082, 16.132), (2, 2.925, 2.187, 20.195), (2, 0.595, -0.335, 7.397), (2, 0.062, -0.232, 8.544), (2, 0.877, 2.155, 22.050), (2, -0.256, 2.922, 27.788), (2, 1.813, 3.161, 27.152), (2, 2.177, 2.532, 23.016), (2, -0.051, 0.035, 10.263), (2, 2.688, 3.599, 28.906), (2, 2.539, -0.076, 7.008), (2, 2.563, 1.467, 16.240), (2, -0.755, 2.276, 24.410), (2, 3.092, 0.660, 10.868), (2, 2.403, 2.693, 23.756), (2, -0.170, 2.178, 23.239), (2, 2.672, -0.603, 3.712), (2, -0.077, -0.493, 7.116), (2, 1.997, 1.934, 19.608), (2, 1.913, -0.792, 3.335), (2, 0.171, -0.329, 7.857), (2, 2.488, 0.171, 8.540), (2, -0.514, 0.331, 12.500), (2, -0.201, 2.484, 25.103), (2, 2.436, 0.032, 7.759), (2, -0.094, 2.530, 25.275), (2, 2.186, 2.591, 23.358), (2, 3.171, -0.766, 2.231), (2, 2.410, 0.183, 8.687), (2, -0.699, -0.329, 8.728), (2, 3.285, 2.252, 20.228), (2, 1.928, -0.059, 7.720), (2, 3.460, 0.399, 8.931), (2, 2.542, 0.224, 8.801), (2, 2.902, 2.101, 19.702), (2, 3.808, 2.528, 21.358), (2, 0.330, 0.642, 13.522), (2, -0.088, 1.286, 17.804), (2, 3.025, 2.354, 21.100), (2, 3.306, 2.049, 18.986), (2, 1.477, 1.720, 18.845), (2, 2.676, 3.601, 28.931), (2, 1.577, 0.170, 9.443), (2, 1.362, 3.534, 29.843), (2, 2.616, 3.106, 26.018), (2, 3.773, 0.378, 8.496), (2, -0.125, 2.057, 22.465), (2, 3.174, 1.382, 15.120), (2, 0.844, 2.058, 21.503); SELECT ANS[1] > -1.1 AND ANS[1] < -0.9 AND ANS[2] > 5.9 AND ANS[2] < 6.1 AND ANS[3] > 9.9 AND ANS[3] < 10.1 FROM -(SELECT stochasticLinearRegression(0.05, 0, 1, 'SGD')(target, p1, p2) AS ANS FROM grouptest GROUP BY user_id LIMIT 0, 1); +(SELECT stochasticLinearRegression(0.05, 0, 1, 'SGD')(target, p1, p2) AS ANS FROM grouptest GROUP BY user_id ORDER BY user_id LIMIT 1, 1); SELECT ANS[1] > 1.9 AND ANS[1] < 2.1 AND ANS[2] > 2.9 AND ANS[2] < 3.1 AND ANS[3] > -3.1 AND ANS[3] < -2.9 FROM -(SELECT stochasticLinearRegression(0.05, 0, 1, 'SGD')(target, p1, p2) AS ANS FROM grouptest GROUP BY user_id LIMIT 1, 1); +(SELECT stochasticLinearRegression(0.05, 0, 1, 'SGD')(target, p1, p2) AS ANS FROM grouptest GROUP BY user_id ORDER BY user_id LIMIT 0, 1); DROP TABLE defaults; DROP TABLE model; diff --git a/tests/queries/0_stateless/00953_moving_functions.sql b/tests/queries/0_stateless/00953_moving_functions.sql index daaceeeb3ac..b9046158a16 100644 --- a/tests/queries/0_stateless/00953_moving_functions.sql +++ b/tests/queries/0_stateless/00953_moving_functions.sql @@ -24,6 +24,10 @@ INSERT INTO moving_sum_num SELECT * FROM moving_sum_num ORDER BY k,dt FORMAT TabSeparatedWithNames; +-- Result of function 'groupArrayMovingSum' depends on the order of merging +-- aggregate states which is implementation defined in external aggregation. 
+SET max_bytes_before_external_group_by = 0; + SELECT k, groupArrayMovingSum(v) FROM (SELECT * FROM moving_sum_num ORDER BY k, dt) GROUP BY k ORDER BY k FORMAT TabSeparatedWithNamesAndTypes; SELECT k, groupArrayMovingSum(3)(v) FROM (SELECT * FROM moving_sum_num ORDER BY k, dt) GROUP BY k ORDER BY k FORMAT TabSeparatedWithNamesAndTypes; diff --git a/tests/queries/0_stateless/00970_substring_arg_validation.reference b/tests/queries/0_stateless/00970_substring_arg_validation.reference deleted file mode 100644 index 8b137891791..00000000000 --- a/tests/queries/0_stateless/00970_substring_arg_validation.reference +++ /dev/null @@ -1 +0,0 @@ - diff --git a/tests/queries/0_stateless/00970_substring_arg_validation.sql b/tests/queries/0_stateless/00970_substring_arg_validation.sql deleted file mode 100644 index 43d73bc2cda..00000000000 --- a/tests/queries/0_stateless/00970_substring_arg_validation.sql +++ /dev/null @@ -1,4 +0,0 @@ -SELECT substring('hello', []); -- { serverError 43 } -SELECT substring('hello', 1, []); -- { serverError 43 } -SELECT substring(materialize('hello'), -1, -1); -SELECT substring(materialize('hello'), 0); -- { serverError 135 } \ No newline at end of file diff --git a/tests/queries/0_stateless/01012_reset_running_accumulate.sql b/tests/queries/0_stateless/01012_reset_running_accumulate.sql index b9336b2f50c..c2c5bf6f87d 100644 --- a/tests/queries/0_stateless/01012_reset_running_accumulate.sql +++ b/tests/queries/0_stateless/01012_reset_running_accumulate.sql @@ -1,3 +1,6 @@ +-- Disable external aggregation because the state is reset for each new block of data in 'runningAccumulate' function. +SET max_bytes_before_external_group_by = 0; + SELECT grouping, item, runningAccumulate(state, grouping) diff --git a/tests/queries/0_stateless/01033_function_substring.reference b/tests/queries/0_stateless/01033_function_substring.reference new file mode 100644 index 00000000000..b0fac36e24a --- /dev/null +++ b/tests/queries/0_stateless/01033_function_substring.reference @@ -0,0 +1,173 @@ +-- argument validation + +-- FixedString arguments +hello\0\0\0 +hello\0\0\0 +0\0\0\0\0\0\0\0 +1\0\0\0\0\0\0\0 +2\0\0\0\0\0\0\0 +3\0\0\0\0\0\0\0 +4\0\0\0\0\0\0\0 +5\0\0\0\0\0\0\0 +6\0\0\0\0\0\0\0 +7\0\0\0\0\0\0\0 +8\0\0\0\0\0\0\0 +9\0\0\0\0\0\0\0 +995 +996 +997 +998 +999 +100 +100 +100 +100 +100 + +9 +99 +998 +999\0 + +1 +10 +100 +1004 +995\0 +96\0 +7\0 +\0 + +1000 +001 +02 +3 + +995 +9 +7\0 +\0 + +10 +001 +0 +3 + +-- Enum arguments +hello shark +world eagle +ello hark +orld agle +o k +d e +lo rk +ld le + + +-- Constant enums +f fo +-- negative offset argument +abc +abc +abc +bc +c + +abc +bc +c + +abc +abc +abc +bc +c + +abc +bc +c + +abc +abc +abc +bc +c + +abc +bc +c + +abc +abc +abc +bc +c + +abc +bc +c + +lickhou +lickhou +lickhou +lickhou +lickhou +lickhou +lickhou +lickhou +-- negative length argument +bcdef +bcdef +bcdef +bcdef +bcdef +bcdef +bcdef +bcdef +bcdef +23456 +bcdef +3456 +bcdef +2345 +bcdef +345 +bcdef +23456 +bcdef +3456 +bcdef +2345 +bcdef +345 +-- negative offset and size + + + +g +g +g + + + +g +g +g + + + +6 + + + + +- + + + +6 + + + + +UBSAN bug diff --git a/tests/queries/0_stateless/01033_function_substring.sql b/tests/queries/0_stateless/01033_function_substring.sql new file mode 100644 index 00000000000..82c6b5859e2 --- /dev/null +++ b/tests/queries/0_stateless/01033_function_substring.sql @@ -0,0 +1,146 @@ +SELECT '-- argument validation'; + +SELECT substring('hello', []); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT substring('hello', 1, []); -- { serverError 
ILLEGAL_TYPE_OF_ARGUMENT } +SELECT substring(materialize('hello'), -1, -1); +SELECT substring(materialize('hello'), 0); -- { serverError ZERO_ARRAY_OR_TUPLE_INDEX } + +SELECT '-- FixedString arguments'; + +SELECT substring(toFixedString('hello', 16), 1, 8); +SELECT substring(toFixedString(materialize('hello'), 16), 1, 8); +SELECT substring(toFixedString(toString(number), 16), 1, 8) FROM system.numbers LIMIT 10; +SELECT substring(toFixedString(toString(number), 4), 1, 3) FROM system.numbers LIMIT 995, 10; +SELECT substring(toFixedString(toString(number), 4), 1, number % 5) FROM system.numbers LIMIT 995, 10; +SELECT substring(toFixedString(toString(number), 4), 1 + number % 5) FROM system.numbers LIMIT 995, 10; +SELECT substring(toFixedString(toString(number), 4), 1 + number % 5, 1 + number % 3) FROM system.numbers LIMIT 995, 10; + +SELECT '-- Enum arguments'; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab(e8 Enum8('hello' = -5, 'world' = 15), e16 Enum16('shark' = -999, 'eagle' = 9999)) ENGINE MergeTree ORDER BY tuple(); +INSERT INTO TABLE tab VALUES ('hello', 'shark'), ('world', 'eagle'); + +-- positive offsets (slice from left) +SELECT substring(e8, 1), substring (e16, 1) FROM tab; +SELECT substring(e8, 2, 10), substring (e16, 2, 10) FROM tab; +-- negative offsets (slice from right) +SELECT substring(e8, -1), substring (e16, -1) FROM tab; +SELECT substring(e8, -2, 10), substring (e16, -2, 10) FROM tab; +-- zero offset/length +SELECT substring(e8, 1, 0), substring (e16, 1, 0) FROM tab; + +SELECT '-- Constant enums'; +SELECT substring(CAST('foo', 'Enum8(\'foo\' = 1)'), 1, 1), substring(CAST('foo', 'Enum16(\'foo\' = 1111)'), 1, 2); + +DROP TABLE tab; + +SELECT '-- negative offset argument'; + +SELECT substring('abc', number - 5) FROM system.numbers LIMIT 10; +SELECT substring(materialize('abc'), number - 5) FROM system.numbers LIMIT 10; +SELECT substring(toFixedString('abc', 3), number - 5) FROM system.numbers LIMIT 10; +SELECT substring(materialize(toFixedString('abc', 3)), number - 5) FROM system.numbers LIMIT 10; + +SELECT substring('clickhouse', 2, -2); +SELECT substring(materialize('clickhouse'), 2, -2); +SELECT substring('clickhouse', materialize(2), -2); +SELECT substring(materialize('clickhouse'), materialize(2), -2); +SELECT substring('clickhouse', 2, materialize(-2)); +SELECT substring(materialize('clickhouse'), 2, materialize(-2)); +SELECT substring('clickhouse', materialize(2), materialize(-2)); +SELECT substring(materialize('clickhouse'), materialize(2), materialize(-2)); + +SELECT '-- negative length argument'; + +SELECT substring('abcdefgh', 2, -2); +SELECT substring('abcdefgh', materialize(2), -2); +SELECT substring('abcdefgh', 2, materialize(-2)); +SELECT substring('abcdefgh', materialize(2), materialize(-2)); + +SELECT substring(cast('abcdefgh' AS FixedString(8)), 2, -2); +SELECT substring(cast('abcdefgh' AS FixedString(8)), materialize(2), -2); +SELECT substring(cast('abcdefgh' AS FixedString(8)), 2, materialize(-2)); +SELECT substring(cast('abcdefgh' AS FixedString(8)), materialize(2), materialize(-2)); + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (s String, l Int8, r Int8) ENGINE = Memory; +INSERT INTO tab VALUES ('abcdefgh', 2, -2), ('12345678', 3, -3); + +SELECT substring(s, 2, -2) FROM tab; +SELECT substring(s, l, -2) FROM tab; +SELECT substring(s, 2, r) FROM tab; +SELECT substring(s, l, r) FROM tab; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (s FixedString(8), l Int8, r Int8) ENGINE = Memory; +INSERT INTO tab VALUES ('abcdefgh', 2, -2), ('12345678', 3, -3); + 
+SELECT substring(s, 2, -2) FROM tab; +SELECT substring(s, l, -2) FROM tab; +SELECT substring(s, 2, r) FROM tab; +SELECT substring(s, l, r) FROM tab; + +DROP TABLE IF EXISTS tab; + +SELECT '-- negative offset and size'; + +SELECT substring('abcdefgh', -2, -2); +SELECT substring(materialize('abcdefgh'), -2, -2); +SELECT substring(materialize('abcdefgh'), materialize(-2), materialize(-2)); + +SELECT substring('abcdefgh', -2, -1); +SELECT substring(materialize('abcdefgh'), -2, -1); +SELECT substring(materialize('abcdefgh'), materialize(-2), materialize(-1)); + +SELECT substring(cast('abcdefgh' AS FixedString(8)), -2, -2); +SELECT substring(materialize(cast('abcdefgh' AS FixedString(8))), -2, -2); +SELECT substring(materialize(cast('abcdefgh' AS FixedString(8))), materialize(-2), materialize(-2)); + +SELECT substring(cast('abcdefgh' AS FixedString(8)), -2, -1); +SELECT substring(materialize(cast('abcdefgh' AS FixedString(8))), -2, -1); +SELECT substring(materialize(cast('abcdefgh' AS FixedString(8))), materialize(-2), materialize(-1)); + +DROP TABLE IF EXISTS t; +CREATE TABLE t +( + s String, + l Int8, + r Int8 +) ENGINE = Memory; + +INSERT INTO t VALUES ('abcdefgh', -2, -2),('12345678', -3, -3); + +SELECT substring(s, -2, -2) FROM t; +SELECT substring(s, l, -2) FROM t; +SELECT substring(s, -2, r) FROM t; +SELECT substring(s, l, r) FROM t; + +SELECT '-'; +DROP TABLE IF EXISTS t; +CREATE TABLE t( + s FixedString(8), + l Int8, + r Int8 +) engine = Memory; +INSERT INTO t VALUES ('abcdefgh', -2, -2),('12345678', -3, -3); + +SELECT substring(s, -2, -2) FROM t; +SELECT substring(s, l, -2) FROM t; +SELECT substring(s, -2, r) FROM t; +SELECT substring(s, l, r) FROM t; + +DROP table if exists t; + +SELECT 'UBSAN bug'; + +/** NOTE: The behaviour of substring and substringUTF8 is inconsistent when negative offset is greater than string size: + * substring: + * hello + * ^-----^ - offset -10, length 7, result: "he" + * substringUTF8: + * hello + * ^-----^ - offset -10, length 7, result: "hello" + * This may be subject for change. 
+ */ +SELECT substringUTF8('hello, привет', -9223372036854775808, number) FROM numbers(16) FORMAT Null; diff --git a/tests/queries/0_stateless/01033_substr_negative_size_arg.reference b/tests/queries/0_stateless/01033_substr_negative_size_arg.reference deleted file mode 100644 index db3a106ac7f..00000000000 --- a/tests/queries/0_stateless/01033_substr_negative_size_arg.reference +++ /dev/null @@ -1,8 +0,0 @@ -lickhou -lickhou -lickhou -lickhou -lickhou -lickhou -lickhou -lickhou diff --git a/tests/queries/0_stateless/01033_substr_negative_size_arg.sql b/tests/queries/0_stateless/01033_substr_negative_size_arg.sql deleted file mode 100644 index a0fba1a6eee..00000000000 --- a/tests/queries/0_stateless/01033_substr_negative_size_arg.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT substr('clickhouse', 2, -2); -SELECT substr(materialize('clickhouse'), 2, -2); -SELECT substr('clickhouse', materialize(2), -2); -SELECT substr(materialize('clickhouse'), materialize(2), -2); -SELECT substr('clickhouse', 2, materialize(-2)); -SELECT substr(materialize('clickhouse'), 2, materialize(-2)); -SELECT substr('clickhouse', materialize(2), materialize(-2)); -SELECT substr(materialize('clickhouse'), materialize(2), materialize(-2)); diff --git a/tests/queries/0_stateless/01035_avg_weighted_long.sh b/tests/queries/0_stateless/01035_avg_weighted_long.sh index 0e76d6e328d..92c86981b37 100755 --- a/tests/queries/0_stateless/01035_avg_weighted_long.sh +++ b/tests/queries/0_stateless/01035_avg_weighted_long.sh @@ -28,7 +28,7 @@ exttypes=("Int128" "Int256" "UInt256") echo "SELECT avgWeighted(to${left}(1), to${right}(2));" done done -) | clickhouse-client -nm +) | $CLICKHOUSE_CLIENT_BINARY -nm -echo "$(${CLICKHOUSE_CLIENT} --server_logs_file=/dev/null --query="SELECT avgWeighted(['string'], toFloat64(0))" 2>&1)" \ +${CLICKHOUSE_CLIENT} --server_logs_file=/dev/null --query="SELECT avgWeighted(['string'], toFloat64(0))" 2>&1 \ | grep -c 'Code: 43. DB::Exception: .* DB::Exception:.* Types .* are non-conforming as arguments for aggregate function avgWeighted' diff --git a/tests/queries/0_stateless/01053_ssd_dictionary.sh b/tests/queries/0_stateless/01053_ssd_dictionary.sh index cf1a55b2942..b49144c9b1a 100755 --- a/tests/queries/0_stateless/01053_ssd_dictionary.sh +++ b/tests/queries/0_stateless/01053_ssd_dictionary.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -n --query=" DROP DATABASE IF EXISTS 01053_db; diff --git a/tests/queries/0_stateless/01060_substring_negative_size.reference b/tests/queries/0_stateless/01060_substring_negative_size.reference deleted file mode 100644 index b25696dc7d6..00000000000 --- a/tests/queries/0_stateless/01060_substring_negative_size.reference +++ /dev/null @@ -1,27 +0,0 @@ -bcdef -bcdef -bcdef -bcdef -- -bcdef -bcdef -bcdef -bcdef -- -bcdef -23456 -bcdef -3456 -bcdef -2345 -bcdef -345 -- -bcdef -23456 -bcdef -3456 -bcdef -2345 -bcdef -345 diff --git a/tests/queries/0_stateless/01060_substring_negative_size.sql b/tests/queries/0_stateless/01060_substring_negative_size.sql deleted file mode 100644 index 23cab14a6e0..00000000000 --- a/tests/queries/0_stateless/01060_substring_negative_size.sql +++ /dev/null @@ -1,36 +0,0 @@ -select substring('abcdefgh', 2, -2); -select substring('abcdefgh', materialize(2), -2); -select substring('abcdefgh', 2, materialize(-2)); -select substring('abcdefgh', materialize(2), materialize(-2)); - -select '-'; - -select substring(cast('abcdefgh' as FixedString(8)), 2, -2); -select substring(cast('abcdefgh' as FixedString(8)), materialize(2), -2); -select substring(cast('abcdefgh' as FixedString(8)), 2, materialize(-2)); -select substring(cast('abcdefgh' as FixedString(8)), materialize(2), materialize(-2)); - -select '-'; - -drop table if exists t; -create table t (s String, l Int8, r Int8) engine = Memory; -insert into t values ('abcdefgh', 2, -2), ('12345678', 3, -3); - -select substring(s, 2, -2) from t; -select substring(s, l, -2) from t; -select substring(s, 2, r) from t; -select substring(s, l, r) from t; - -select '-'; - -drop table if exists t; -create table t (s FixedString(8), l Int8, r Int8) engine = Memory; -insert into t values ('abcdefgh', 2, -2), ('12345678', 3, -3); - -select substring(s, 2, -2) from t; -select substring(s, l, -2) from t; -select substring(s, 2, r) from t; -select substring(s, l, r) from t; - -drop table if exists t; - diff --git a/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql b/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql index 3d75fb0ccc9..ae90dc3cc72 100644 --- a/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql +++ b/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql @@ -1,5 +1,8 @@ SET joined_subquery_requires_alias = 0; SET max_threads = 1; +-- It affects number of read rows and max_rows_to_read. 
+SET max_bytes_before_external_sort = 0; +SET max_bytes_before_external_group_by = 0; -- incremental streaming usecase -- that has sense only if data filling order has guarantees of chronological order diff --git a/tests/queries/0_stateless/01088_array_slice_of_aggregate_functions.sql b/tests/queries/0_stateless/01088_array_slice_of_aggregate_functions.sql index ba525f30228..c8466b57051 100644 --- a/tests/queries/0_stateless/01088_array_slice_of_aggregate_functions.sql +++ b/tests/queries/0_stateless/01088_array_slice_of_aggregate_functions.sql @@ -1 +1 @@ -select arraySlice(groupArray(x),1,1) as y from (select uniqState(number) as x from numbers(10) group by number); +select arraySlice(groupArray(x), 1, 1) as y from (select uniqState(number) as x from numbers(10) group by number order by number); diff --git a/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum_long.sql b/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum_long.sql index db6555e593e..67534a4611e 100644 --- a/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum_long.sql +++ b/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum_long.sql @@ -9,7 +9,6 @@ CREATE TABLE mutations_and_quorum2 (`server_date` Date, `something` String) ENGI -- Should not be larger then 600e6 (default timeout in clickhouse-test) SET insert_quorum=2, insert_quorum_parallel=0, insert_quorum_timeout=300e3; -SET insert_keeper_fault_injection_probability=0; INSERT INTO mutations_and_quorum1 VALUES ('2019-01-01', 'test1'), ('2019-02-01', 'test2'), ('2019-03-01', 'test3'), ('2019-04-01', 'test4'), ('2019-05-01', 'test1'), ('2019-06-01', 'test2'), ('2019-07-01', 'test3'), ('2019-08-01', 'test4'), ('2019-09-01', 'test1'), ('2019-10-01', 'test2'), ('2019-11-01', 'test3'), ('2019-12-01', 'test4'); diff --git a/tests/queries/0_stateless/01098_msgpack_format.sh b/tests/queries/0_stateless/01098_msgpack_format.sh index 24638f33324..e2ae026eb27 100755 --- a/tests/queries/0_stateless/01098_msgpack_format.sh +++ b/tests/queries/0_stateless/01098_msgpack_format.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS msgpack"; @@ -128,4 +128,3 @@ $CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.msgpack', 'MsgPack', 'x Arr rm $USER_FILES_PATH/data.msgpack - diff --git a/tests/queries/0_stateless/01134_max_rows_to_group_by.sql b/tests/queries/0_stateless/01134_max_rows_to_group_by.sql index bfbc499e1c3..f9ea37cb65a 100644 --- a/tests/queries/0_stateless/01134_max_rows_to_group_by.sql +++ b/tests/queries/0_stateless/01134_max_rows_to_group_by.sql @@ -2,6 +2,9 @@ SET max_block_size = 1; SET max_rows_to_group_by = 10; SET group_by_overflow_mode = 'throw'; +-- Settings 'max_rows_to_group_by' and 'max_bytes_before_external_group_by' are mutually exclusive. 
+SET max_bytes_before_external_group_by = 0; + SELECT 'test1', number FROM system.numbers GROUP BY number; -- { serverError 158 } SET group_by_overflow_mode = 'break'; diff --git a/tests/queries/0_stateless/01162_strange_mutations.sh b/tests/queries/0_stateless/01162_strange_mutations.sh index eea9ea5f7e5..f2428141264 100755 --- a/tests/queries/0_stateless/01162_strange_mutations.sh +++ b/tests/queries/0_stateless/01162_strange_mutations.sh @@ -1,4 +1,6 @@ #!/usr/bin/env bash +# Tags: no-replicated-database +# Tag no-replicated-database: CREATE AS SELECT is disabled CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01193_metadata_loading.sh b/tests/queries/0_stateless/01193_metadata_loading.sh index 50425eae018..c25cdf4e970 100755 --- a/tests/queries/0_stateless/01193_metadata_loading.sh +++ b/tests/queries/0_stateless/01193_metadata_loading.sh @@ -29,7 +29,7 @@ create_tables() { groupArray( create1 || toString(number) || create2 || engines[1 + number % length(engines)] || ';\n' || insert1 || toString(number) || insert2 - ), ';\n') FROM numbers($tables) FORMAT TSVRaw;" | $CLICKHOUSE_CLIENT -nm + ), ';\n') FROM numbers($tables) SETTINGS max_bytes_before_external_group_by = 0 FORMAT TSVRaw;" | $CLICKHOUSE_CLIENT -nm } $CLICKHOUSE_CLIENT -q "CREATE DATABASE $db" diff --git a/tests/queries/0_stateless/01244_optimize_distributed_group_by_sharding_key.sql b/tests/queries/0_stateless/01244_optimize_distributed_group_by_sharding_key.sql index 291910ed43f..0d24b238d64 100644 --- a/tests/queries/0_stateless/01244_optimize_distributed_group_by_sharding_key.sql +++ b/tests/queries/0_stateless/01244_optimize_distributed_group_by_sharding_key.sql @@ -4,6 +4,9 @@ set optimize_distributed_group_by_sharding_key=1; +-- Some queries in this test require sorting after aggregation. 
+set max_bytes_before_external_group_by = 0; + drop table if exists dist_01247; drop table if exists data_01247; diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference index 6c639926aac..1a3a271528c 100644 --- a/tests/queries/0_stateless/01271_show_privileges.reference +++ b/tests/queries/0_stateless/01271_show_privileges.reference @@ -48,6 +48,7 @@ ALTER TABLE [] \N ALTER ALTER DATABASE [] \N ALTER ALTER VIEW REFRESH ['ALTER LIVE VIEW REFRESH','REFRESH VIEW'] VIEW ALTER VIEW ALTER VIEW MODIFY QUERY ['ALTER TABLE MODIFY QUERY'] VIEW ALTER VIEW +ALTER VIEW MODIFY REFRESH ['ALTER TABLE MODIFY QUERY'] VIEW ALTER VIEW ALTER VIEW [] \N ALTER ALTER [] \N ALL CREATE DATABASE [] DATABASE CREATE @@ -127,12 +128,14 @@ SYSTEM FETCHES ['SYSTEM STOP FETCHES','SYSTEM START FETCHES','STOP FETCHES','STA SYSTEM MOVES ['SYSTEM STOP MOVES','SYSTEM START MOVES','STOP MOVES','START MOVES'] TABLE SYSTEM SYSTEM PULLING REPLICATION LOG ['SYSTEM STOP PULLING REPLICATION LOG','SYSTEM START PULLING REPLICATION LOG'] TABLE SYSTEM SYSTEM CLEANUP ['SYSTEM STOP CLEANUP','SYSTEM START CLEANUP'] TABLE SYSTEM +SYSTEM VIEWS ['SYSTEM REFRESH VIEW','SYSTEM START VIEWS','SYSTEM STOP VIEWS','SYSTEM START VIEW','SYSTEM STOP VIEW','SYSTEM CANCEL VIEW','REFRESH VIEW','START VIEWS','STOP VIEWS','START VIEW','STOP VIEW','CANCEL VIEW'] VIEW SYSTEM SYSTEM DISTRIBUTED SENDS ['SYSTEM STOP DISTRIBUTED SENDS','SYSTEM START DISTRIBUTED SENDS','STOP DISTRIBUTED SENDS','START DISTRIBUTED SENDS'] TABLE SYSTEM SENDS SYSTEM REPLICATED SENDS ['SYSTEM STOP REPLICATED SENDS','SYSTEM START REPLICATED SENDS','STOP REPLICATED SENDS','START REPLICATED SENDS'] TABLE SYSTEM SENDS SYSTEM SENDS ['SYSTEM STOP SENDS','SYSTEM START SENDS','STOP SENDS','START SENDS'] \N SYSTEM SYSTEM REPLICATION QUEUES ['SYSTEM STOP REPLICATION QUEUES','SYSTEM START REPLICATION QUEUES','STOP REPLICATION QUEUES','START REPLICATION QUEUES'] TABLE SYSTEM SYSTEM DROP REPLICA ['DROP REPLICA'] TABLE SYSTEM SYSTEM SYNC REPLICA ['SYNC REPLICA'] TABLE SYSTEM +SYSTEM REPLICA READINESS ['SYSTEM REPLICA READY','SYSTEM REPLICA UNREADY'] GLOBAL SYSTEM SYSTEM RESTART REPLICA ['RESTART REPLICA'] TABLE SYSTEM SYSTEM RESTORE REPLICA ['RESTORE REPLICA'] TABLE SYSTEM SYSTEM WAIT LOADING PARTS ['WAIT LOADING PARTS'] TABLE SYSTEM diff --git a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh index 28e8e8386cf..fb7bf5c6fc1 100755 --- a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh +++ b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') $CLICKHOUSE_CLIENT -n --query=" set allow_deprecated_database_ordinary=1; diff --git a/tests/queries/0_stateless/01291_distributed_low_cardinality_memory_efficient.sql b/tests/queries/0_stateless/01291_distributed_low_cardinality_memory_efficient.sql index 267f5585705..3697a167989 100644 --- a/tests/queries/0_stateless/01291_distributed_low_cardinality_memory_efficient.sql +++ b/tests/queries/0_stateless/01291_distributed_low_cardinality_memory_efficient.sql @@ -6,7 +6,12 @@ DROP TABLE IF EXISTS dist; create table data (key String) Engine=Memory(); create table dist (key LowCardinality(String)) engine=Distributed(test_cluster_two_shards, currentDatabase(), data); insert into data values ('foo'); + set distributed_aggregation_memory_efficient=1; + +-- There is an obscure bug in rare corner case. +set max_bytes_before_external_group_by = 0; + select * from dist group by key; DROP TABLE data; diff --git a/tests/queries/0_stateless/01300_group_by_other_keys_having.sql b/tests/queries/0_stateless/01300_group_by_other_keys_having.sql index 911f61a62e2..203e8322ad9 100644 --- a/tests/queries/0_stateless/01300_group_by_other_keys_having.sql +++ b/tests/queries/0_stateless/01300_group_by_other_keys_having.sql @@ -1,4 +1,5 @@ set optimize_group_by_function_keys = 1; +set optimize_syntax_fuse_functions = 0; set allow_experimental_analyzer = 1; -- { echoOn } diff --git a/tests/queries/0_stateless/01357_version_collapsing_attach_detach_zookeeper.reference b/tests/queries/0_stateless/01357_version_collapsing_attach_detach_zookeeper.reference index 353c70aec11..41609184a74 100644 --- a/tests/queries/0_stateless/01357_version_collapsing_attach_detach_zookeeper.reference +++ b/tests/queries/0_stateless/01357_version_collapsing_attach_detach_zookeeper.reference @@ -1,3 +1,3 @@ -metadata format version: 1\ndate column: \nsampling expression: \nindex granularity: 8192\nmode: 7\nsign column: sign\nprimary key: key1, key2\ndata format version: 1\npartition key: d\ngranularity bytes: 10485760\n +metadata format version: 1\ndate column: \nsampling expression: \nindex granularity: 8192\nmode: 7\nsign column: sign\nprimary key: key1, key2\ndata format version: 1\npartition key: d\ngranularity bytes: 10485760\nmerge parameters format version: 2\nversion column: version\n 1 1 diff --git a/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sh b/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sh index 1fd8a2b29c6..bf97a8e4f9d 100755 --- a/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sh +++ b/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sh @@ -13,7 +13,7 @@ FREEZE_OUT_STRUCTURE='backup_name String, backup_path String , part_backup_path # setup ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS table_for_freeze_replicated SYNC;" -${CLICKHOUSE_CLIENT} --query "CREATE TABLE table_for_freeze_replicated (key UInt64, value String) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table_for_freeze_replicated', '1') ORDER BY key PARTITION BY key % 10;" +${CLICKHOUSE_CLIENT} --query "CREATE TABLE table_for_freeze_replicated (key UInt64, 
value String) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table_for_freeze_replicated', '1') ORDER BY key PARTITION BY key % 10 SETTINGS disable_freeze_partition_for_zero_copy_replication=0;" ${CLICKHOUSE_CLIENT} --insert_keeper_fault_injection_probability=0 --query "INSERT INTO table_for_freeze_replicated SELECT number, toString(number) from numbers(10);" ${CLICKHOUSE_CLIENT} --query "ALTER TABLE table_for_freeze_replicated FREEZE WITH NAME 'test_01417' FORMAT TSVWithNames SETTINGS alter_partition_verbose_result = 1;" \ diff --git a/tests/queries/0_stateless/01451_replicated_detach_drop_and_quorum_long.sql b/tests/queries/0_stateless/01451_replicated_detach_drop_and_quorum_long.sql index eea231c9f58..21b65995482 100644 --- a/tests/queries/0_stateless/01451_replicated_detach_drop_and_quorum_long.sql +++ b/tests/queries/0_stateless/01451_replicated_detach_drop_and_quorum_long.sql @@ -1,6 +1,5 @@ -- Tags: long, replica, no-replicated-database -SET insert_keeper_fault_injection_probability=0; -- disable fault injection; part ids are non-deterministic in case of insert retries SET replication_alter_partitions_sync = 2; @@ -10,7 +9,7 @@ DROP TABLE IF EXISTS replica2; CREATE TABLE replica1 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/01451/quorum', 'r1') order by tuple() settings max_replicated_merges_in_queue = 0; CREATE TABLE replica2 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/01451/quorum', 'r2') order by tuple() settings max_replicated_merges_in_queue = 0; -INSERT INTO replica1 VALUES (0); +INSERT INTO replica1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (0); SYSTEM SYNC REPLICA replica2; @@ -27,7 +26,7 @@ ALTER TABLE replica2 DROP PARTITION ID 'all'; SET insert_quorum = 2, insert_quorum_parallel = 0; -INSERT INTO replica2 VALUES (1); +INSERT INTO replica2 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (1); SYSTEM SYNC REPLICA replica2; @@ -39,7 +38,7 @@ SELECT COUNT() FROM replica1; SET insert_quorum_parallel=1; -INSERT INTO replica2 VALUES (2); +INSERT INTO replica2 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (2); -- should work, parallel quorum nodes exists only during insert ALTER TABLE replica1 DROP PART 'all_3_3_0'; diff --git a/tests/queries/0_stateless/01451_replicated_detach_drop_part_long.sql b/tests/queries/0_stateless/01451_replicated_detach_drop_part_long.sql index bf7a471fa40..25b2923ddd9 100644 --- a/tests/queries/0_stateless/01451_replicated_detach_drop_part_long.sql +++ b/tests/queries/0_stateless/01451_replicated_detach_drop_part_long.sql @@ -1,7 +1,6 @@ -- Tags: long, replica, no-replicated-database -- Tag no-replicated-database: Fails due to additional replicas or shards -SET insert_keeper_fault_injection_probability=0; -- disable fault injection; part ids are non-deterministic in case of insert retries SET replication_alter_partitions_sync = 2; DROP TABLE IF EXISTS replica1 SYNC; @@ -10,9 +9,9 @@ DROP TABLE IF EXISTS replica2 SYNC; CREATE TABLE replica1 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/'||currentDatabase()||'test/01451/attach', 'r1') order by tuple() settings max_replicated_merges_in_queue = 0; CREATE TABLE replica2 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/'||currentDatabase()||'test/01451/attach', 'r2') order by tuple() settings max_replicated_merges_in_queue = 0; -INSERT INTO replica1 VALUES (0); -INSERT INTO replica1 VALUES (1); -INSERT INTO replica1 VALUES (2); +INSERT 
INTO replica1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (0); +INSERT INTO replica1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (1); +INSERT INTO replica1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (2); ALTER TABLE replica1 DETACH PART 'all_100_100_0'; -- { serverError 232 } @@ -25,7 +24,7 @@ SELECT v FROM replica1 ORDER BY v; SELECT name FROM system.detached_parts WHERE table = 'replica2' AND database = currentDatabase(); -ALTER TABLE replica2 ATTACH PART 'all_1_1_0'; +ALTER TABLE replica2 ATTACH PART 'all_1_1_0' SETTINGS insert_keeper_fault_injection_probability=0; SYSTEM SYNC REPLICA replica1; SELECT v FROM replica1 ORDER BY v; diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas.sh index c05d813ca7f..a9a6d27c145 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas.sh @@ -20,10 +20,6 @@ function thread { for x in {0..99}; do # sometimes we can try to commit obsolete part if fetches will be quite fast, # so supress warning messages like "Tried to commit obsolete part ... covered by ..." - # (2) keeper fault injection for inserts because - # it can be a cause of deduplicated parts be visible to SELECTs for sometime (until cleanup thread remove them), - # so the same SELECT on different replicas can return different results, i.e. test output will be non-deterministic - # (see #9712) $CLICKHOUSE_CLIENT --insert_keeper_fault_injection_probability=0 --query "INSERT INTO r$1 SELECT $x % $NUM_REPLICAS = $1 ? $x - 1 : $x" 2>/dev/null # Replace some records as duplicates so they will be written by other replicas done } diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh index 01c88336282..1f76a2efc6b 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh @@ -24,7 +24,7 @@ function thread { while true; do $CLICKHOUSE_CLIENT --query "DETACH TABLE r$1" $CLICKHOUSE_CLIENT --query "ATTACH TABLE r$1" - $CLICKHOUSE_CLIENT --insert_quorum 3 --insert_quorum_parallel 0 --insert_keeper_fault_injection_probability=0 --query "INSERT INTO r$1 SELECT $x" 2>&1 | grep -qE "$valid_exceptions_to_retry" || break + $CLICKHOUSE_CLIENT --insert_quorum 3 --insert_quorum_parallel 0 --query "INSERT INTO r$1 SELECT $x" 2>&1 | grep -qE "$valid_exceptions_to_retry" || break done done } diff --git a/tests/queries/0_stateless/01472_many_rows_in_totals.sql b/tests/queries/0_stateless/01472_many_rows_in_totals.sql index d79d189a28d..bea8c255f21 100644 --- a/tests/queries/0_stateless/01472_many_rows_in_totals.sql +++ b/tests/queries/0_stateless/01472_many_rows_in_totals.sql @@ -1,4 +1,7 @@ +-- Disable external aggregation because it may produce several blocks instead of one. 
+set max_bytes_before_external_group_by = 0; set output_format_write_statistics = 0; + select g, s from (select g, sum(number) as s from numbers(4) group by bitAnd(number, 1) as g with totals order by g) array join [1, 2] as a format Pretty; select '--'; diff --git a/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh b/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh index 1ccbe34b10a..22cd6fb8127 100755 --- a/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh +++ b/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh @@ -20,7 +20,7 @@ done function thread { i=0 retries=300 while [[ $i -lt $retries ]]; do # server can be dead - $CLICKHOUSE_CLIENT --insert_quorum 3 --insert_quorum_parallel 1 --insert_keeper_fault_injection_probability=0 --query "INSERT INTO r$1 SELECT $2" && break + $CLICKHOUSE_CLIENT --insert_quorum 3 --insert_quorum_parallel 1 --query "INSERT INTO r$1 SELECT $2" && break ((++i)) sleep 0.1 done diff --git a/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh b/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh index 6fbdf42914c..1589f17c752 100755 --- a/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh +++ b/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh @@ -21,7 +21,7 @@ done $CLICKHOUSE_CLIENT -n -q "SYSTEM STOP REPLICATION QUEUES r2;" function thread { - $CLICKHOUSE_CLIENT --insert_quorum 2 --insert_quorum_parallel 1 --insert_keeper_fault_injection_probability=0 --query "INSERT INTO r1 SELECT $1" + $CLICKHOUSE_CLIENT --insert_quorum 2 --insert_quorum_parallel 1 --query "INSERT INTO r1 SELECT $1" } for i in $(seq 1 $NUM_INSERTS); do diff --git a/tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.sh b/tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.sh index bf88ad0e0b2..a814759ab10 100755 --- a/tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.sh +++ b/tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.sh @@ -20,10 +20,9 @@ $CLICKHOUSE_CLIENT -q "CREATE TABLE parallel_q2 (x UInt64) ENGINE=ReplicatedMerg $CLICKHOUSE_CLIENT -q "SYSTEM STOP REPLICATION QUEUES parallel_q2" -$CLICKHOUSE_CLIENT --insert_keeper_fault_injection_probability=0 -q "INSERT INTO parallel_q1 VALUES (1)" - -# disable keeper fault injection during insert since test checks part names. 
Part names can differ in case of retries during insert -$CLICKHOUSE_CLIENT --insert_quorum 2 --insert_quorum_parallel 1 --insert_keeper_fault_injection_probability=0 --query="INSERT INTO parallel_q1 VALUES (2)" & +# This test depends on part names and those aren't deterministic with faults +$CLICKHOUSE_CLIENT --insert_keeper_fault_injection_probability=0 -q "INSERT INTO parallel_q1 VALUES (1)" +$CLICKHOUSE_CLIENT --insert_keeper_fault_injection_probability=0 --insert_quorum 2 --insert_quorum_parallel 1 --query="INSERT INTO parallel_q1 VALUES (2)" & part_count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT() FROM system.parts WHERE table='parallel_q1' and database='${CLICKHOUSE_DATABASE}'") diff --git a/tests/queries/0_stateless/01509_parallel_quorum_insert_no_replicas_long.sql b/tests/queries/0_stateless/01509_parallel_quorum_insert_no_replicas_long.sql index 5a23473dd0a..24b368090e7 100644 --- a/tests/queries/0_stateless/01509_parallel_quorum_insert_no_replicas_long.sql +++ b/tests/queries/0_stateless/01509_parallel_quorum_insert_no_replicas_long.sql @@ -16,8 +16,6 @@ CREATE TABLE r2 ( ENGINE = ReplicatedMergeTree('/clickhouse/{database}/01509_parallel_quorum_insert_no_replicas', '2') ORDER BY tuple(); -SET insert_keeper_fault_injection_probability=0; - SET insert_quorum_parallel=1; SET insert_quorum=3; diff --git a/tests/queries/0_stateless/01513_count_without_select_sequence_consistency_zookeeper_long.sql b/tests/queries/0_stateless/01513_count_without_select_sequence_consistency_zookeeper_long.sql index 4a992449a16..f800ff86aa5 100644 --- a/tests/queries/0_stateless/01513_count_without_select_sequence_consistency_zookeeper_long.sql +++ b/tests/queries/0_stateless/01513_count_without_select_sequence_consistency_zookeeper_long.sql @@ -20,7 +20,6 @@ SYSTEM SYNC REPLICA quorum3; SET select_sequential_consistency=0; SET optimize_trivial_count_query=1; SET insert_quorum=2, insert_quorum_parallel=0; -SET insert_keeper_fault_injection_probability=0; SYSTEM STOP FETCHES quorum1; diff --git a/tests/queries/0_stateless/01513_optimize_aggregation_in_order_memory_long.sql b/tests/queries/0_stateless/01513_optimize_aggregation_in_order_memory_long.sql index 3d57518d0f4..b107af07194 100644 --- a/tests/queries/0_stateless/01513_optimize_aggregation_in_order_memory_long.sql +++ b/tests/queries/0_stateless/01513_optimize_aggregation_in_order_memory_long.sql @@ -12,6 +12,7 @@ optimize table data_01513 final; set max_memory_usage='500M'; set max_threads=1; set max_block_size=500; +set max_bytes_before_external_group_by=0; select key, groupArray(repeat('a', 200)), count() from data_01513 group by key format Null settings optimize_aggregation_in_order=0; -- { serverError 241 } select key, groupArray(repeat('a', 200)), count() from data_01513 group by key format Null settings optimize_aggregation_in_order=1; diff --git a/tests/queries/0_stateless/01514_distributed_cancel_query_on_error.sh b/tests/queries/0_stateless/01514_distributed_cancel_query_on_error.sh index 99025890cb3..edf3683ccba 100755 --- a/tests/queries/0_stateless/01514_distributed_cancel_query_on_error.sh +++ b/tests/queries/0_stateless/01514_distributed_cancel_query_on_error.sh @@ -15,6 +15,7 @@ opts=( "--max_block_size=50" "--max_threads=1" "--max_distributed_connections=2" + "--max_bytes_before_external_group_by=0" ) ${CLICKHOUSE_CLIENT} "${opts[@]}" -q "SELECT groupArray(repeat('a', if(_shard_num == 2, 100000, 1))), number%100000 k from remote('127.{2,3}', system.numbers) GROUP BY k LIMIT 10e6" |& { # the query should fail earlier on 127.3 and 
127.2 should not even go to the memory limit exceeded error. diff --git a/tests/queries/0_stateless/01532_execute_merges_on_single_replica_long.sql b/tests/queries/0_stateless/01532_execute_merges_on_single_replica_long.sql index 30beb29251e..49ef9d8b79f 100644 --- a/tests/queries/0_stateless/01532_execute_merges_on_single_replica_long.sql +++ b/tests/queries/0_stateless/01532_execute_merges_on_single_replica_long.sql @@ -2,8 +2,6 @@ -- Tag no-replicated-database: Fails due to additional replicas or shards -- Tag no-parallel: static zk path -SET insert_keeper_fault_injection_probability=0; -- disable fault injection; part ids are non-deterministic in case of insert retries - DROP TABLE IF EXISTS execute_on_single_replica_r1 SYNC; DROP TABLE IF EXISTS execute_on_single_replica_r2 SYNC; @@ -11,7 +9,7 @@ DROP TABLE IF EXISTS execute_on_single_replica_r2 SYNC; CREATE TABLE execute_on_single_replica_r1 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01532/execute_on_single_replica', 'r1') ORDER BY tuple() SETTINGS execute_merges_on_single_replica_time_threshold=10; CREATE TABLE execute_on_single_replica_r2 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01532/execute_on_single_replica', 'r2') ORDER BY tuple() SETTINGS execute_merges_on_single_replica_time_threshold=10; -INSERT INTO execute_on_single_replica_r1 VALUES (1); +INSERT INTO execute_on_single_replica_r1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (1); SYSTEM SYNC REPLICA execute_on_single_replica_r2; SET optimize_throw_if_noop=1; diff --git a/tests/queries/0_stateless/01532_tuple_with_name_type.reference b/tests/queries/0_stateless/01532_tuple_with_name_type.reference index f9f6b5995ce..8a3e57d9016 100644 --- a/tests/queries/0_stateless/01532_tuple_with_name_type.reference +++ b/tests/queries/0_stateless/01532_tuple_with_name_type.reference @@ -1,5 +1,4 @@ a Tuple(key String, value String) a Tuple(Tuple(key String, value String)) -a.key Array(String) -a.value Array(String) +a Array(Tuple(key String, value String)) a Tuple(UInt8, Tuple(key String, value String)) diff --git a/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh b/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh index 6f48456f71b..697b32a77ae 100755 --- a/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh +++ b/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') $CLICKHOUSE_CLIENT --multiquery --query " SET allow_suspicious_low_cardinality_types=1; diff --git a/tests/queries/0_stateless/01556_accurate_cast_or_null.reference b/tests/queries/0_stateless/01556_accurate_cast_or_null.reference index a2ccd5af868..5187a19cc72 100644 --- a/tests/queries/0_stateless/01556_accurate_cast_or_null.reference +++ b/tests/queries/0_stateless/01556_accurate_cast_or_null.reference @@ -36,6 +36,8 @@ 2023-05-30 14:38:20 1970-01-01 00:00:19 1970-01-01 19:26:40 +1970-01-01 00:00:00 +2106-02-07 06:28:15 \N \N \N diff --git a/tests/queries/0_stateless/01556_accurate_cast_or_null.sql b/tests/queries/0_stateless/01556_accurate_cast_or_null.sql index 2fb7b1177e6..15ac71dea93 100644 --- a/tests/queries/0_stateless/01556_accurate_cast_or_null.sql +++ b/tests/queries/0_stateless/01556_accurate_cast_or_null.sql @@ -39,9 +39,12 @@ SELECT accurateCastOrNull(number + 127, 'Int8') AS x FROM numbers (2) ORDER BY x SELECT accurateCastOrNull(-1, 'DateTime'); SELECT accurateCastOrNull(5000000000, 'DateTime'); SELECT accurateCastOrNull('1xxx', 'DateTime'); -select toString(accurateCastOrNull('2023-05-30 14:38:20', 'DateTime'), timezone()); +SELECT toString(accurateCastOrNull('2023-05-30 14:38:20', 'DateTime'), timezone()); SELECT toString(accurateCastOrNull(19, 'DateTime'), 'UTC'); SELECT toString(accurateCastOrNull(70000, 'DateTime'), 'UTC'); +-- need fixed timezone in these two lines +SELECT toString(accurateCastOrNull('1965-05-30 14:38:20', 'DateTime'), timezone()) SETTINGS session_timezone = 'UTC'; +SELECT toString(accurateCastOrNull('2223-05-30 14:38:20', 'DateTime'), timezone()) SETTINGS session_timezone = 'UTC'; SELECT accurateCastOrNull(-1, 'Date'); SELECT accurateCastOrNull(5000000000, 'Date'); diff --git a/tests/queries/0_stateless/01563_distributed_query_finish.sh b/tests/queries/0_stateless/01563_distributed_query_finish.sh index b49042ead9d..0019c714e40 100755 --- a/tests/queries/0_stateless/01563_distributed_query_finish.sh +++ b/tests/queries/0_stateless/01563_distributed_query_finish.sh @@ -28,7 +28,7 @@ opts=( "--prefer_localhost_replica=0" ) $CLICKHOUSE_CLIENT "${opts[@]}" --format CSV -nm <&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') $CLICKHOUSE_CLIENT -n --query=" DROP DATABASE IF EXISTS 01684_database_for_cache_dictionary; diff --git a/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh b/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh index 7f47ce1438f..9dd8a41ce5a 100755 --- a/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh +++ b/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') $CLICKHOUSE_CLIENT -n --query=" DROP DATABASE IF EXISTS 01685_database_for_cache_dictionary; diff --git a/tests/queries/0_stateless/01705_normalize_create_alter_function_names.reference b/tests/queries/0_stateless/01705_normalize_create_alter_function_names.reference index b5b93c34c00..75598e15320 100644 --- a/tests/queries/0_stateless/01705_normalize_create_alter_function_names.reference +++ b/tests/queries/0_stateless/01705_normalize_create_alter_function_names.reference @@ -1,2 +1,2 @@ CREATE TABLE default.x\n(\n `i` Int32,\n INDEX mm log2(i) TYPE minmax GRANULARITY 1,\n INDEX nn log2(i) TYPE minmax GRANULARITY 1,\n PROJECTION p\n (\n SELECT max(i)\n ),\n PROJECTION p2\n (\n SELECT min(i)\n )\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/x\', \'r\')\nORDER BY i\nSETTINGS index_granularity = 8192 -metadata format version: 1\ndate column: \nsampling expression: \nindex granularity: 8192\nmode: 0\nsign column: \nprimary key: i\ndata format version: 1\npartition key: \nindices: mm log2(i) TYPE minmax GRANULARITY 1, nn log2(i) TYPE minmax GRANULARITY 1\nprojections: p (SELECT max(i)), p2 (SELECT min(i))\ngranularity bytes: 10485760\n +metadata format version: 1\ndate column: \nsampling expression: \nindex granularity: 8192\nmode: 0\nsign column: \nprimary key: i\ndata format version: 1\npartition key: \nindices: mm log2(i) TYPE minmax GRANULARITY 1, nn log2(i) TYPE minmax GRANULARITY 1\nprojections: p (SELECT max(i)), p2 (SELECT min(i))\ngranularity bytes: 10485760\nmerge parameters format version: 2\n diff --git a/tests/queries/0_stateless/01710_aggregate_projections.sh b/tests/queries/0_stateless/01710_aggregate_projections.sh index 326a564a208..7ea40365937 100755 --- a/tests/queries/0_stateless/01710_aggregate_projections.sh +++ b/tests/queries/0_stateless/01710_aggregate_projections.sh @@ -4,6 +4,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# Number of read rows depends on max_bytes_before_external_group_by. 
+CLICKHOUSE_CLIENT="$CLICKHOUSE_CLIENT --max_bytes_before_external_group_by 0" $CLICKHOUSE_CLIENT -q "CREATE TABLE test_agg_proj (x Int32, y Int32, PROJECTION x_plus_y (SELECT sum(x - y), argMax(x, y) group by x + y)) ENGINE = MergeTree ORDER BY tuple() settings index_granularity = 1" $CLICKHOUSE_CLIENT -q "insert into test_agg_proj select intDiv(number, 2), -intDiv(number,3) - 1 from numbers(100)" diff --git a/tests/queries/0_stateless/01732_race_condition_storage_join_long.sh b/tests/queries/0_stateless/01732_race_condition_storage_join_long.sh index 48e726aca9d..5fc41890a18 100755 --- a/tests/queries/0_stateless/01732_race_condition_storage_join_long.sh +++ b/tests/queries/0_stateless/01732_race_condition_storage_join_long.sh @@ -15,7 +15,7 @@ echo " function read_thread_big() { - while true; do + while true; do echo " SELECT * FROM ( SELECT number AS x FROM numbers(100000) ) AS t1 ALL FULL JOIN storage_join_race USING (x) FORMAT Null; " | $CLICKHOUSE_CLIENT -n @@ -24,7 +24,7 @@ function read_thread_big() function read_thread_small() { - while true; do + while true; do echo " SELECT * FROM ( SELECT number AS x FROM numbers(10) ) AS t1 ALL FULL JOIN storage_join_race USING (x) FORMAT Null; " | $CLICKHOUSE_CLIENT -n @@ -51,8 +51,11 @@ timeout $TIMEOUT bash -c read_thread_big 2> /dev/null & timeout $TIMEOUT bash -c read_thread_small 2> /dev/null & timeout $TIMEOUT bash -c read_thread_select 2> /dev/null & +# Run insert query with a sleep to make sure that it is executed all the time during the read queries. echo " - INSERT INTO storage_join_race SELECT number AS x, number AS y FROM numbers (10000000); + INSERT INTO storage_join_race + SELECT number AS x, sleepEachRow(0.1) + number AS y FROM numbers ($TIMEOUT * 10) + SETTINGS function_sleep_max_microseconds_per_block = 100000000, max_block_size = 10; " | $CLICKHOUSE_CLIENT -n wait diff --git a/tests/queries/0_stateless/01799_long_uniq_theta_sketch.sql b/tests/queries/0_stateless/01799_long_uniq_theta_sketch.sql index 37f0c31ab10..9cd75de6abc 100644 --- a/tests/queries/0_stateless/01799_long_uniq_theta_sketch.sql +++ b/tests/queries/0_stateless/01799_long_uniq_theta_sketch.sql @@ -1,5 +1,8 @@ -- Tags: long, no-fasttest +-- The result slightly differs but it's ok since `uniqueTheta` is an approximate function. +set max_bytes_before_external_group_by = 0; + SELECT 'uniqTheta'; SELECT Y, uniqTheta(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; diff --git a/tests/queries/0_stateless/01825_type_json_btc.sh b/tests/queries/0_stateless/01825_type_json_btc.sh index f11b952ae3b..1e74166e7a7 100755 --- a/tests/queries/0_stateless/01825_type_json_btc.sh +++ b/tests/queries/0_stateless/01825_type_json_btc.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ rm -rf ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME:?}/* cp $CUR_DIR/data_json/btc_transactions.json ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ diff --git a/tests/queries/0_stateless/01825_type_json_schema_inference.sh b/tests/queries/0_stateless/01825_type_json_schema_inference.sh index 851751ead43..5fca608d8bb 100755 --- a/tests/queries/0_stateless/01825_type_json_schema_inference.sh +++ b/tests/queries/0_stateless/01825_type_json_schema_inference.sh @@ -10,7 +10,7 @@ ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_inference" ${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_inference (id UInt64, obj Object(Nullable('json')), s String) \ ENGINE = MergeTree ORDER BY id" --allow_experimental_object_type 1 -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ rm -rf ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME:?}/* @@ -35,4 +35,3 @@ ${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_inference SELECT * FROM file('${CLIC ${CLICKHOUSE_CLIENT} -q "SELECT * FROM t_json_inference FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_inference" - diff --git a/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh b/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh index 806fbd05fbf..36a2165329b 100755 --- a/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh +++ b/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') $CLICKHOUSE_CLIENT -n --query=" SET allow_experimental_bigint_types = 1; diff --git a/tests/queries/0_stateless/01889_sqlite_read_write.sh b/tests/queries/0_stateless/01889_sqlite_read_write.sh index 7691d2e3c2c..02b9a649e94 100755 --- a/tests/queries/0_stateless/01889_sqlite_read_write.sh +++ b/tests/queries/0_stateless/01889_sqlite_read_write.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CUR_DIR"/../shell_config.sh # See 01658_read_file_to_string_column.sh -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p "${user_files_path}/" chmod 777 "${user_files_path}" diff --git a/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh b/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh index b1d6049abcf..3676f1429b2 100755 --- a/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh +++ b/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') $CLICKHOUSE_CLIENT -n --query=" DROP TABLE IF EXISTS dictionary_array_source_table; diff --git a/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh b/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh index 07d2ac7baad..6aecb20329a 100755 --- a/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh +++ b/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') $CLICKHOUSE_CLIENT -n --query=" DROP TABLE IF EXISTS dictionary_nullable_source_table; diff --git a/tests/queries/0_stateless/01913_exact_rows_before_limit_full.reference b/tests/queries/0_stateless/01913_exact_rows_before_limit_full.reference index a0f4560ca1c..95ce4d6428d 100644 --- a/tests/queries/0_stateless/01913_exact_rows_before_limit_full.reference +++ b/tests/queries/0_stateless/01913_exact_rows_before_limit_full.reference @@ -45,7 +45,7 @@ "data": [ - [12] + [10] ], "rows": 1, diff --git a/tests/queries/0_stateless/01913_exact_rows_before_limit_full.sql b/tests/queries/0_stateless/01913_exact_rows_before_limit_full.sql index 84f97090169..07e54fb2ec2 100644 --- a/tests/queries/0_stateless/01913_exact_rows_before_limit_full.sql +++ b/tests/queries/0_stateless/01913_exact_rows_before_limit_full.sql @@ -10,20 +10,20 @@ set exact_rows_before_limit = 1, output_format_write_statistics = 0, max_block_s select * from test limit 1 FORMAT JSONCompact; -select * from test where i < 10 group by i limit 1 FORMAT JSONCompact; +select * from test where i < 10 group by i order by i limit 1 FORMAT JSONCompact; -select * from test group by i having i in (10, 11, 12) limit 1 FORMAT JSONCompact; +select * from test group by i having i in (10, 11, 12) order by i limit 1 FORMAT JSONCompact; select * from test where i < 20 order by i limit 1 FORMAT JSONCompact; set prefer_localhost_replica = 0; -select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 limit 1 FORMAT JSONCompact; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 order by i limit 1 FORMAT JSONCompact; select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 order by i limit 1 FORMAT JSONCompact; set prefer_localhost_replica = 1; -select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 limit 1 FORMAT JSONCompact; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 order by i limit 1 FORMAT JSONCompact; select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 order by i limit 1 FORMAT JSONCompact; -select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) limit 1 FORMAT JSONCompact; +select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) order by i limit 1 FORMAT JSONCompact; drop table if exists test; diff --git a/tests/queries/0_stateless/01946_test_zstd_decompression_with_escape_sequence_at_the_end_of_buffer.sh b/tests/queries/0_stateless/01946_test_zstd_decompression_with_escape_sequence_at_the_end_of_buffer.sh index 2f8d8f06dee..4b230e4f738 100755 --- a/tests/queries/0_stateless/01946_test_zstd_decompression_with_escape_sequence_at_the_end_of_buffer.sh +++ b/tests/queries/0_stateless/01946_test_zstd_decompression_with_escape_sequence_at_the_end_of_buffer.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # See 01658_read_file_to_string_column.sh -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 
char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p ${user_files_path}/ cp $CUR_DIR/data_zstd/test_01946.zstd ${user_files_path}/ @@ -19,4 +19,3 @@ select * from file('test_01946.zstd', 'JSONEachRow', 'foo String') order by foo set input_format_parallel_parsing = 1; select * from file('test_01946.zstd', 'JSONEachRow', 'foo String') order by foo limit 30 format Null; " - diff --git a/tests/queries/0_stateless/02000_join_on_const.reference b/tests/queries/0_stateless/02000_join_on_const.reference index e9d1c685fdd..848ecedf9e3 100644 --- a/tests/queries/0_stateless/02000_join_on_const.reference +++ b/tests/queries/0_stateless/02000_join_on_const.reference @@ -65,3 +65,7 @@ SELECT * FROM (SELECT 1 as a) as t1 FULL JOIN ( SELECT ('b', 256) as b ) AS t2 SELECT * FROM (SELECT 1 as a) as t1 SEMI JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; SELECT * FROM (SELECT 1 as a) as t1 ANTI JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; 1 ('',0) +2 +4 2 Nullable(UInt64) UInt8 +4 2 UInt64 Nullable(UInt8) +4 2 Nullable(UInt64) Nullable(UInt8) diff --git a/tests/queries/0_stateless/02000_join_on_const.sql b/tests/queries/0_stateless/02000_join_on_const.sql index 3205c084672..a68e75443d8 100644 --- a/tests/queries/0_stateless/02000_join_on_const.sql +++ b/tests/queries/0_stateless/02000_join_on_const.sql @@ -90,6 +90,31 @@ SELECT * FROM (SELECT 1 as a) as t1 ANTI JOIN ( SELECT ('b', 256) as b ) AS t2 -- { echoOff } +SELECT a + 1 +FROM (SELECT 1 as x) as t1 +LEFT JOIN ( SELECT 1 AS a ) AS t2 +ON TRUE +SETTINGS allow_experimental_analyzer=1, join_use_nulls=1; + +SELECT a + 1, x + 1, toTypeName(a), toTypeName(x) +FROM (SELECT 1 as x) as t1 +LEFT JOIN ( SELECT sum(number) as a from numbers(3) GROUP BY NULL) AS t2 +ON TRUE +SETTINGS allow_experimental_analyzer=1, join_use_nulls=1; + +SELECT a + 1, x + 1, toTypeName(a), toTypeName(x) +FROM (SELECT 1 as x) as t1 +RIGHT JOIN ( SELECT sum(number) as a from numbers(3) GROUP BY NULL) AS t2 +ON TRUE +SETTINGS allow_experimental_analyzer=1, join_use_nulls=1; + +SELECT a + 1, x + 1, toTypeName(a), toTypeName(x) +FROM (SELECT 1 as x) as t1 +FULL JOIN ( SELECT sum(number) as a from numbers(3) GROUP BY NULL) AS t2 +ON TRUE +SETTINGS allow_experimental_analyzer=1, join_use_nulls=1; + + DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; diff --git a/tests/queries/0_stateless/02003_compress_bz2.sh b/tests/queries/0_stateless/02003_compress_bz2.sh index afcea410297..b17effb20b6 100755 --- a/tests/queries/0_stateless/02003_compress_bz2.sh +++ b/tests/queries/0_stateless/02003_compress_bz2.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') WORKING_FOLDER_02003="${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}" rm -rf "${WORKING_FOLDER_02003}" diff --git a/tests/queries/0_stateless/02012_compress_lz4.sh b/tests/queries/0_stateless/02012_compress_lz4.sh index 1823718fc9c..aad437c8011 100755 --- a/tests/queries/0_stateless/02012_compress_lz4.sh +++ b/tests/queries/0_stateless/02012_compress_lz4.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') WORKING_FOLDER_02012="${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}" rm -rf "${WORKING_FOLDER_02012}" diff --git a/tests/queries/0_stateless/02022_storage_filelog_one_file.sh b/tests/queries/0_stateless/02022_storage_filelog_one_file.sh index 32ce1643d4e..ea703d69aa5 100755 --- a/tests/queries/0_stateless/02022_storage_filelog_one_file.sh +++ b/tests/queries/0_stateless/02022_storage_filelog_one_file.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # Data preparation. # Now we can get the user_files_path by use the table file function for trick. also we can get it by query as: # "insert into function file('exist.txt', 'CSV', 'val1 char') values ('aaaa'); select _path from file('exist.txt', 'CSV', 'val1 char')" -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') for i in {1..20} do diff --git a/tests/queries/0_stateless/02023_storage_filelog.sh b/tests/queries/0_stateless/02023_storage_filelog.sh index 71ed5ba5471..51c8dc8ab3e 100755 --- a/tests/queries/0_stateless/02023_storage_filelog.sh +++ b/tests/queries/0_stateless/02023_storage_filelog.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # Data preparation. # Now we can get the user_files_path by use the table file function for trick. 
also we can get it by query as: # "insert into function file('exist.txt', 'CSV', 'val1 char') values ('aaaa'); select _path from file('exist.txt', 'CSV', 'val1 char')" -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ diff --git a/tests/queries/0_stateless/02024_storage_filelog_mv.sh b/tests/queries/0_stateless/02024_storage_filelog_mv.sh index 67aa825ac67..33c8693648c 100755 --- a/tests/queries/0_stateless/02024_storage_filelog_mv.sh +++ b/tests/queries/0_stateless/02024_storage_filelog_mv.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # Data preparation. # Now we can get the user_files_path by use the table file function for trick. also we can get it by query as: # "insert into function file('exist.txt', 'CSV', 'val1 char') values ('aaaa'); select _path from file('exist.txt', 'CSV', 'val1 char')" -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ rm -rf ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME:?}/* diff --git a/tests/queries/0_stateless/02025_storage_filelog_virtual_col.sh b/tests/queries/0_stateless/02025_storage_filelog_virtual_col.sh index 0219a0421cb..f027b61c3ef 100755 --- a/tests/queries/0_stateless/02025_storage_filelog_virtual_col.sh +++ b/tests/queries/0_stateless/02025_storage_filelog_virtual_col.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # Data preparation. # Now we can get the user_files_path by use the table file function for trick. also we can get it by query as: # "insert into function file('exist.txt', 'CSV', 'val1 char') values ('aaaa'); select _path from file('exist.txt', 'CSV', 'val1 char')" -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ diff --git a/tests/queries/0_stateless/02026_storage_filelog_largefile.sh b/tests/queries/0_stateless/02026_storage_filelog_largefile.sh index 41a9d82949c..b0a9a4357f3 100755 --- a/tests/queries/0_stateless/02026_storage_filelog_largefile.sh +++ b/tests/queries/0_stateless/02026_storage_filelog_largefile.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # Data preparation. # Now we can get the user_files_path by use the table file function for trick. 
also we can get it by query as: # "insert into function file('exist.txt', 'CSV', 'val1 char') values ('aaaa'); select _path from file('exist.txt', 'CSV', 'val1 char')" -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ diff --git a/tests/queries/0_stateless/02030_capnp_format.sh b/tests/queries/0_stateless/02030_capnp_format.sh index b4484ca3766..6fcfef23cc7 100755 --- a/tests/queries/0_stateless/02030_capnp_format.sh +++ b/tests/queries/0_stateless/02030_capnp_format.sh @@ -5,11 +5,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') CAPN_PROTO_FILE=$USER_FILES_PATH/data.capnp touch $CAPN_PROTO_FILE -SCHEMADIR=$(clickhouse-client --query "select * from file('data.capnp', 'CapnProto', 'val1 char') settings format_schema='nonexist:Message'" 2>&1 | grep Exception | grep -oP "file \K.*(?=/nonexist.capnp)") +SCHEMADIR=$($CLICKHOUSE_CLIENT_BINARY --query "select * from file('data.capnp', 'CapnProto', 'val1 char') settings format_schema='nonexist:Message'" 2>&1 | grep Exception | grep -oP "file \K.*(?=/nonexist.capnp)") CLIENT_SCHEMADIR=$CURDIR/format_schemas SERVER_SCHEMADIR=test_02030 mkdir -p $SCHEMADIR/$SERVER_SCHEMADIR diff --git a/tests/queries/0_stateless/02030_tuple_filter.sql b/tests/queries/0_stateless/02030_tuple_filter.sql index f2fc3a30aa6..1b79ad6c83c 100644 --- a/tests/queries/0_stateless/02030_tuple_filter.sql +++ b/tests/queries/0_stateless/02030_tuple_filter.sql @@ -33,6 +33,7 @@ SET force_primary_key = 0; SELECT * FROM test_tuple_filter WHERE (1, value) = (id, 'A'); SELECT * FROM test_tuple_filter WHERE tuple(id) = tuple(1); +SELECT * FROM test_tuple_filter WHERE (id, (id, id) = (1, NULL)) == (NULL, NULL); SELECT * FROM test_tuple_filter WHERE (log_date, value) = tuple('2021-01-01'); -- { serverError 43 } SELECT * FROM test_tuple_filter WHERE (id, value) = tuple(1); -- { serverError 43 } diff --git a/tests/queries/0_stateless/02047_log_family_complex_structs_data_file_dumps.sh b/tests/queries/0_stateless/02047_log_family_complex_structs_data_file_dumps.sh index 015a162221d..55c01e63294 100755 --- a/tests/queries/0_stateless/02047_log_family_complex_structs_data_file_dumps.sh +++ b/tests/queries/0_stateless/02047_log_family_complex_structs_data_file_dumps.sh @@ -11,7 +11,7 @@ do echo "$engine:" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS tbl" - $CLICKHOUSE_CLIENT --query="CREATE TABLE tbl(x Array(Array(Int32)), y Array(Tuple(z String, w Float32))) ENGINE=$engine" + $CLICKHOUSE_CLIENT --query="CREATE TABLE tbl(x Array(Array(Int32)), y Nested(z String, w Float32)) ENGINE=$engine" data_dir=$($CLICKHOUSE_CLIENT --query="SELECT data_paths[1] FROM system.tables WHERE name='tbl' AND database=currentDatabase()") echo "empty:" diff --git 
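02030_capnp_format applies the same exception-parsing trick to find the server-side format schema directory: it touches an empty data.capnp under the user files path, asks for a schema that does not exist, and extracts the directory from the error message. As it now reads:

    SCHEMADIR=$($CLICKHOUSE_CLIENT_BINARY --query \
        "select * from file('data.capnp', 'CapnProto', 'val1 char') settings format_schema='nonexist:Message'" 2>&1 \
        | grep Exception | grep -oP "file \K.*(?=/nonexist.capnp)")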
a/tests/queries/0_stateless/02051_symlinks_to_user_files.sh b/tests/queries/0_stateless/02051_symlinks_to_user_files.sh index 22d6d2938cd..0af71e4deee 100755 --- a/tests/queries/0_stateless/02051_symlinks_to_user_files.sh +++ b/tests/queries/0_stateless/02051_symlinks_to_user_files.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh # See 01658_read_file_to_string_column.sh -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') FILE_PATH="${user_files_path}/file/" mkdir -p ${FILE_PATH} @@ -29,4 +29,3 @@ trap cleanup EXIT ${CLICKHOUSE_CLIENT} --query="insert into table function file('${symlink_path}', 'Values', 'a String') select 'OK'"; ${CLICKHOUSE_CLIENT} --query="select * from file('${symlink_path}', 'Values', 'a String') order by a"; - diff --git a/tests/queries/0_stateless/02096_totals_global_in_bug.sql b/tests/queries/0_stateless/02096_totals_global_in_bug.sql index ac4f2b9d2ba..27ca26cf141 100644 --- a/tests/queries/0_stateless/02096_totals_global_in_bug.sql +++ b/tests/queries/0_stateless/02096_totals_global_in_bug.sql @@ -1,2 +1 @@ -select sum(number) from remote('127.0.0.{2,3}', numbers(2)) where number global in (select sum(number) from numbers(2) group by number with totals) group by number with totals - +select sum(number) from remote('127.0.0.{2,3}', numbers(2)) where number global in (select sum(number) from numbers(2) group by number with totals) group by number with totals order by number; diff --git a/tests/queries/0_stateless/02098_hashed_array_dictionary_simple_key.reference b/tests/queries/0_stateless/02098_hashed_array_dictionary_simple_key.reference index 6e88bbad146..41b9ab687f8 100644 --- a/tests/queries/0_stateless/02098_hashed_array_dictionary_simple_key.reference +++ b/tests/queries/0_stateless/02098_hashed_array_dictionary_simple_key.reference @@ -26,6 +26,62 @@ select all values as input stream 0 value_0 value_second_0 1 value_1 value_second_1 2 value_2 value_second_2 +Dictionary hashed_array_dictionary_simple_key_simple_attributes +dictGet existing value +value_0 value_second_0 +value_1 value_second_1 +value_2 value_second_2 +dictGet with non existing value +value_0 value_second_0 +value_1 value_second_1 +value_2 value_second_2 +value_first_default value_second_default +dictGetOrDefault existing value +value_0 value_second_0 +value_1 value_second_1 +value_2 value_second_2 +dictGetOrDefault non existing value +value_0 value_second_0 +value_1 value_second_1 +value_2 value_second_2 +default default +dictHas +1 +1 +1 +0 +select all values as input stream +0 value_0 value_second_0 +1 value_1 value_second_1 +2 value_2 value_second_2 +Dictionary hashed_array_dictionary_simple_key_complex_attributes +dictGet existing value +value_0 value_second_0 +value_1 \N +value_2 value_second_2 +dictGet with non existing value +value_0 value_second_0 +value_1 \N +value_2 value_second_2 +value_first_default value_second_default +dictGetOrDefault existing value +value_0 value_second_0 +value_1 \N +value_2 value_second_2 +dictGetOrDefault non existing value +value_0 value_second_0 +value_1 \N +value_2 value_second_2 +default default +dictHas +1 +1 +1 +0 +select all values as input stream +0 value_0 value_second_0 +1 
value_1 \N +2 value_2 value_second_2 Dictionary hashed_array_dictionary_simple_key_complex_attributes dictGet existing value value_0 value_second_0 @@ -64,3 +120,13 @@ dictGet dictGetHierarchy [1] [4,2,1] +Dictionary hashed_array_dictionary_simple_key_hierarchy +dictGet +0 +0 +1 +1 +2 +dictGetHierarchy +[1] +[4,2,1] diff --git a/tests/queries/0_stateless/02098_hashed_array_dictionary_simple_key.sql b/tests/queries/0_stateless/02098_hashed_array_dictionary_simple_key.sql.j2 similarity index 95% rename from tests/queries/0_stateless/02098_hashed_array_dictionary_simple_key.sql rename to tests/queries/0_stateless/02098_hashed_array_dictionary_simple_key.sql.j2 index 7d952223705..e5d8ad36c6d 100644 --- a/tests/queries/0_stateless/02098_hashed_array_dictionary_simple_key.sql +++ b/tests/queries/0_stateless/02098_hashed_array_dictionary_simple_key.sql.j2 @@ -11,6 +11,8 @@ INSERT INTO simple_key_simple_attributes_source_table VALUES(0, 'value_0', 'valu INSERT INTO simple_key_simple_attributes_source_table VALUES(1, 'value_1', 'value_second_1'); INSERT INTO simple_key_simple_attributes_source_table VALUES(2, 'value_2', 'value_second_2'); +{% for dictionary_config in ['', 'SHARDS 16'] -%} + DROP DICTIONARY IF EXISTS hashed_array_dictionary_simple_key_simple_attributes; CREATE DICTIONARY hashed_array_dictionary_simple_key_simple_attributes ( @@ -20,7 +22,7 @@ CREATE DICTIONARY hashed_array_dictionary_simple_key_simple_attributes ) PRIMARY KEY id SOURCE(CLICKHOUSE(TABLE 'simple_key_simple_attributes_source_table')) -LAYOUT(HASHED_ARRAY()) +LAYOUT(HASHED_ARRAY({{ dictionary_config }})) LIFETIME(MIN 1 MAX 1000) SETTINGS(dictionary_use_async_executor=1, max_threads=8); @@ -43,6 +45,7 @@ SELECT 'select all values as input stream'; SELECT * FROM hashed_array_dictionary_simple_key_simple_attributes ORDER BY id; DROP DICTIONARY hashed_array_dictionary_simple_key_simple_attributes; +{% endfor %} DROP TABLE simple_key_simple_attributes_source_table; @@ -59,6 +62,8 @@ INSERT INTO simple_key_complex_attributes_source_table VALUES(0, 'value_0', 'val INSERT INTO simple_key_complex_attributes_source_table VALUES(1, 'value_1', NULL); INSERT INTO simple_key_complex_attributes_source_table VALUES(2, 'value_2', 'value_second_2'); +{% for dictionary_config in ['', 'SHARDS 16'] -%} + DROP DICTIONARY IF EXISTS hashed_array_dictionary_simple_key_complex_attributes; CREATE DICTIONARY hashed_array_dictionary_simple_key_complex_attributes ( @@ -68,7 +73,7 @@ CREATE DICTIONARY hashed_array_dictionary_simple_key_complex_attributes ) PRIMARY KEY id SOURCE(CLICKHOUSE(TABLE 'simple_key_complex_attributes_source_table')) -LAYOUT(HASHED_ARRAY()) +LAYOUT(HASHED_ARRAY({{ dictionary_config }})) LIFETIME(MIN 1 MAX 1000); SELECT 'Dictionary hashed_array_dictionary_simple_key_complex_attributes'; @@ -90,6 +95,9 @@ SELECT 'select all values as input stream'; SELECT * FROM hashed_array_dictionary_simple_key_complex_attributes ORDER BY id; DROP DICTIONARY hashed_array_dictionary_simple_key_complex_attributes; + +{% endfor %} + DROP TABLE simple_key_complex_attributes_source_table; DROP TABLE IF EXISTS simple_key_hierarchy_table; @@ -104,6 +112,8 @@ INSERT INTO simple_key_hierarchy_table VALUES (2, 1); INSERT INTO simple_key_hierarchy_table VALUES (3, 1); INSERT INTO simple_key_hierarchy_table VALUES (4, 2); +{% for dictionary_config in ['', 'SHARDS 16'] -%} + DROP DICTIONARY IF EXISTS hashed_array_dictionary_simple_key_hierarchy; CREATE DICTIONARY hashed_array_dictionary_simple_key_hierarchy ( @@ -112,7 +122,7 @@ CREATE DICTIONARY 
hashed_array_dictionary_simple_key_hierarchy ) PRIMARY KEY id SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_hierarchy_table')) -LAYOUT(HASHED_ARRAY()) +LAYOUT(HASHED_ARRAY({{ dictionary_config }})) LIFETIME(MIN 1 MAX 1000); SELECT 'Dictionary hashed_array_dictionary_simple_key_hierarchy'; @@ -122,5 +132,8 @@ SELECT 'dictGetHierarchy'; SELECT dictGetHierarchy('hashed_array_dictionary_simple_key_hierarchy', toUInt64(1)); SELECT dictGetHierarchy('hashed_array_dictionary_simple_key_hierarchy', toUInt64(4)); +{% endfor %} + DROP DICTIONARY hashed_array_dictionary_simple_key_hierarchy; + DROP TABLE simple_key_hierarchy_table; diff --git a/tests/queries/0_stateless/02099_hashed_array_dictionary_complex_key.reference b/tests/queries/0_stateless/02099_hashed_array_dictionary_complex_key.reference index ec32fa72b4e..13a7548b86f 100644 --- a/tests/queries/0_stateless/02099_hashed_array_dictionary_complex_key.reference +++ b/tests/queries/0_stateless/02099_hashed_array_dictionary_complex_key.reference @@ -26,6 +26,62 @@ select all values as input stream 0 id_key_0 value_0 value_second_0 1 id_key_1 value_1 value_second_1 2 id_key_2 value_2 value_second_2 +Dictionary hashed_array_dictionary_complex_key_simple_attributes +dictGet existing value +value_0 value_second_0 +value_1 value_second_1 +value_2 value_second_2 +dictGet with non existing value +value_0 value_second_0 +value_1 value_second_1 +value_2 value_second_2 +value_first_default value_second_default +dictGetOrDefault existing value +value_0 value_second_0 +value_1 value_second_1 +value_2 value_second_2 +dictGetOrDefault non existing value +value_0 value_second_0 +value_1 value_second_1 +value_2 value_second_2 +default default +dictHas +1 +1 +1 +0 +select all values as input stream +0 id_key_0 value_0 value_second_0 +1 id_key_1 value_1 value_second_1 +2 id_key_2 value_2 value_second_2 +Dictionary hashed_array_dictionary_complex_key_complex_attributes +dictGet existing value +value_0 value_second_0 +value_1 \N +value_2 value_second_2 +dictGet with non existing value +value_0 value_second_0 +value_1 \N +value_2 value_second_2 +value_first_default value_second_default +dictGetOrDefault existing value +value_0 value_second_0 +value_1 \N +value_2 value_second_2 +dictGetOrDefault non existing value +value_0 value_second_0 +value_1 \N +value_2 value_second_2 +default default +dictHas +1 +1 +1 +0 +select all values as input stream +0 id_key_0 value_0 value_second_0 +1 id_key_1 value_1 \N +2 id_key_2 value_2 value_second_2 Dictionary hashed_array_dictionary_complex_key_complex_attributes dictGet existing value value_0 value_second_0 diff --git a/tests/queries/0_stateless/02099_hashed_array_dictionary_complex_key.sql b/tests/queries/0_stateless/02099_hashed_array_dictionary_complex_key.sql.j2 similarity index 96% rename from tests/queries/0_stateless/02099_hashed_array_dictionary_complex_key.sql rename to tests/queries/0_stateless/02099_hashed_array_dictionary_complex_key.sql.j2 index 4d2a825c8af..56f9b264a62 100644 --- a/tests/queries/0_stateless/02099_hashed_array_dictionary_complex_key.sql +++ b/tests/queries/0_stateless/02099_hashed_array_dictionary_complex_key.sql.j2 @@ -12,6 +12,8 @@ INSERT INTO complex_key_simple_attributes_source_table VALUES(0, 'id_key_0', 'va INSERT INTO complex_key_simple_attributes_source_table VALUES(1, 'id_key_1', 'value_1', 'value_second_1'); INSERT INTO complex_key_simple_attributes_source_table VALUES(2, 'id_key_2', 'value_2', 'value_second_2'); +{% for dictionary_config in ['', 
'SHARDS 16'] -%} + DROP DICTIONARY IF EXISTS hashed_array_dictionary_complex_key_simple_attributes; CREATE DICTIONARY hashed_array_dictionary_complex_key_simple_attributes ( @@ -23,7 +25,7 @@ CREATE DICTIONARY hashed_array_dictionary_complex_key_simple_attributes PRIMARY KEY id, id_key SOURCE(CLICKHOUSE(TABLE 'complex_key_simple_attributes_source_table')) LIFETIME(MIN 1 MAX 1000) -LAYOUT(COMPLEX_KEY_HASHED_ARRAY()); +LAYOUT(COMPLEX_KEY_HASHED_ARRAY({{ dictionary_config }})); SELECT 'Dictionary hashed_array_dictionary_complex_key_simple_attributes'; SELECT 'dictGet existing value'; @@ -45,6 +47,8 @@ SELECT * FROM hashed_array_dictionary_complex_key_simple_attributes ORDER BY (id DROP DICTIONARY hashed_array_dictionary_complex_key_simple_attributes; +{% endfor %} + DROP TABLE complex_key_simple_attributes_source_table; DROP TABLE IF EXISTS complex_key_complex_attributes_source_table; @@ -61,6 +65,8 @@ INSERT INTO complex_key_complex_attributes_source_table VALUES(0, 'id_key_0', 'v INSERT INTO complex_key_complex_attributes_source_table VALUES(1, 'id_key_1', 'value_1', NULL); INSERT INTO complex_key_complex_attributes_source_table VALUES(2, 'id_key_2', 'value_2', 'value_second_2'); +{% for dictionary_config in ['', 'SHARDS 16'] -%} + DROP DICTIONARY IF EXISTS hashed_array_dictionary_complex_key_complex_attributes; CREATE DICTIONARY hashed_array_dictionary_complex_key_complex_attributes ( @@ -73,7 +79,7 @@ CREATE DICTIONARY hashed_array_dictionary_complex_key_complex_attributes PRIMARY KEY id, id_key SOURCE(CLICKHOUSE(TABLE 'complex_key_complex_attributes_source_table')) LIFETIME(MIN 1 MAX 1000) -LAYOUT(COMPLEX_KEY_HASHED_ARRAY()); +LAYOUT(COMPLEX_KEY_HASHED_ARRAY({{ dictionary_config }})); SELECT 'Dictionary hashed_array_dictionary_complex_key_complex_attributes'; SELECT 'dictGet existing value'; @@ -93,5 +99,7 @@ SELECT dictHas('hashed_array_dictionary_complex_key_complex_attributes', (number SELECT 'select all values as input stream'; SELECT * FROM hashed_array_dictionary_complex_key_complex_attributes ORDER BY (id, id_key); +{% endfor %} + DROP DICTIONARY hashed_array_dictionary_complex_key_complex_attributes; DROP TABLE complex_key_complex_attributes_source_table; diff --git a/tests/queries/0_stateless/02104_json_strings_nullable_string.sh b/tests/queries/0_stateless/02104_json_strings_nullable_string.sh index 6a5d369e7b6..b3b156b5787 100755 --- a/tests/queries/0_stateless/02104_json_strings_nullable_string.sh +++ b/tests/queries/0_stateless/02104_json_strings_nullable_string.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
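The 02098/02099 tests are renamed from .sql to .sql.j2 so the stateless runner expands them as Jinja2 templates: each dictionary scenario is wrapped in a loop over ['', 'SHARDS 16'] and the layout clause becomes parameterised, which is why the .reference files gain a second, identical block of expected output. A trimmed sketch of the pattern (attribute definitions elided, they are unchanged by this diff):

    {% for dictionary_config in ['', 'SHARDS 16'] -%}
    DROP DICTIONARY IF EXISTS hashed_array_dictionary_simple_key_simple_attributes;
    CREATE DICTIONARY hashed_array_dictionary_simple_key_simple_attributes
    (
        id UInt64
        -- remaining attributes as in the original test
    )
    PRIMARY KEY id
    SOURCE(CLICKHOUSE(TABLE 'simple_key_simple_attributes_source_table'))
    LAYOUT(HASHED_ARRAY({{ dictionary_config }}))
    LIFETIME(MIN 1 MAX 1000)
    SETTINGS(dictionary_use_async_executor=1, max_threads=8);
    -- dictGet / dictGetOrDefault / dictHas checks go here, as in the test
    DROP DICTIONARY hashed_array_dictionary_simple_key_simple_attributes;
    {% endfor %}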
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') DATA_FILE=$USER_FILES_PATH/test_02104_null.data echo -e '{"s" : "NULLSome string"}' > $DATA_FILE @@ -15,4 +15,3 @@ echo -e '["NULLSome string"]' > $DATA_FILE $CLICKHOUSE_CLIENT -q "SELECT * FROM file('test_02104_null.data', 'JSONCompactStringsEachRow', 's Nullable(String)')" rm $DATA_FILE - diff --git a/tests/queries/0_stateless/02105_table_function_file_partiotion_by.sh b/tests/queries/0_stateless/02105_table_function_file_partiotion_by.sh index c96725845d7..c79b5d0eee5 100755 --- a/tests/queries/0_stateless/02105_table_function_file_partiotion_by.sh +++ b/tests/queries/0_stateless/02105_table_function_file_partiotion_by.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh # See 01658_read_file_to_string_column.sh -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p "${user_files_path}/" chmod 777 ${user_files_path} @@ -27,4 +27,3 @@ echo 'part 2' ${CLICKHOUSE_CLIENT} --query="select * from file('${FILE_PATH}/2/test_2', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32')"; echo 'part 3' ${CLICKHOUSE_CLIENT} --query="select * from file('${FILE_PATH}/3/test_3', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32')"; - diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index 9ed905a0df8..e89d589857e 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -406,6 +406,7 @@ CREATE TABLE system.mutations `parts_to_do_names` Array(String), `parts_to_do` Int64, `is_done` UInt8, + `is_killed` UInt8, `latest_failed_part` String, `latest_fail_time` DateTime, `latest_fail_reason` String @@ -1086,6 +1087,7 @@ CREATE TABLE system.tables `storage_policy` String, `total_rows` Nullable(UInt64), `total_bytes` Nullable(UInt64), + `total_bytes_uncompressed` Nullable(UInt64), `parts` Nullable(UInt64), `active_parts` Nullable(UInt64), `total_marks` Nullable(UInt64), diff --git a/tests/queries/0_stateless/02118_deserialize_whole_text.sh b/tests/queries/0_stateless/02118_deserialize_whole_text.sh index d4702887e7f..ccbfc5abe97 100755 --- a/tests/queries/0_stateless/02118_deserialize_whole_text.sh +++ b/tests/queries/0_stateless/02118_deserialize_whole_text.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') DATA_FILE=$USER_FILES_PATH/data_02118 echo "[\"[1,2,3]trash\"]" > $DATA_FILE @@ -63,4 +63,3 @@ echo "[\"ed9fd45d-6287-47c1-ad9f-d45d628767c1trash\"]" > $DATA_FILE $CLICKHOUSE_CLIENT -q "SELECT * FROM file('data_02118', 'JSONCompactStringsEachRow', 'x UUID')" 2>&1 | grep -F -q "UNEXPECTED_DATA_AFTER_PARSED_VALUE" && echo 'OK' || echo 'FAIL' rm $DATA_FILE - diff --git a/tests/queries/0_stateless/02125_tskv_proper_names_reading.sh b/tests/queries/0_stateless/02125_tskv_proper_names_reading.sh index 49c895329d6..0abf411d38f 100755 --- a/tests/queries/0_stateless/02125_tskv_proper_names_reading.sh +++ b/tests/queries/0_stateless/02125_tskv_proper_names_reading.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') DATA_FILE=$USER_FILES_PATH/test_02125.data diff --git a/tests/queries/0_stateless/02126_fix_filelog.sh b/tests/queries/0_stateless/02126_fix_filelog.sh index ac2e9d1bd19..b266b582428 100755 --- a/tests/queries/0_stateless/02126_fix_filelog.sh +++ b/tests/queries/0_stateless/02126_fix_filelog.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # Data preparation. # Now we can get the user_files_path by use the table file function for trick. also we can get it by query as: # "insert into function file('exist.txt', 'CSV', 'val1 char') values ('aaaa'); select _path from file('exist.txt', 'CSV', 'val1 char')" -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ diff --git a/tests/queries/0_stateless/02130_parse_quoted_null.sh b/tests/queries/0_stateless/02130_parse_quoted_null.sh index 2da62f9a4ff..0c72d0e85a7 100755 --- a/tests/queries/0_stateless/02130_parse_quoted_null.sh +++ b/tests/queries/0_stateless/02130_parse_quoted_null.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') DATA_FILE=$USER_FILES_PATH/test_02130.data SELECT_QUERY="select * from file('test_02130.data', 'CustomSeparated', 'x Nullable(Float64), y Nullable(UInt64)') settings input_format_parallel_parsing=0, format_custom_escaping_rule='Quoted'" diff --git a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference index 4c1d5dc829f..beda9e36223 100644 --- a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference +++ b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference @@ -1,2 +1,2 @@ -CREATE TABLE _local.table\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') +CREATE TABLE default.table\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') CREATE TABLE foo.table\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') diff --git a/tests/queries/0_stateless/02149_external_schema_inference.sh b/tests/queries/0_stateless/02149_external_schema_inference.sh index 5e03120c80f..41f8bfee2bc 100755 --- a/tests/queries/0_stateless/02149_external_schema_inference.sh +++ b/tests/queries/0_stateless/02149_external_schema_inference.sh @@ -6,13 +6,13 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') FILE_NAME=test_$CLICKHOUSE_TEST_UNIQUE_NAME.data DATA_FILE=$USER_FILES_PATH/$FILE_NAME touch $DATA_FILE -SCHEMADIR=$(clickhouse-client --query "select * from file('$FILE_NAME', 'CapnProto', 'val1 char') settings format_schema='nonexist:Message'" 2>&1 | grep Exception | grep -oP "file \K.*(?=/nonexist.capnp)") +SCHEMADIR=$($CLICKHOUSE_CLIENT_BINARY --query "select * from file('$FILE_NAME', 'CapnProto', 'val1 char') settings format_schema='nonexist:Message'" 2>&1 | grep Exception | grep -oP "file \K.*(?=/nonexist.capnp)") CLIENT_SCHEMADIR=$CURDIR/format_schemas SERVER_SCHEMADIR=test_02149 mkdir -p $SCHEMADIR/$SERVER_SCHEMADIR diff --git a/tests/queries/0_stateless/02149_schema_inference.sh b/tests/queries/0_stateless/02149_schema_inference.sh index b2cc662706c..856549f2215 100755 --- a/tests/queries/0_stateless/02149_schema_inference.sh +++ b/tests/queries/0_stateless/02149_schema_inference.sh @@ -6,13 +6,13 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') FILE_NAME=test_$CLICKHOUSE_TEST_UNIQUE_NAME.data DATA_FILE=${USER_FILES_PATH:?}/$FILE_NAME touch $DATA_FILE -SCHEMADIR=$(clickhouse-client --query "select * from file('$FILE_NAME', 'Template', 'val1 char') settings format_template_row='nonexist'" 2>&1 | grep Exception | grep -oP "file \K.*(?=/nonexist)") +SCHEMADIR=$($CLICKHOUSE_CLIENT_BINARY --query "select * from file('$FILE_NAME', 'Template', 'val1 char') settings format_template_row='nonexist'" 2>&1 | grep Exception | grep -oP "file \K.*(?=/nonexist)") echo "TSV" @@ -248,4 +248,3 @@ $CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'MsgPack') settings inpu rm $SCHEMADIR/resultset_format_02149 $SCHEMADIR/row_format_02149 rm $DATA_FILE - diff --git a/tests/queries/0_stateless/02149_schema_inference_create_table_syntax.sh b/tests/queries/0_stateless/02149_schema_inference_create_table_syntax.sh index f00f2531dd0..8de2ab8c57a 100755 --- a/tests/queries/0_stateless/02149_schema_inference_create_table_syntax.sh +++ b/tests/queries/0_stateless/02149_schema_inference_create_table_syntax.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir $USER_FILES_PATH/test_02149 FILE_NAME=test_02149/data.Parquet DATA_FILE=$USER_FILES_PATH/$FILE_NAME @@ -37,4 +37,3 @@ $CLICKHOUSE_CLIENT -q "select * from test_buffer" $CLICKHOUSE_CLIENT -q "drop table test_buffer" rm -rf ${USER_FILES_PATH:?}/test_02149 - diff --git a/tests/queries/0_stateless/02163_shard_num.reference b/tests/queries/0_stateless/02163_shard_num.reference index 77eea7c95b9..d79b95024f6 100644 --- a/tests/queries/0_stateless/02163_shard_num.reference +++ b/tests/queries/0_stateless/02163_shard_num.reference @@ -1,18 +1,18 @@ -- { echoOn } -SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num; -2 1 +SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num ORDER BY _shard_num; 1 1 -SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num; 2 1 +SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num ORDER BY shard_num; 1 1 -SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num; 2 1 +SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num ORDER BY _shard_num; 1 1 -SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num; 2 1 +SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num ORDER BY shard_num; 1 1 -SELECT a._shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) a 
GROUP BY shard_num; 2 1 +SELECT a._shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) a GROUP BY shard_num ORDER BY shard_num; 1 1 +2 1 SELECT _shard_num FROM remote('127.1', system.one) AS a INNER JOIN (SELECT _shard_num FROM system.one) AS b USING (dummy); -- { serverError UNSUPPORTED_METHOD, UNKNOWN_IDENTIFIER } diff --git a/tests/queries/0_stateless/02163_shard_num.sql b/tests/queries/0_stateless/02163_shard_num.sql index cc87140ebaf..d3b4a95c6a8 100644 --- a/tests/queries/0_stateless/02163_shard_num.sql +++ b/tests/queries/0_stateless/02163_shard_num.sql @@ -1,10 +1,10 @@ -- { echoOn } -SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num; -SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num; -SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num; -SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num; -SELECT a._shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) a GROUP BY shard_num; +SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num ORDER BY _shard_num; +SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num ORDER BY shard_num; +SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num ORDER BY _shard_num; +SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num ORDER BY shard_num; +SELECT a._shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) a GROUP BY shard_num ORDER BY shard_num; SELECT _shard_num FROM remote('127.1', system.one) AS a INNER JOIN (SELECT _shard_num FROM system.one) AS b USING (dummy); -- { serverError UNSUPPORTED_METHOD, UNKNOWN_IDENTIFIER } -- { echoOff } diff --git a/tests/queries/0_stateless/02174_cte_scalar_cache_mv.reference b/tests/queries/0_stateless/02174_cte_scalar_cache_mv.reference index 055c88160ad..8ec3608317f 100644 --- a/tests/queries/0_stateless/02174_cte_scalar_cache_mv.reference +++ b/tests/queries/0_stateless/02174_cte_scalar_cache_mv.reference @@ -18,7 +18,7 @@ 89 89 89 89 5 94 94 94 94 5 99 99 99 99 5 -02177_MV 3 80 26 +02177_MV 7 80 22 10 40 70 @@ -60,4 +60,4 @@ 178 188 198 -02177_MV_3 19 0 2 +02177_MV_3 20 0 1 diff --git a/tests/queries/0_stateless/02178_column_function_insert_from.sql b/tests/queries/0_stateless/02178_column_function_insert_from.sql index 13d1ebb4788..fc692ec859c 100644 --- a/tests/queries/0_stateless/02178_column_function_insert_from.sql +++ b/tests/queries/0_stateless/02178_column_function_insert_from.sql @@ -8,6 +8,9 @@ INSERT INTO TESTTABLE values (0,'0',['1']), (1,'1',['1']); SET max_threads = 1; +-- There is a bug which is fixed in new analyzer. +SET max_bytes_before_external_sort = 0; + SELECT attr, _id, arrayFilter(x -> (x IN (select '1')), attr_list) z FROM TESTTABLE ARRAY JOIN z AS attr ORDER BY _id LIMIT 3 BY attr; diff --git a/tests/queries/0_stateless/02185_orc_corrupted_file.sh b/tests/queries/0_stateless/02185_orc_corrupted_file.sh index c5f5e8710ca..1987f094faa 100755 --- a/tests/queries/0_stateless/02185_orc_corrupted_file.sh +++ b/tests/queries/0_stateless/02185_orc_corrupted_file.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
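02163_shard_num gets the same determinism treatment: with two remote shards the order in which the per-shard aggregates arrive is not fixed, so every GROUP BY in the test now carries a matching ORDER BY and the reference lists shard 1 before shard 2. One of the rewritten queries:

    # Expected output: "1 1" then "2 1", one row per shard.
    $CLICKHOUSE_CLIENT -q "
        SELECT shardNum() AS shard_num, sum(1) AS rows
        FROM remote('127.{1,2}', system, one)
        GROUP BY shard_num ORDER BY shard_num"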
"$CUR_DIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') cp $CUR_DIR/data_orc/corrupted.orc $USER_FILES_PATH/ ${CLICKHOUSE_CLIENT} --query="select * from file('corrupted.orc')" 2>&1 | grep -F -q 'Cannot extract table structure' && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/02185_substring_negative_offset_size.reference b/tests/queries/0_stateless/02185_substring_negative_offset_size.reference deleted file mode 100644 index 107f083a4d8..00000000000 --- a/tests/queries/0_stateless/02185_substring_negative_offset_size.reference +++ /dev/null @@ -1,31 +0,0 @@ - - - -g -g -g -- - - - -g -g -g -- - - - -6 - - - - -- - - - -6 - - - - diff --git a/tests/queries/0_stateless/02185_substring_negative_offset_size.sql b/tests/queries/0_stateless/02185_substring_negative_offset_size.sql deleted file mode 100644 index 715dff30369..00000000000 --- a/tests/queries/0_stateless/02185_substring_negative_offset_size.sql +++ /dev/null @@ -1,48 +0,0 @@ -select substring('abcdefgh', -2, -2); -select substring(materialize('abcdefgh'), -2, -2); -select substring(materialize('abcdefgh'), materialize(-2), materialize(-2)); - -select substring('abcdefgh', -2, -1); -select substring(materialize('abcdefgh'), -2, -1); -select substring(materialize('abcdefgh'), materialize(-2), materialize(-1)); - -select '-'; -select substring(cast('abcdefgh' as FixedString(8)), -2, -2); -select substring(materialize(cast('abcdefgh' as FixedString(8))), -2, -2); -select substring(materialize(cast('abcdefgh' as FixedString(8))), materialize(-2), materialize(-2)); - -select substring(cast('abcdefgh' as FixedString(8)), -2, -1); -select substring(materialize(cast('abcdefgh' as FixedString(8))), -2, -1); -select substring(materialize(cast('abcdefgh' as FixedString(8))), materialize(-2), materialize(-1)); - -select '-'; -drop table if exists t; -create table t -( - s String, - l Int8, - r Int8 -) engine = Memory; - -insert into t values ('abcdefgh', -2, -2),('12345678', -3, -3); - -select substring(s, -2, -2) from t; -select substring(s, l, -2) from t; -select substring(s, -2, r) from t; -select substring(s, l, r) from t; - -select '-'; -drop table if exists t; -create table t( - s FixedString(8), - l Int8, - r Int8 -) engine = Memory; -insert into t values ('abcdefgh', -2, -2),('12345678', -3, -3); - -select substring(s, -2, -2) from t; -select substring(s, l, -2) from t; -select substring(s, -2, r) from t; -select substring(s, l, r) from t; - -drop table if exists t; diff --git a/tests/queries/0_stateless/02222_create_table_without_columns_metadata.sh b/tests/queries/0_stateless/02222_create_table_without_columns_metadata.sh index 9cb4fb939e7..d49c3610852 100755 --- a/tests/queries/0_stateless/02222_create_table_without_columns_metadata.sh +++ b/tests/queries/0_stateless/02222_create_table_without_columns_metadata.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') $CLICKHOUSE_CLIENT -q "insert into table function file(data.jsonl, 'JSONEachRow', 'x UInt32 default 42, y String') select number as x, 'String' as y from numbers(10)" @@ -30,4 +30,3 @@ $CLICKHOUSE_CLIENT -q "detach table test_dist" $CLICKHOUSE_CLIENT -q "drop table test" $CLICKHOUSE_CLIENT -q "attach table test_dist" $CLICKHOUSE_CLIENT --prefer_localhost_replica=1 -q "select * from test_dist" 2>&1 | grep -q "UNKNOWN_TABLE" && echo "OK" || echo "FAIL" - diff --git a/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh b/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh index 941f024825a..bc90f4b2c11 100755 --- a/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh +++ b/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh @@ -17,7 +17,6 @@ opts=( --allow_experimental_parallel_reading_from_replicas 1 --parallel_replicas_for_non_replicated_merge_tree 1 --max_parallel_replicas 3 - --use_hedged_requests 0 --cluster_for_parallel_replicas parallel_replicas --iterations 1 diff --git a/tests/queries/0_stateless/02227_test_create_empty_sqlite_db.sh b/tests/queries/0_stateless/02227_test_create_empty_sqlite_db.sh index 253d3f3149d..39ba17fc7eb 100755 --- a/tests/queries/0_stateless/02227_test_create_empty_sqlite_db.sh +++ b/tests/queries/0_stateless/02227_test_create_empty_sqlite_db.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh # See 01658_read_file_to_string_column.sh -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') function cleanup() { diff --git a/tests/queries/0_stateless/02240_tskv_schema_inference_bug.sh b/tests/queries/0_stateless/02240_tskv_schema_inference_bug.sh index 8655ffd1e1f..ce545a27317 100755 --- a/tests/queries/0_stateless/02240_tskv_schema_inference_bug.sh +++ b/tests/queries/0_stateless/02240_tskv_schema_inference_bug.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') FILE_NAME=test_02240.data DATA_FILE=${USER_FILES_PATH:?}/$FILE_NAME @@ -19,4 +19,3 @@ a=\N c=[3]\ta=\N' > $DATA_FILE $CLICKHOUSE_CLIENT --max_read_buffer_size=4 -q "desc file('$FILE_NAME', 'TSKV')" $CLICKHOUSE_CLIENT --max_read_buffer_size=4 -q "select * from file('$FILE_NAME', 'TSKV')" - diff --git a/tests/queries/0_stateless/02242_arrow_orc_parquet_nullable_schema_inference.sh b/tests/queries/0_stateless/02242_arrow_orc_parquet_nullable_schema_inference.sh index 1b6999e3f09..e03c62cfc5f 100755 --- a/tests/queries/0_stateless/02242_arrow_orc_parquet_nullable_schema_inference.sh +++ b/tests/queries/0_stateless/02242_arrow_orc_parquet_nullable_schema_inference.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') FILE_NAME=test_02242.data DATA_FILE=$USER_FILES_PATH/$FILE_NAME diff --git a/tests/queries/0_stateless/02245_parquet_skip_unknown_type.sh b/tests/queries/0_stateless/02245_parquet_skip_unknown_type.sh index 1e416f23b69..954e2e83f27 100755 --- a/tests/queries/0_stateless/02245_parquet_skip_unknown_type.sh +++ b/tests/queries/0_stateless/02245_parquet_skip_unknown_type.sh @@ -5,11 +5,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') FILE_NAME=test_02245.parquet DATA_FILE=$USER_FILES_PATH/$FILE_NAME -cp $CUR_DIR/data_parquet_bad_column/metadata_0.parquet $DATA_FILE +cp $CUR_DIR/data_parquet_bad_column/metadata_0.parquet $DATA_FILE $CLICKHOUSE_CLIENT -q "desc file(test_02245.parquet)" 2>&1 | grep -qF "Cannot extract table structure" && echo "OK" || echo "FAIL" diff --git a/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.sh b/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.sh index e8aa5914912..233db7a534d 100755 --- a/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.sh +++ b/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') FILE_NAME=test_02149.data DATA_FILE=${USER_FILES_PATH:?}/$FILE_NAME @@ -218,5 +218,3 @@ echo -e "\"[(1, 2, 3)]\""> $DATA_FILE $CLIENT_CMD -q "desc file('$FILE_NAME', 'CSV')" $CLIENT_CMD -q "select * from file('$FILE_NAME', 'CSV')" - - diff --git a/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.sh b/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.sh index 0be26371585..e8e3bf88ac4 100755 --- a/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.sh +++ b/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') FILE_NAME=test_02247.data DATA_FILE=${USER_FILES_PATH:?}/$FILE_NAME diff --git a/tests/queries/0_stateless/02247_read_bools_as_numbers_json.sh b/tests/queries/0_stateless/02247_read_bools_as_numbers_json.sh index 1b689aaf577..523b5934543 100755 --- a/tests/queries/0_stateless/02247_read_bools_as_numbers_json.sh +++ b/tests/queries/0_stateless/02247_read_bools_as_numbers_json.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') FILE_NAME=test_02247.data DATA_FILE=${USER_FILES_PATH:?}/$FILE_NAME diff --git a/tests/queries/0_stateless/02286_mysql_dump_input_format.sh b/tests/queries/0_stateless/02286_mysql_dump_input_format.sh index 1139c1ea68c..a3711497ae8 100755 --- a/tests/queries/0_stateless/02286_mysql_dump_input_format.sh +++ b/tests/queries/0_stateless/02286_mysql_dump_input_format.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') cp $CURDIR/data_mysql_dump/dump*.sql $USER_FILES_PATH diff --git a/tests/queries/0_stateless/02286_parallel_final.reference b/tests/queries/0_stateless/02286_parallel_final.reference index f6573cb9042..5801fb46908 100644 --- a/tests/queries/0_stateless/02286_parallel_final.reference +++ b/tests/queries/0_stateless/02286_parallel_final.reference @@ -1,9 +1,13 @@ +Test intersecting ranges 2 2 3 5 -8 -8 -8 -8 -8 +Test intersecting ranges finished +Test non intersecting ranges +0 +0 +0 +0 +0 +Test non intersecting ranges finished diff --git a/tests/queries/0_stateless/02286_parallel_final.sh b/tests/queries/0_stateless/02286_parallel_final.sh index de0cca0e966..0ac510208f3 100755 --- a/tests/queries/0_stateless/02286_parallel_final.sh +++ b/tests/queries/0_stateless/02286_parallel_final.sh @@ -5,13 +5,17 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +echo "Test intersecting ranges" + test_random_values() { layers=$1 $CLICKHOUSE_CLIENT -n -q " + drop table if exists tbl_8parts_${layers}granules_rnd; create table tbl_8parts_${layers}granules_rnd (key1 UInt32, sign Int8) engine = CollapsingMergeTree(sign) order by (key1) partition by (key1 % 8); insert into tbl_8parts_${layers}granules_rnd select number, 1 from numbers_mt($((layers * 8 * 8192))); optimize table tbl_8parts_${layers}granules_rnd final; - explain pipeline select * from tbl_8parts_${layers}granules_rnd final settings max_threads = 16;" 2>&1 | + explain pipeline select * from tbl_8parts_${layers}granules_rnd final settings max_threads = 16, do_not_merge_across_partitions_select_final = 0; + drop table tbl_8parts_${layers}granules_rnd;" 2>&1 | grep -c "CollapsingSortedTransform" } @@ -19,16 +23,24 @@ for layers in 2 3 5 8; do test_random_values $layers done; +echo "Test intersecting ranges finished" + +echo "Test non intersecting ranges" + test_sequential_values() { layers=$1 $CLICKHOUSE_CLIENT -n -q " + drop table if exists tbl_8parts_${layers}granules_seq; create table tbl_8parts_${layers}granules_seq (key1 UInt32, sign Int8) engine = CollapsingMergeTree(sign) order by (key1) partition by (key1 / $((layers * 8192)))::UInt64; insert into tbl_8parts_${layers}granules_seq select number, 1 from numbers_mt($((layers * 8 * 8192))); optimize table tbl_8parts_${layers}granules_seq final; - explain pipeline select * from tbl_8parts_${layers}granules_seq final settings max_threads = 8;" 2>&1 | + explain pipeline select * from tbl_8parts_${layers}granules_seq final settings max_threads = 8, do_not_merge_across_partitions_select_final = 0; + drop table tbl_8parts_${layers}granules_seq;" 2>&1 | grep -c "CollapsingSortedTransform" } for layers in 2 3 5 8 16; do test_sequential_values $layers done; + +echo "Test non intersecting ranges finished" diff --git a/tests/queries/0_stateless/02293_http_header_full_summary_without_progress.sh b/tests/queries/0_stateless/02293_http_header_full_summary_without_progress.sh index 8f08bd6f84b..a08928a773c 100755 --- a/tests/queries/0_stateless/02293_http_header_full_summary_without_progress.sh +++ 
b/tests/queries/0_stateless/02293_http_header_full_summary_without_progress.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) CURL_OUTPUT=$(echo 'SELECT 1 + sleepEachRow(0.00002) FROM numbers(100000)' | \ - ${CLICKHOUSE_CURL_COMMAND} -v "${CLICKHOUSE_URL}&wait_end_of_query=1&send_progress_in_http_headers=0&max_execution_time=1" --data-binary @- 2>&1) + ${CLICKHOUSE_CURL_COMMAND} -vsS "${CLICKHOUSE_URL}&wait_end_of_query=1&send_progress_in_http_headers=0&max_execution_time=1" --data-binary @- 2>&1) READ_ROWS=$(echo "${CURL_OUTPUT}" | \ grep 'X-ClickHouse-Summary' | \ diff --git a/tests/queries/0_stateless/02294_floating_point_second_in_settings.sh b/tests/queries/0_stateless/02294_floating_point_second_in_settings.sh index b5bf2deb974..7a18b8fea29 100755 --- a/tests/queries/0_stateless/02294_floating_point_second_in_settings.sh +++ b/tests/queries/0_stateless/02294_floating_point_second_in_settings.sh @@ -38,4 +38,4 @@ check_output "${OUTPUT}" # CHECK system.settings echo "TABLE: system.settings" -echo "SELECT name, value, changed from system.settings where name = 'max_execution_time'" | clickhouse-client --max_execution_time 30.5 +echo "SELECT name, value, changed from system.settings where name = 'max_execution_time'" | $CLICKHOUSE_CLIENT_BINARY --max_execution_time 30.5 diff --git a/tests/queries/0_stateless/02297_regex_parsing_file_names.sh b/tests/queries/0_stateless/02297_regex_parsing_file_names.sh index 12ccb54235b..5973e24844a 100755 --- a/tests/queries/0_stateless/02297_regex_parsing_file_names.sh +++ b/tests/queries/0_stateless/02297_regex_parsing_file_names.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # Now we can get the user_files_path by use the table file function for trick. also we can get it by query as: # "insert into function file('exist.txt', 'CSV', 'val1 char') values ('aaaa'); select _path from file('exist.txt', 'CSV', 'val1 char')" -CLICKHOUSE_USER_FILES_PATH=$(clickhouse-client --query "select _path, _file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +CLICKHOUSE_USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path, _file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p ${CLICKHOUSE_USER_FILES_PATH}/ diff --git a/tests/queries/0_stateless/02311_hashed_array_dictionary_hierarchical_functions.reference b/tests/queries/0_stateless/02311_hashed_array_dictionary_hierarchical_functions.reference index 7f4ba0901b6..0b0b4175e1f 100644 --- a/tests/queries/0_stateless/02311_hashed_array_dictionary_hierarchical_functions.reference +++ b/tests/queries/0_stateless/02311_hashed_array_dictionary_hierarchical_functions.reference @@ -33,3 +33,38 @@ Get descendants at first level [] [] [] +Get hierarchy +[] +[1] +[2,1] +[3,1] +[4,2,1] +[] +Get is in hierarchy +0 +1 +1 +1 +1 +0 +Get children +[1] +[2,3] +[4] +[] +[] +[] +Get all descendants +[1,2,3,4] +[2,3,4] +[4] +[] +[] +[] +Get descendants at first level +[1] +[2,3] +[4] +[] +[] +[] diff --git a/tests/queries/0_stateless/02311_hashed_array_dictionary_hierarchical_functions.sql b/tests/queries/0_stateless/02311_hashed_array_dictionary_hierarchical_functions.sql.j2 similarity index 91% rename from tests/queries/0_stateless/02311_hashed_array_dictionary_hierarchical_functions.sql rename to tests/queries/0_stateless/02311_hashed_array_dictionary_hierarchical_functions.sql.j2 index a775f0e5cbf..bc13bcfdb09 100644 --- 
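
A minimal sketch of the USER_FILES_PATH discovery trick these scripts share, assuming shell_config.sh has been sourced so $CLICKHOUSE_CLIENT_BINARY is defined: query a file that does not exist under user_files, then recover the directory from the exception text.

    #!/usr/bin/env bash
    # The file() call fails because nonexist.txt is missing; the exception
    # message contains the absolute path, and awk strips the file name off.
    USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query \
        "select _path, _file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 \
        | grep Exception \
        | awk '{gsub("/nonexist.txt","",$9); print $9}')
    echo "server user_files_path: ${USER_FILES_PATH:?}"
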
a/tests/queries/0_stateless/02311_hashed_array_dictionary_hierarchical_functions.sql +++ b/tests/queries/0_stateless/02311_hashed_array_dictionary_hierarchical_functions.sql.j2 @@ -7,6 +7,8 @@ CREATE TABLE hierarchy_source_table INSERT INTO hierarchy_source_table VALUES (1, 0), (2, 1), (3, 1), (4, 2); +{% for dictionary_config in ['', 'SHARDS 16'] -%} + DROP DICTIONARY IF EXISTS hierarchy_hashed_array_dictionary; CREATE DICTIONARY hierarchy_hashed_array_dictionary ( @@ -15,7 +17,7 @@ CREATE DICTIONARY hierarchy_hashed_array_dictionary ) PRIMARY KEY id SOURCE(CLICKHOUSE(TABLE 'hierarchy_source_table')) -LAYOUT(HASHED_ARRAY()) +LAYOUT(HASHED_ARRAY({{ dictionary_config }})) LIFETIME(MIN 1 MAX 1000); SELECT 'Get hierarchy'; @@ -29,6 +31,8 @@ SELECT dictGetDescendants('hierarchy_hashed_array_dictionary', number) FROM syst SELECT 'Get descendants at first level'; SELECT dictGetDescendants('hierarchy_hashed_array_dictionary', number, 1) FROM system.numbers LIMIT 6; +{% endfor %} + DROP DICTIONARY hierarchy_hashed_array_dictionary; DROP TABLE hierarchy_source_table; diff --git a/tests/queries/0_stateless/02316_hierarchical_dictionaries_nullable_parent_key.reference b/tests/queries/0_stateless/02316_hierarchical_dictionaries_nullable_parent_key.reference index 60d9fb16c5f..ab6a247219b 100644 --- a/tests/queries/0_stateless/02316_hierarchical_dictionaries_nullable_parent_key.reference +++ b/tests/queries/0_stateless/02316_hierarchical_dictionaries_nullable_parent_key.reference @@ -106,6 +106,42 @@ Get descendants at first level [] [] [] +HashedArray dictionary +Get hierarchy +[0] +[1,0] +[2,1,0] +[3] +[4,2,1,0] +[] +Get is in hierarchy +1 +1 +1 +1 +1 +0 +Get children +[1] +[2] +[4] +[] +[] +[] +Get all descendants +[1,2,4] +[2,4] +[4] +[] +[] +[] +Get descendants at first level +[1] +[2] +[4] +[] +[] +[] Cache dictionary Get hierarchy [0] diff --git a/tests/queries/0_stateless/02316_hierarchical_dictionaries_nullable_parent_key.sql b/tests/queries/0_stateless/02316_hierarchical_dictionaries_nullable_parent_key.sql.j2 similarity index 97% rename from tests/queries/0_stateless/02316_hierarchical_dictionaries_nullable_parent_key.sql rename to tests/queries/0_stateless/02316_hierarchical_dictionaries_nullable_parent_key.sql.j2 index d477d58d398..b456495513e 100644 --- a/tests/queries/0_stateless/02316_hierarchical_dictionaries_nullable_parent_key.sql +++ b/tests/queries/0_stateless/02316_hierarchical_dictionaries_nullable_parent_key.sql.j2 @@ -56,7 +56,7 @@ SELECT 'Get descendants at first level'; SELECT dictGetDescendants('hierachical_hashed_dictionary', number, 1) FROM system.numbers LIMIT 6; DROP DICTIONARY hierachical_hashed_dictionary; - +{% for dictionary_config in ['', 'SHARDS 16'] -%} DROP DICTIONARY IF EXISTS hierachical_hashed_array_dictionary; CREATE DICTIONARY hierachical_hashed_array_dictionary ( @@ -64,7 +64,7 @@ CREATE DICTIONARY hierachical_hashed_array_dictionary parent_id Nullable(UInt64) HIERARCHICAL ) PRIMARY KEY id SOURCE(CLICKHOUSE(TABLE 'test_hierarhical_table')) -LAYOUT(HASHED_ARRAY()) +LAYOUT(HASHED_ARRAY({{ dictionary_config }})) LIFETIME(0); SELECT 'HashedArray dictionary'; @@ -82,6 +82,8 @@ SELECT dictGetDescendants('hierachical_hashed_array_dictionary', number, 1) FROM DROP DICTIONARY hierachical_hashed_array_dictionary; +{% endfor %} + DROP DICTIONARY IF EXISTS hierachical_cache_dictionary; CREATE DICTIONARY hierachical_cache_dictionary ( diff --git a/tests/queries/0_stateless/02327_capnproto_protobuf_empty_messages.sh 
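
A minimal sketch of what the new {% for dictionary_config in ['', 'SHARDS 16'] -%} loop in the renamed .sql.j2 files amounts to once rendered; the loop is shown as plain bash, and only the LAYOUT clause is spelled out since the rest of each CREATE DICTIONARY statement is identical between iterations.

    #!/usr/bin/env bash
    # Each template iteration re-creates the same dictionary with a different
    # argument spliced into LAYOUT(HASHED_ARRAY(...)).
    for dictionary_config in "" "SHARDS 16"; do
        echo "LAYOUT(HASHED_ARRAY(${dictionary_config}))"
    done
    # Rendered output:
    #   LAYOUT(HASHED_ARRAY())
    #   LAYOUT(HASHED_ARRAY(SHARDS 16))
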
b/tests/queries/0_stateless/02327_capnproto_protobuf_empty_messages.sh index 69e65112305..dfc0dedeaf1 100755 --- a/tests/queries/0_stateless/02327_capnproto_protobuf_empty_messages.sh +++ b/tests/queries/0_stateless/02327_capnproto_protobuf_empty_messages.sh @@ -5,10 +5,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') touch $USER_FILES_PATH/data.capnp -SCHEMADIR=$(clickhouse-client --query "select * from file('data.capnp', 'CapnProto', 'val1 char') settings format_schema='nonexist:Message'" 2>&1 | grep Exception | grep -oP "file \K.*(?=/nonexist.capnp)") +SCHEMADIR=$($CLICKHOUSE_CLIENT_BINARY --query "select * from file('data.capnp', 'CapnProto', 'val1 char') settings format_schema='nonexist:Message'" 2>&1 | grep Exception | grep -oP "file \K.*(?=/nonexist.capnp)") CLIENT_SCHEMADIR=$CURDIR/format_schemas SERVER_SCHEMADIR=test_02327 mkdir -p $SCHEMADIR/$SERVER_SCHEMADIR diff --git a/tests/queries/0_stateless/02352_lightweight_delete.reference b/tests/queries/0_stateless/02352_lightweight_delete.reference index 3386b3294c3..ce7c6e81ac8 100644 --- a/tests/queries/0_stateless/02352_lightweight_delete.reference +++ b/tests/queries/0_stateless/02352_lightweight_delete.reference @@ -26,7 +26,7 @@ Rows in parts 800000 Count 700000 First row 300000 1 Do ALTER DELETE mutation that does a "heavyweight" delete -Rows in parts 533333 +Rows in parts 466666 Count 466666 First row 300001 10 Delete 100K more rows using lightweight DELETE diff --git a/tests/queries/0_stateless/02352_rwlock.sh b/tests/queries/0_stateless/02352_rwlock.sh index 7de2c7089b8..08551794c2e 100755 --- a/tests/queries/0_stateless/02352_rwlock.sh +++ b/tests/queries/0_stateless/02352_rwlock.sh @@ -21,7 +21,7 @@ function wait_query_by_id_started() # wait for query to be started while [ "$($CLICKHOUSE_CLIENT "$@" -q "select count() from system.processes where query_id = '$query_id'")" -ne 1 ]; do if [ "$( - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT --max_bytes_before_external_group_by 0 -nm -q " system flush logs; select count() from system.query_log @@ -56,7 +56,7 @@ while :; do insert_query_id="insert-$(random_str 10)" # 20 seconds sleep - $CLICKHOUSE_CLIENT --function_sleep_max_microseconds_per_block 20000000 --query_id "$insert_query_id" -q "INSERT INTO ${CLICKHOUSE_DATABASE}_ordinary.data_02352 SELECT sleepEachRow(1) FROM numbers(20) GROUP BY number" & + $CLICKHOUSE_CLIENT --function_sleep_max_microseconds_per_block 20000000 --max_bytes_before_external_group_by 0 --query_id "$insert_query_id" -q "INSERT INTO ${CLICKHOUSE_DATABASE}_ordinary.data_02352 SELECT sleepEachRow(1) FROM numbers(20) GROUP BY number" & if ! wait_query_by_id_started "$insert_query_id"; then wait continue diff --git a/tests/queries/0_stateless/02353_compression_level.sh b/tests/queries/0_stateless/02353_compression_level.sh index b08dc1e204c..8d6a9c899ad 100755 --- a/tests/queries/0_stateless/02353_compression_level.sh +++ b/tests/queries/0_stateless/02353_compression_level.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') WORKING_FOLDER_02353="${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}" rm -rf "${WORKING_FOLDER_02353}" diff --git a/tests/queries/0_stateless/02358_file_default_value.sh b/tests/queries/0_stateless/02358_file_default_value.sh index 7d2cb75c4e4..a7c4c17c129 100755 --- a/tests/queries/0_stateless/02358_file_default_value.sh +++ b/tests/queries/0_stateless/02358_file_default_value.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') WORKING_FOLDER_02357="${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}" rm -rf "${WORKING_FOLDER_02357}" diff --git a/tests/queries/0_stateless/02360_clickhouse_local_config-option.sh b/tests/queries/0_stateless/02360_clickhouse_local_config-option.sh index df0bdf38b4d..b58cfd7ec21 100755 --- a/tests/queries/0_stateless/02360_clickhouse_local_config-option.sh +++ b/tests/queries/0_stateless/02360_clickhouse_local_config-option.sh @@ -6,6 +6,9 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh +SAFE_DIR="${CUR_DIR}/${CLICKHOUSE_DATABASE}_02360_local" +mkdir -p "${SAFE_DIR}" + echo " trace @@ -14,7 +17,7 @@ echo " 9000 - ./ + ${SAFE_DIR} 0 @@ -23,7 +26,7 @@ echo " users.xml -" > $CUR_DIR/config.xml +" > $SAFE_DIR/config.xml echo " @@ -42,13 +45,12 @@ echo " - " > $CUR_DIR/users.xml + " > $SAFE_DIR/users.xml local_opts=( - "--config-file=$CUR_DIR/config.xml" + "--config-file=$SAFE_DIR/config.xml" "--send_logs_level=none") ${CLICKHOUSE_LOCAL} "${local_opts[@]}" --query 'Select 1' |& grep -v -e 'Processing configuration file' -rm -rf $CUR_DIR/users.xml -rm -rf $CUR_DIR/config.xml +rm -rf "${SAFE_DIR}" diff --git a/tests/queries/0_stateless/02366_kql_extend.sql b/tests/queries/0_stateless/02366_kql_extend.sql index 3de489b0815..0a3c1f3dcd4 100644 --- a/tests/queries/0_stateless/02366_kql_extend.sql +++ b/tests/queries/0_stateless/02366_kql_extend.sql @@ -12,16 +12,19 @@ -- 'Costco','Snargaluff',200,'2016-09-10', -- ] + DROP TABLE IF EXISTS Ledger; CREATE TABLE Ledger -( +( Supplier Nullable(String), Fruit String , Price Float64, - Purchase Date + Purchase Date ) ENGINE = Memory; INSERT INTO Ledger VALUES ('Aldi','Apple',4,'2016-09-10'), ('Costco','Apple',2,'2016-09-11'), ('Aldi','Apple',6,'2016-09-10'), ('Costco','Snargaluff',100,'2016-09-12'), ('Aldi','Apple',7,'2016-09-12'), ('Aldi','Snargaluff',400,'2016-09-11'),('Costco','Snargaluff',104,'2016-09-12'),('Aldi','Apple',5,'2016-09-12'),('Aldi','Snargaluff',600,'2016-09-11'),('Costco','Snargaluff',200,'2016-09-10'); +-- This test requies sorting after some of aggregations but I don't know KQL, sorry +set max_bytes_before_external_group_by = 0; set dialect = 'kusto'; print '-- extend #1 --'; diff --git a/tests/queries/0_stateless/02366_kql_makeseries.sql b/tests/queries/0_stateless/02366_kql_makeseries.sql index ecf2ef43cc4..c9ca91c0be0 100644 --- a/tests/queries/0_stateless/02366_kql_makeseries.sql +++ b/tests/queries/0_stateless/02366_kql_makeseries.sql @@ -14,31 +14,34 @@ -- ]; DROP TABLE IF EXISTS make_series_test_table; CREATE TABLE make_series_test_table -( +( Supplier Nullable(String), Fruit String , Price Float64, - Purchase Date + Purchase Date ) ENGINE = Memory; INSERT INTO make_series_test_table VALUES ('Aldi','Apple',4,'2016-09-10'), ('Costco','Apple',2,'2016-09-11'), ('Aldi','Apple',6,'2016-09-10'), ('Costco','Snargaluff',100,'2016-09-12'), ('Aldi','Apple',7,'2016-09-12'), ('Aldi','Snargaluff',400,'2016-09-11'),('Costco','Snargaluff',104,'2016-09-12'),('Aldi','Apple',5,'2016-09-12'),('Aldi','Snargaluff',600,'2016-09-11'),('Costco','Snargaluff',200,'2016-09-10'); DROP TABLE IF EXISTS make_series_test_table2; CREATE TABLE make_series_test_table2 -( +( Supplier Nullable(String), Fruit String , Price Int32, - Purchase Int32 + Purchase Int32 ) ENGINE = Memory; INSERT INTO make_series_test_table2 VALUES ('Aldi','Apple',4,10),('Costco','Apple',2,11),('Aldi','Apple',6,10),('Costco','Snargaluff',100,12),('Aldi','Apple',7,12),('Aldi','Snargaluff',400,11),('Costco','Snargaluff',104,12),('Aldi','Apple',5,12),('Aldi','Snargaluff',600,11),('Costco','Snargaluff',200,10); DROP TABLE IF EXISTS make_series_test_table3; CREATE TABLE make_series_test_table3 -( +( timestamp datetime, metric Float64, ) ENGINE = Memory; INSERT INTO make_series_test_table3 VALUES (parseDateTimeBestEffort('2016-12-31T06:00', 'UTC'), 50), (parseDateTimeBestEffort('2017-01-01', 'UTC'), 4), (parseDateTimeBestEffort('2017-01-02', 'UTC'), 3), (parseDateTimeBestEffort('2017-01-03', 'UTC'), 4), (parseDateTimeBestEffort('2017-01-03T03:00', 
'UTC'), 6), (parseDateTimeBestEffort('2017-01-05', 'UTC'), 8), (parseDateTimeBestEffort('2017-01-05T13:40', 'UTC'), 13), (parseDateTimeBestEffort('2017-01-06', 'UTC'), 4), (parseDateTimeBestEffort('2017-01-07', 'UTC'), 3), (parseDateTimeBestEffort('2017-01-08', 'UTC'), 8), (parseDateTimeBestEffort('2017-01-08T21:00', 'UTC'), 8), (parseDateTimeBestEffort('2017-01-09', 'UTC'), 2), (parseDateTimeBestEffort('2017-01-09T12:00', 'UTC'), 11), (parseDateTimeBestEffort('2017-01-10T05:00', 'UTC'), 5); +-- This test requies sorting after some of aggregations but I don't know KQL, sorry +set max_bytes_before_external_group_by = 0; set dialect = 'kusto'; + print '-- from to'; make_series_test_table | make-series PriceAvg = avg(Price) default=0 on Purchase from datetime(2016-09-10) to datetime(2016-09-13) step 1d by Supplier, Fruit | order by Supplier, Fruit; print '-- from'; @@ -68,7 +71,7 @@ make_series_test_table2 | make-series PriceAvg=avg(Price) default=0 on Purchase print '-- without by'; make_series_test_table2 | make-series PriceAvg=avg(Price) default=0 on Purchase step 2.0; -make_series_test_table3 | make-series avg(metric) default=0 on timestamp from datetime(2017-01-01) to datetime(2017-01-10) step 1d +make_series_test_table3 | make-series avg(metric) default=0 on timestamp from datetime(2017-01-01) to datetime(2017-01-10) step 1d -- print '-- summarize --' -- make_series_test_table | summarize count() by format_datetime(bin(Purchase, 1d), 'yy-MM-dd'); diff --git a/tests/queries/0_stateless/02373_heap_buffer_overflow_in_avro.sh b/tests/queries/0_stateless/02373_heap_buffer_overflow_in_avro.sh index 23d6b722c09..3461287d28a 100755 --- a/tests/queries/0_stateless/02373_heap_buffer_overflow_in_avro.sh +++ b/tests/queries/0_stateless/02373_heap_buffer_overflow_in_avro.sh @@ -5,9 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') cp $CURDIR/data_avro/corrupted.avro $USER_FILES_PATH/ $CLICKHOUSE_CLIENT -q "select * from file(corrupted.avro)" 2>&1 | grep -F -q "Cannot read compressed data" && echo "OK" || echo "FAIL" - diff --git a/tests/queries/0_stateless/02373_progress_contain_result.sh b/tests/queries/0_stateless/02373_progress_contain_result.sh index c87a5ec7615..fd343df1013 100755 --- a/tests/queries/0_stateless/02373_progress_contain_result.sh +++ b/tests/queries/0_stateless/02373_progress_contain_result.sh @@ -5,5 +5,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh echo 'SELECT 1 FROM numbers(100)' | - ${CLICKHOUSE_CURL_COMMAND} -v "${CLICKHOUSE_URL}&wait_end_of_query=1&send_progress_in_http_headers=0" --data-binary @- 2>&1 | + ${CLICKHOUSE_CURL_COMMAND} -vsS "${CLICKHOUSE_URL}&wait_end_of_query=1&send_progress_in_http_headers=0" --data-binary @- 2>&1 | grep 'X-ClickHouse-Summary' | sed 's/,\"elapsed_ns[^}]*//' diff --git a/tests/queries/0_stateless/02375_system_schema_inference_cache.reference b/tests/queries/0_stateless/02375_system_schema_inference_cache.reference index 94bdf1b5ddb..676fb441f53 100644 --- a/tests/queries/0_stateless/02375_system_schema_inference_cache.reference +++ b/tests/queries/0_stateless/02375_system_schema_inference_cache.reference @@ -5,6 +5,7 @@ additional_format_info String registration_time DateTime schema Nullable(String) number_of_rows Nullable(UInt64) +schema_inference_mode Nullable(String) x Nullable(Int64) s Nullable(String) x Nullable(Int64) diff --git a/tests/queries/0_stateless/02375_system_schema_inference_cache.sql b/tests/queries/0_stateless/02375_system_schema_inference_cache.sql index 6f656b16c69..310e22ed31f 100644 --- a/tests/queries/0_stateless/02375_system_schema_inference_cache.sql +++ b/tests/queries/0_stateless/02375_system_schema_inference_cache.sql @@ -1,4 +1,4 @@ --- Tags: no-fasttest +-- Tags: no-fasttest, no-parallel set input_format_json_try_infer_numbers_from_strings=1; insert into function file('02374_data1.jsonl') select number as x, 'str' as s from numbers(10); diff --git a/tests/queries/0_stateless/02383_arrow_dict_special_cases.sh b/tests/queries/0_stateless/02383_arrow_dict_special_cases.sh index 40487f16551..80743a97dd0 100755 --- a/tests/queries/0_stateless/02383_arrow_dict_special_cases.sh +++ b/tests/queries/0_stateless/02383_arrow_dict_special_cases.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') UNIQ_DEST_PATH=$USER_FILES_PATH/test-02383-$RANDOM-$RANDOM mkdir -p $UNIQ_DEST_PATH diff --git a/tests/queries/0_stateless/02402_capnp_format_segments_overflow.sh b/tests/queries/0_stateless/02402_capnp_format_segments_overflow.sh index 244b94d9189..8aad68ffe5c 100755 --- a/tests/queries/0_stateless/02402_capnp_format_segments_overflow.sh +++ b/tests/queries/0_stateless/02402_capnp_format_segments_overflow.sh @@ -5,11 +5,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p $USER_FILES_PATH/test_02402 cp $CURDIR/data_capnp/overflow.capnp $USER_FILES_PATH/test_02402/ -SCHEMADIR=$(clickhouse-client --query "select * from file('test_02402/overflow.capnp', 'CapnProto', 'val1 char') settings format_schema='nonexist:Message'" 2>&1 | grep Exception | grep -oP "file \K.*(?=/nonexist.capnp)") +SCHEMADIR=$($CLICKHOUSE_CLIENT_BINARY --query "select * from file('test_02402/overflow.capnp', 'CapnProto', 'val1 char') settings format_schema='nonexist:Message'" 2>&1 | grep Exception | grep -oP "file \K.*(?=/nonexist.capnp)") CLIENT_SCHEMADIR=$CURDIR/format_schemas SERVER_SCHEMADIR=test_02402 diff --git a/tests/queries/0_stateless/02406_minmax_behaviour.reference b/tests/queries/0_stateless/02406_minmax_behaviour.reference new file mode 100644 index 00000000000..d52ba640a0e --- /dev/null +++ b/tests/queries/0_stateless/02406_minmax_behaviour.reference @@ -0,0 +1,192 @@ +-- { echoOn } +SET compile_aggregate_expressions=0; +WITH + arrayJoin([1, 2, 3, nan, 4, 5]) AS data, + arrayJoin([nan, 1, 2, 3, 4]) AS data2, + arrayJoin([1, 2, 3, 4, nan]) AS data3, + arrayJoin([nan, nan, nan]) AS data4, + arrayJoin([nan, 1, 2, 3, nan]) AS data5 +SELECT + min(data), + min(data2), + min(data3), + min(data4), + min(data5); +1 nan 1 nan nan +WITH + arrayJoin([1, 2, 3, nan, 4, 5]) AS data, + arrayJoin([nan, 1, 2, 3, 4]) AS data2, + arrayJoin([1, 2, 3, 4, nan]) AS data3, + arrayJoin([nan, nan, nan]) AS data4, + arrayJoin([nan, 1, 2, 3, nan]) AS data5 +SELECT + max(data), + max(data2), + max(data3), + max(data4), + max(data5); +5 nan 4 nan nan +Select max(number) from numbers(100) settings max_threads=1, max_block_size=10; +99 +Select max(-number) from numbers(100); +0 +Select min(number) from numbers(100) settings max_threads=1, max_block_size=10; +0 +Select min(-number) from numbers(100); +-99 +SELECT minIf(number, rand() % 2 == 3) from numbers(10) settings max_threads=1, max_block_size=5; +0 +SELECT maxIf(number, rand() % 2 == 3) from numbers(10) settings max_threads=1, max_block_size=5; +0 +SELECT minIf(number::Float64, rand() % 2 == 3) from numbers(10) settings max_threads=1, max_block_size=5; +0 +SELECT maxIf(number::Float64, rand() % 2 == 3) from numbers(10) settings max_threads=1, max_block_size=5; +0 +SELECT minIf(number::String, number < 10) as number from numbers(10, 1000); + +SELECT maxIf(number::String, number < 10) as number from numbers(10, 1000); + +SELECT maxIf(number::String, number % 3), maxIf(number::String, number % 5), minIf(number::String, number % 3), minIf(number::String, number > 10) from numbers(400); +98 99 1 100 +SELECT minIf(number::Nullable(String), number < 10) as number from numbers(10, 1000); +\N +SELECT maxIf(number::Nullable(String), number < 10) as number from numbers(10, 1000); +\N +SELECT min(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); +22 +SELECT max(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); +26 +SELECT argMax(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, 
max_block_size=100; +10 +SELECT argMax(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +10 +SELECT argMax(number, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +10 +SELECT argMax(number, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +10 +SELECT argMax(number::String, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +10 +SELECT argMax(number::String, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +10 +SELECT argMax(number, now() + number) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +10009 +SELECT argMax(number, now() + number) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +10009 +SELECT argMaxIf(number, now() + number, number % 10 < 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +10009 +SELECT argMaxIf(number, now() + number, number % 10 < 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +10009 +SELECT argMaxIf(number, now() + number, number % 10 > 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +0 +SELECT argMaxIf(number, now() + number, number % 10 > 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +0 +SELECT argMax(number, number::Float64) from numbers(2029); +2028 +SELECT argMaxIf(number, number::Float64, number > 2030) from numbers(2029); +0 +SELECT argMaxIf(number, number::Float64, number > 2030) from numbers(2032); +2031 +SELECT argMax(number, -number::Float64) from numbers(2029); +0 +SELECT argMaxIf(number, -number::Float64, number > 2030) from numbers(2029); +0 +SELECT argMaxIf(number, -number::Float64, number > 2030) from numbers(2032); +2031 +SELECT argMin(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +10 +SELECT argMin(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +10 +SELECT argMin(number, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +10 +SELECT argMin(number, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +10 +SELECT argMin(number::String, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +10 +SELECT argMin(number::String, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +10 +SELECT argMin(number, now() + number) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +10 +SELECT argMin(number, now() + number) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +10 +SELECT argMinIf(number, now() + number, number % 10 < 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +10 +SELECT argMinIf(number, now() + number, number % 10 < 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +10 +SELECT argMinIf(number, now() + 
number, number % 10 > 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +0 +SELECT argMinIf(number, now() + number, number % 10 > 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +0 +SELECT argMin(number, number::Float64) from numbers(2029); +0 +SELECT argMinIf(number, number::Float64, number > 2030) from numbers(2029); +0 +SELECT argMinIf(number, number::Float64, number > 2030) from numbers(2032); +2031 +SELECT argMin(number, -number::Float64) from numbers(2029); +2028 +SELECT argMinIf(number, -number::Float64, number > 2030) from numbers(2029); +0 +SELECT argMinIf(number, -number::Float64, number > 2030) from numbers(2032); +2031 +Select argMax((n, n), n) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); +(8,8) Tuple(Nullable(UInt64), Nullable(UInt64)) +Select argMaxIf((n, n), n, n < 5) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); +(4,4) Tuple(Nullable(UInt64), Nullable(UInt64)) +Select argMaxIf((n, n), n, n > 5) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); +(8,8) Tuple(Nullable(UInt64), Nullable(UInt64)) +Select argMin((n, n), n) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); +(1,1) Tuple(Nullable(UInt64), Nullable(UInt64)) +Select argMinIf((n, n), n, n < 5) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); +(1,1) Tuple(Nullable(UInt64), Nullable(UInt64)) +Select argMinIf((n, n), n, n > 5) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); +(7,7) Tuple(Nullable(UInt64), Nullable(UInt64)) +SET compile_aggregate_expressions=1; +SET min_count_to_compile_aggregate_expression=0; +WITH + arrayJoin([1, 2, 3, nan, 4, 5]) AS data, + arrayJoin([nan, 1, 2, 3, 4]) AS data2, + arrayJoin([1, 2, 3, 4, nan]) AS data3, + arrayJoin([nan, nan, nan]) AS data4, + arrayJoin([nan, 1, 2, 3, nan]) AS data5 +SELECT + min(data), + min(data2), + min(data3), + min(data4), + min(data5); +1 nan 1 nan nan +WITH + arrayJoin([1, 2, 3, nan, 4, 5]) AS data, + arrayJoin([nan, 1, 2, 3, 4]) AS data2, + arrayJoin([1, 2, 3, 4, nan]) AS data3, + arrayJoin([nan, nan, nan]) AS data4, + arrayJoin([nan, 1, 2, 3, nan]) AS data5 +SELECT + max(data), + max(data2), + max(data3), + max(data4), + max(data5); +5 nan 4 nan nan +SELECT minIf(number, rand() % 2 == 3) from numbers(10); +0 +SELECT maxIf(number, rand() % 2 == 3) from numbers(10); +0 +SELECT minIf(number::Float64, rand() % 2 == 3) from numbers(10); +0 +SELECT maxIf(number::Float64, rand() % 2 == 3) from numbers(10); +0 +SELECT minIf(number::String, number < 10) as number from numbers(10, 1000); + +SELECT maxIf(number::String, number < 10) as number from numbers(10, 1000); + +SELECT maxIf(number::String, number % 3), maxIf(number::String, number % 5), minIf(number::String, number % 3), minIf(number::String, number > 10) from numbers(400); +98 99 1 100 +SELECT minIf(number::Nullable(String), number < 10) as number from numbers(10, 1000); +\N +SELECT maxIf(number::Nullable(String), number < 10) as number from numbers(10, 1000); +\N +SELECT min(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); +22 +SELECT max(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); +26 diff --git 
a/tests/queries/0_stateless/02406_minmax_behaviour.sql b/tests/queries/0_stateless/02406_minmax_behaviour.sql new file mode 100644 index 00000000000..a3afe7d40b0 --- /dev/null +++ b/tests/queries/0_stateless/02406_minmax_behaviour.sql @@ -0,0 +1,140 @@ +-- { echoOn } +SET compile_aggregate_expressions=0; + +WITH + arrayJoin([1, 2, 3, nan, 4, 5]) AS data, + arrayJoin([nan, 1, 2, 3, 4]) AS data2, + arrayJoin([1, 2, 3, 4, nan]) AS data3, + arrayJoin([nan, nan, nan]) AS data4, + arrayJoin([nan, 1, 2, 3, nan]) AS data5 +SELECT + min(data), + min(data2), + min(data3), + min(data4), + min(data5); + +WITH + arrayJoin([1, 2, 3, nan, 4, 5]) AS data, + arrayJoin([nan, 1, 2, 3, 4]) AS data2, + arrayJoin([1, 2, 3, 4, nan]) AS data3, + arrayJoin([nan, nan, nan]) AS data4, + arrayJoin([nan, 1, 2, 3, nan]) AS data5 +SELECT + max(data), + max(data2), + max(data3), + max(data4), + max(data5); + +Select max(number) from numbers(100) settings max_threads=1, max_block_size=10; +Select max(-number) from numbers(100); +Select min(number) from numbers(100) settings max_threads=1, max_block_size=10; +Select min(-number) from numbers(100); + +SELECT minIf(number, rand() % 2 == 3) from numbers(10) settings max_threads=1, max_block_size=5; +SELECT maxIf(number, rand() % 2 == 3) from numbers(10) settings max_threads=1, max_block_size=5; + +SELECT minIf(number::Float64, rand() % 2 == 3) from numbers(10) settings max_threads=1, max_block_size=5; +SELECT maxIf(number::Float64, rand() % 2 == 3) from numbers(10) settings max_threads=1, max_block_size=5; + +SELECT minIf(number::String, number < 10) as number from numbers(10, 1000); +SELECT maxIf(number::String, number < 10) as number from numbers(10, 1000); +SELECT maxIf(number::String, number % 3), maxIf(number::String, number % 5), minIf(number::String, number % 3), minIf(number::String, number > 10) from numbers(400); + +SELECT minIf(number::Nullable(String), number < 10) as number from numbers(10, 1000); +SELECT maxIf(number::Nullable(String), number < 10) as number from numbers(10, 1000); + +SELECT min(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); +SELECT max(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); + +SELECT argMax(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMax(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMax(number, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMax(number, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMax(number::String, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMax(number::String, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMax(number, now() + number) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMax(number, now() + number) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMaxIf(number, now() + number, number % 10 < 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMaxIf(number, now() + 
number, number % 10 < 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMaxIf(number, now() + number, number % 10 > 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMaxIf(number, now() + number, number % 10 > 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMax(number, number::Float64) from numbers(2029); +SELECT argMaxIf(number, number::Float64, number > 2030) from numbers(2029); +SELECT argMaxIf(number, number::Float64, number > 2030) from numbers(2032); +SELECT argMax(number, -number::Float64) from numbers(2029); +SELECT argMaxIf(number, -number::Float64, number > 2030) from numbers(2029); +SELECT argMaxIf(number, -number::Float64, number > 2030) from numbers(2032); + +SELECT argMin(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMin(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMin(number, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMin(number, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMin(number::String, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMin(number::String, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMin(number, now() + number) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMin(number, now() + number) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMinIf(number, now() + number, number % 10 < 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMinIf(number, now() + number, number % 10 < 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMinIf(number, now() + number, number % 10 > 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMinIf(number, now() + number, number % 10 > 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMin(number, number::Float64) from numbers(2029); +SELECT argMinIf(number, number::Float64, number > 2030) from numbers(2029); +SELECT argMinIf(number, number::Float64, number > 2030) from numbers(2032); +SELECT argMin(number, -number::Float64) from numbers(2029); +SELECT argMinIf(number, -number::Float64, number > 2030) from numbers(2029); +SELECT argMinIf(number, -number::Float64, number > 2030) from numbers(2032); + +Select argMax((n, n), n) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); +Select argMaxIf((n, n), n, n < 5) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); +Select argMaxIf((n, n), n, n > 5) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); + +Select argMin((n, n), n) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); +Select argMinIf((n, n), n, n < 5) t, toTypeName(t) FROM (Select 
if(number % 3 == 0, NULL, number) as n from numbers(10)); +Select argMinIf((n, n), n, n > 5) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); + +SET compile_aggregate_expressions=1; +SET min_count_to_compile_aggregate_expression=0; + +WITH + arrayJoin([1, 2, 3, nan, 4, 5]) AS data, + arrayJoin([nan, 1, 2, 3, 4]) AS data2, + arrayJoin([1, 2, 3, 4, nan]) AS data3, + arrayJoin([nan, nan, nan]) AS data4, + arrayJoin([nan, 1, 2, 3, nan]) AS data5 +SELECT + min(data), + min(data2), + min(data3), + min(data4), + min(data5); + +WITH + arrayJoin([1, 2, 3, nan, 4, 5]) AS data, + arrayJoin([nan, 1, 2, 3, 4]) AS data2, + arrayJoin([1, 2, 3, 4, nan]) AS data3, + arrayJoin([nan, nan, nan]) AS data4, + arrayJoin([nan, 1, 2, 3, nan]) AS data5 +SELECT + max(data), + max(data2), + max(data3), + max(data4), + max(data5); + +SELECT minIf(number, rand() % 2 == 3) from numbers(10); +SELECT maxIf(number, rand() % 2 == 3) from numbers(10); + +SELECT minIf(number::Float64, rand() % 2 == 3) from numbers(10); +SELECT maxIf(number::Float64, rand() % 2 == 3) from numbers(10); + +SELECT minIf(number::String, number < 10) as number from numbers(10, 1000); +SELECT maxIf(number::String, number < 10) as number from numbers(10, 1000); +SELECT maxIf(number::String, number % 3), maxIf(number::String, number % 5), minIf(number::String, number % 3), minIf(number::String, number > 10) from numbers(400); + +SELECT minIf(number::Nullable(String), number < 10) as number from numbers(10, 1000); +SELECT maxIf(number::Nullable(String), number < 10) as number from numbers(10, 1000); + +SELECT min(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); +SELECT max(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.sql b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.sql index 148ad303bd4..cabcd230eb6 100644 --- a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.sql +++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.sql @@ -2,7 +2,6 @@ -- Please help shorten this list down to zero elements. SELECT name FROM system.functions WHERE NOT is_aggregate AND origin = 'System' AND alias_to = '' AND length(description) < 10 AND name NOT IN ( - 'MD4', 'MD5', 'SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512', 'halfMD5', 'aes_decrypt_mysql', 'aes_encrypt_mysql', 'decrypt', 'encrypt', 'base64Decode', 'base64Encode', 'tryBase64Decode', 'convertCharset', diff --git a/tests/queries/0_stateless/02421_record_errors_row_by_input_format.sh b/tests/queries/0_stateless/02421_record_errors_row_by_input_format.sh index dda61512936..df304eeeba5 100755 --- a/tests/queries/0_stateless/02421_record_errors_row_by_input_format.sh +++ b/tests/queries/0_stateless/02421_record_errors_row_by_input_format.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # Data preparation. 
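
A minimal sketch of the nan asymmetry pinned down by the new 02406_minmax_behaviour reference above, assuming shell_config.sh has been sourced so $CLICKHOUSE_CLIENT is defined; the aliases are illustrative. In those reference cases min() and max() report nan only when nan is the leading value of the aggregated data.

    #!/usr/bin/env bash
    # Two arrays differing only in where nan sits; the expected output is
    # "1   nan", matching the reference rows above.
    $CLICKHOUSE_CLIENT -q "
        WITH
            arrayJoin([1, 2, 3, nan, 4, 5]) AS nan_in_middle,
            arrayJoin([nan, 1, 2, 3, 4])    AS nan_first
        SELECT min(nan_in_middle), min(nan_first);"
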
-CLICKHOUSE_USER_FILES_PATH=$(clickhouse-client --query "select _path, _file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +CLICKHOUSE_USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path, _file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p ${CLICKHOUSE_USER_FILES_PATH}/ echo -e "1,1\n2,a\nb,3\n4,4\n5,c\n6,6" > ${CLICKHOUSE_USER_FILES_PATH}/a.csv @@ -32,4 +32,3 @@ ${CLICKHOUSE_CLIENT} --query "drop table if exists data;" rm ${CLICKHOUSE_USER_FILES_PATH}/a.csv rm ${CLICKHOUSE_USER_FILES_PATH}/errors_server rm ${CLICKHOUSE_USER_FILES_PATH}/errors_client - diff --git a/tests/queries/0_stateless/02428_combinators_with_over_statement.sql b/tests/queries/0_stateless/02428_combinators_with_over_statement.sql index b42066cdf52..7946b997b00 100644 --- a/tests/queries/0_stateless/02428_combinators_with_over_statement.sql +++ b/tests/queries/0_stateless/02428_combinators_with_over_statement.sql @@ -1,6 +1,6 @@ drop table if exists test; create table test (x AggregateFunction(uniq, UInt64), y Int64) engine=Memory; -insert into test select uniqState(number) as x, number as y from numbers(10) group by number; +insert into test select uniqState(number) as x, number as y from numbers(10) group by number order by x, y; select uniqStateMap(map(1, x)) OVER (PARTITION BY y) from test; select uniqStateForEach([x]) OVER (PARTITION BY y) from test; select uniqStateResample(30, 75, 30)([x], 30) OVER (PARTITION BY y) from test; diff --git a/tests/queries/0_stateless/02447_drop_database_replica.sh b/tests/queries/0_stateless/02447_drop_database_replica.sh index 47a6cf10bda..d5b3ceef46a 100755 --- a/tests/queries/0_stateless/02447_drop_database_replica.sh +++ b/tests/queries/0_stateless/02447_drop_database_replica.sh @@ -55,7 +55,15 @@ $CLICKHOUSE_CLIENT --allow_experimental_database_replicated=1 -q "create databas $CLICKHOUSE_CLIENT -q "system sync database replica $db4" $CLICKHOUSE_CLIENT -q "select cluster, shard_num, replica_num, database_shard_name, database_replica_name, is_active from system.clusters where cluster='$db4'" +# Don't throw "replica doesn't exist" when removing all replicas [from a database] +$CLICKHOUSE_CLIENT -q "system drop database replica 'doesntexist$CLICKHOUSE_DATABASE' from shard 'doesntexist'" + $CLICKHOUSE_CLIENT -q "drop database $db" $CLICKHOUSE_CLIENT -q "drop database $db2" $CLICKHOUSE_CLIENT -q "drop database $db3" + +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none -q "create table $db4.rmt (n int) engine=ReplicatedMergeTree order by n" +$CLICKHOUSE_CLIENT -q "system drop replica 'doesntexist$CLICKHOUSE_DATABASE' from database $db4" +$CLICKHOUSE_CLIENT -q "system drop replica 'doesntexist$CLICKHOUSE_DATABASE'" + $CLICKHOUSE_CLIENT -q "drop database $db4" diff --git a/tests/queries/0_stateless/02457_bz2_concatenated.sh b/tests/queries/0_stateless/02457_bz2_concatenated.sh index 5b24f74b9c0..96e23cbfa2a 100755 --- a/tests/queries/0_stateless/02457_bz2_concatenated.sh +++ b/tests/queries/0_stateless/02457_bz2_concatenated.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') WORKING_FOLDER_02457="${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}" rm -rf "${WORKING_FOLDER_02457}" diff --git a/tests/queries/0_stateless/02459_glob_for_recursive_directory_traversal.sh b/tests/queries/0_stateless/02459_glob_for_recursive_directory_traversal.sh index 2c3deda2328..b8430307ea3 100755 --- a/tests/queries/0_stateless/02459_glob_for_recursive_directory_traversal.sh +++ b/tests/queries/0_stateless/02459_glob_for_recursive_directory_traversal.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir $user_files_path/d1 touch $user_files_path/d1/text1.txt @@ -40,4 +40,4 @@ rmdir $user_files_path/d1/d2/d3 rm $user_files_path/d1/d2/text2.txt rmdir $user_files_path/d1/d2 rm $user_files_path/d1/text1.txt -rmdir $user_files_path/d1 \ No newline at end of file +rmdir $user_files_path/d1 diff --git a/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.reference b/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.reference index 84589668d64..ff5f7e5a687 100644 --- a/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.reference +++ b/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.reference @@ -36,6 +36,42 @@ QUERY id: 0 SETTINGS allow_experimental_analyzer=1 SELECT a FROM t_logical_expressions_optimizer_low_cardinality +WHERE (a != \'x\') AND (a != \'y\') +QUERY id: 0 + PROJECTION COLUMNS + a LowCardinality(String) + PROJECTION + LIST id: 1, nodes: 1 + COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 + JOIN TREE + TABLE id: 3, table_name: default.t_logical_expressions_optimizer_low_cardinality + WHERE + FUNCTION id: 4, function_name: notIn, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 5, nodes: 2 + COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 + CONSTANT id: 6, constant_value: Tuple_(\'x\', \'y\'), constant_value_type: Tuple(String, String) + SETTINGS allow_experimental_analyzer=1 +SELECT a +FROM t_logical_expressions_optimizer_low_cardinality +WHERE (a != \'x\') AND (\'y\' != a) +QUERY id: 0 + PROJECTION COLUMNS + a LowCardinality(String) + PROJECTION + LIST id: 1, nodes: 1 + COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 + JOIN TREE + TABLE id: 3, table_name: default.t_logical_expressions_optimizer_low_cardinality + WHERE + FUNCTION id: 4, function_name: notIn, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 5, nodes: 2 + COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 + CONSTANT id: 6, constant_value: Tuple_(\'x\', \'y\'), constant_value_type: Tuple(String, 
String) + SETTINGS allow_experimental_analyzer=1 +SELECT a +FROM t_logical_expressions_optimizer_low_cardinality WHERE (b = 0) OR (b = 1) QUERY id: 0 PROJECTION COLUMNS @@ -60,3 +96,29 @@ QUERY id: 0 COLUMN id: 8, column_name: b, result_type: UInt32, source_id: 3 CONSTANT id: 12, constant_value: UInt64_1, constant_value_type: UInt8 SETTINGS allow_experimental_analyzer=1 +SELECT a +FROM t_logical_expressions_optimizer_low_cardinality +WHERE (b != 0) AND (b != 1) +QUERY id: 0 + PROJECTION COLUMNS + a LowCardinality(String) + PROJECTION + LIST id: 1, nodes: 1 + COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 + JOIN TREE + TABLE id: 3, table_name: default.t_logical_expressions_optimizer_low_cardinality + WHERE + FUNCTION id: 4, function_name: and, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 5, nodes: 2 + FUNCTION id: 6, function_name: notEquals, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 7, nodes: 2 + COLUMN id: 8, column_name: b, result_type: UInt32, source_id: 3 + CONSTANT id: 9, constant_value: UInt64_0, constant_value_type: UInt8 + FUNCTION id: 10, function_name: notEquals, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 11, nodes: 2 + COLUMN id: 8, column_name: b, result_type: UInt32, source_id: 3 + CONSTANT id: 12, constant_value: UInt64_1, constant_value_type: UInt8 + SETTINGS allow_experimental_analyzer=1 diff --git a/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.sql b/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.sql index 14f8ad830e7..976b21a7e29 100644 --- a/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.sql +++ b/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.sql @@ -2,13 +2,24 @@ DROP TABLE IF EXISTS t_logical_expressions_optimizer_low_cardinality; set optimize_min_equality_disjunction_chain_length=3; CREATE TABLE t_logical_expressions_optimizer_low_cardinality (a LowCardinality(String), b UInt32) ENGINE = Memory; --- LowCardinality case, ignore optimize_min_equality_disjunction_chain_length limit, optimzer applied +-- LowCardinality case, ignore optimize_min_equality_disjunction_chain_length limit, optimizer applied +-- Chain of OR equals EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a = 'x' OR a = 'y'; EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a = 'x' OR a = 'y' SETTINGS allow_experimental_analyzer = 1; EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a = 'x' OR 'y' = a; EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a = 'x' OR 'y' = a SETTINGS allow_experimental_analyzer = 1; +-- Chain of AND notEquals +EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a <> 'x' AND a <> 'y'; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a <> 'x' AND a <> 'y' SETTINGS allow_experimental_analyzer = 1; +EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a <> 'x' AND 'y' <> a; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a <> 'x' AND 'y' <> a SETTINGS allow_experimental_analyzer = 1; + -- Non-LowCardinality case, optimizer not applied for short chains +-- Chain of OR equals EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE b = 0 OR b = 1; EXPLAIN 
QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE b = 0 OR b = 1 SETTINGS allow_experimental_analyzer = 1; +-- Chain of AND notEquals +EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE b <> 0 AND b <> 1; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE b <> 0 AND b <> 1 SETTINGS allow_experimental_analyzer = 1; DROP TABLE t_logical_expressions_optimizer_low_cardinality; diff --git a/tests/queries/0_stateless/02479_race_condition_between_insert_and_droppin_mv.sh b/tests/queries/0_stateless/02479_race_condition_between_insert_and_droppin_mv.sh index 5d9844d5030..9ce4b459fce 100755 --- a/tests/queries/0_stateless/02479_race_condition_between_insert_and_droppin_mv.sh +++ b/tests/queries/0_stateless/02479_race_condition_between_insert_and_droppin_mv.sh @@ -42,7 +42,7 @@ TIMEOUT=55 for i in {1..4} do - timeout $TIMEOUT bash -c drop_mv $i & + timeout $TIMEOUT bash -c "drop_mv $i" & done for i in {1..4} diff --git a/tests/queries/0_stateless/02480_max_map_null_totals.reference b/tests/queries/0_stateless/02480_max_map_null_totals.reference index 5cc9b5a495f..8fa02ad2a39 100644 --- a/tests/queries/0_stateless/02480_max_map_null_totals.reference +++ b/tests/queries/0_stateless/02480_max_map_null_totals.reference @@ -1,119 +1,119 @@ ([-1,0],[0,0]) -([1,2],[0,2]) ([0,1],[0,1]) +([1,2],[0,2]) -([-1,0,1,2],[0,0,0,2]) +([-1,0,1,2],[0,0,1,2]) ([-1,0],[0,0]) -([1,2],[0,2]) +([-1,0,1,2],[0,0,1,2]) ([0,1],[0,1]) -([-1,0,1,2],[0,0,0,2]) +([1,2],[0,2]) ([-1,0],[0,0]) -([1,2],[0,2]) +([-1,0,1,2],[0,0,1,2]) ([0,1],[0,1]) -([-1,0,1,2],[0,0,0,2]) +([1,2],[0,2]) ([-1,0],[0,0]) -([1,2],[0,2]) ([0,1],[0,1]) +([1,2],[0,2]) -([-1,0,1,2],[0,0,0,2]) +([-1,0,1,2],[0,0,1,2]) ([-1,0],[0,0]) -([1,2],[0,2]) +([-1,0,1,2],[0,0,1,2]) ([0,1],[0,1]) -([-1,0,1,2],[0,0,0,2]) +([1,2],[0,2]) ([-1,0],[0,0]) -([1,2],[0,2]) +([-1,0,1,2],[0,0,1,2]) ([0,1],[0,1]) -([-1,0,1,2],[0,0,0,2]) +([1,2],[0,2]) ([0],[0]) -([2],[2]) ([1],[1]) +([2],[2]) -([0,2],[0,2]) +([0,1,2],[0,1,2]) ([0],[0]) -([2],[2]) +([0,1,2],[0,1,2]) ([1],[1]) -([0,2],[0,2]) +([2],[2]) ([0],[0]) -([2],[2]) +([0,1,2],[0,1,2]) ([1],[1]) -([0,2],[0,2]) +([2],[2]) - ([-1,0],[0,0]) -([1,2],[0,2]) ([0,1],[0,1]) +([1,2],[0,2]) -([-1,0,1,2],[0,0,0,2]) +([-1,0,1,2],[0,0,1,2]) ([-1,0],[0,0]) -([1,2],[0,2]) +([-1,0,1,2],[0,0,1,2]) ([0,1],[0,1]) -([-1,0,1,2],[0,0,0,2]) +([1,2],[0,2]) ([-1,0],[0,0]) -([1,2],[0,2]) +([-1,0,1,2],[0,0,1,2]) ([0,1],[0,1]) -([-1,0,1,2],[0,0,0,2]) +([1,2],[0,2]) ([-1,0],[0,0]) -([1,2],[0,2]) ([0,1],[0,1]) +([1,2],[0,2]) -([-1,0,1,2],[0,0,0,2]) +([-1,0,1,2],[0,0,1,2]) ([-1,0],[0,0]) -([1,2],[0,2]) +([-1,0,1,2],[0,0,1,2]) ([0,1],[0,1]) -([-1,0,1,2],[0,0,0,2]) +([1,2],[0,2]) ([-1,0],[0,0]) -([1,2],[0,2]) +([-1,0,1,2],[0,0,1,2]) ([0,1],[0,1]) -([-1,0,1,2],[0,0,0,2]) +([1,2],[0,2]) ([0],[0]) -([2],[2]) ([1],[1]) +([2],[2]) -([0,2],[0,2]) +([0,1,2],[0,1,2]) ([0],[0]) -([2],[2]) +([0,1,2],[0,1,2]) ([1],[1]) -([0,2],[0,2]) +([2],[2]) ([0],[0]) -([2],[2]) +([0,1,2],[0,1,2]) ([1],[1]) -([0,2],[0,2]) +([2],[2]) - ([-1,0],[0,0]) -([1,2],[0,2]) ([0,1],[0,1]) +([1,2],[0,2]) -([-1,0,1,2],[0,0,0,2]) +([-1,0,1,2],[0,0,1,2]) ([-1,0],[0,0]) -([1,2],[0,2]) +([-1,0,1,2],[0,0,1,2]) ([0,1],[0,1]) -([-1,0,1,2],[0,0,0,2]) +([1,2],[0,2]) ([-1,0],[0,0]) -([1,2],[0,2]) +([-1,0,1,2],[0,0,1,2]) ([0,1],[0,1]) -([-1,0,1,2],[0,0,0,2]) +([1,2],[0,2]) ([-1,0],[0,0]) -([1,2],[0,2]) ([0,1],[0,1]) +([1,2],[0,2]) -([-1,0,1,2],[0,0,0,2]) +([-1,0,1,2],[0,0,1,2]) ([-1,0],[0,0]) -([1,2],[0,2]) 
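
A minimal sketch for reproducing the AND-of-notEquals rewrite exercised by the 02477 logical-expressions test above, assuming shell_config.sh has been sourced so $CLICKHOUSE_CLIENT is defined; the table definition and settings are taken from that test. With the analyzer enabled, the query tree for the LowCardinality column should contain a single notIn over ('x', 'y'), matching the new reference output.

    #!/usr/bin/env bash
    # EXPLAIN QUERY TREE (analyzer enabled) shows the a <> 'x' AND a <> 'y'
    # chain folded into one notIn function.
    $CLICKHOUSE_CLIENT -n -q "
        DROP TABLE IF EXISTS t_logical_expressions_optimizer_low_cardinality;
        SET optimize_min_equality_disjunction_chain_length = 3;
        CREATE TABLE t_logical_expressions_optimizer_low_cardinality
            (a LowCardinality(String), b UInt32) ENGINE = Memory;
        EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality
            WHERE a <> 'x' AND a <> 'y' SETTINGS allow_experimental_analyzer = 1;
        DROP TABLE t_logical_expressions_optimizer_low_cardinality;"
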
+([-1,0,1,2],[0,0,1,2]) ([0,1],[0,1]) -([-1,0,1,2],[0,0,0,2]) +([1,2],[0,2]) ([-1,0],[0,0]) -([1,2],[0,2]) +([-1,0,1,2],[0,0,1,2]) ([0,1],[0,1]) -([-1,0,1,2],[0,0,0,2]) +([1,2],[0,2]) ([0],[0]) -([2],[2]) ([1],[1]) +([2],[2]) -([0,2],[0,2]) +([0,1,2],[0,1,2]) ([0],[0]) -([2],[2]) +([0,1,2],[0,1,2]) ([1],[1]) -([0,2],[0,2]) +([2],[2]) ([0],[0]) -([2],[2]) +([0,1,2],[0,1,2]) ([1],[1]) -([0,2],[0,2]) +([2],[2]) diff --git a/tests/queries/0_stateless/02480_max_map_null_totals.sql b/tests/queries/0_stateless/02480_max_map_null_totals.sql index 81e2a5c4243..be2c566ddc1 100644 --- a/tests/queries/0_stateless/02480_max_map_null_totals.sql +++ b/tests/queries/0_stateless/02480_max_map_null_totals.sql @@ -1,39 +1,39 @@ -SELECT maxMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS; -SELECT maxMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP; -SELECT maxMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH CUBE; +SELECT maxMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT maxMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT maxMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; -SELECT minMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS; -SELECT minMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP; -SELECT minMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH CUBE; +SELECT minMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT minMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT minMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; -SELECT sumMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS; -SELECT sumMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP; -SELECT sumMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH CUBE; +SELECT sumMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT sumMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT sumMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; SELECT '-'; -SELECT maxMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS; -SELECT maxMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP; -SELECT maxMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH CUBE; +SELECT maxMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT maxMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT maxMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; -SELECT minMap([number % 3, number % 
4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS; -SELECT minMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP; -SELECT minMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH CUBE; +SELECT minMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT minMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT minMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; -SELECT sumMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS; -SELECT sumMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP; -SELECT sumMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH CUBE; +SELECT sumMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT sumMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT sumMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; SELECT '-'; -SELECT maxMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS; -SELECT maxMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP; -SELECT maxMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH CUBE; +SELECT maxMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT maxMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT maxMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; -SELECT minMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS; -SELECT minMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP; -SELECT minMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH CUBE; +SELECT minMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT minMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT minMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; -SELECT sumMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS; -SELECT sumMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP; -SELECT sumMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH CUBE; +SELECT sumMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT sumMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH 
ROLLUP ORDER BY number; +SELECT sumMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; diff --git a/tests/queries/0_stateless/02481_analyzer_optimize_grouping_sets_keys.sql b/tests/queries/0_stateless/02481_analyzer_optimize_grouping_sets_keys.sql index de9208ef009..fef71fdf94f 100644 --- a/tests/queries/0_stateless/02481_analyzer_optimize_grouping_sets_keys.sql +++ b/tests/queries/0_stateless/02481_analyzer_optimize_grouping_sets_keys.sql @@ -1,4 +1,5 @@ set allow_experimental_analyzer = 1; +set optimize_syntax_fuse_functions = 0; EXPLAIN QUERY TREE run_passes=1 SELECT avg(log(2) * number) AS k FROM numbers(10000000) diff --git a/tests/queries/0_stateless/02482_capnp_list_of_structs.sh b/tests/queries/0_stateless/02482_capnp_list_of_structs.sh index 091bd4dba2a..9d78b9893dd 100755 --- a/tests/queries/0_stateless/02482_capnp_list_of_structs.sh +++ b/tests/queries/0_stateless/02482_capnp_list_of_structs.sh @@ -5,10 +5,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') touch $USER_FILES_PATH/data.capnp -SCHEMADIR=$(clickhouse-client --query "select * from file('data.capnp', 'CapnProto', 'val1 char') settings format_schema='nonexist:Message'" 2>&1 | grep Exception | grep -oP "file \K.*(?=/nonexist.capnp)") +SCHEMADIR=$($CLICKHOUSE_CLIENT_BINARY --query "select * from file('data.capnp', 'CapnProto', 'val1 char') settings format_schema='nonexist:Message'" 2>&1 | grep Exception | grep -oP "file \K.*(?=/nonexist.capnp)") CLIENT_SCHEMADIR=$CURDIR/format_schemas SERVER_SCHEMADIR=test_02482 mkdir -p $SCHEMADIR/$SERVER_SCHEMADIR diff --git a/tests/queries/0_stateless/02483_capnp_decimals.sh b/tests/queries/0_stateless/02483_capnp_decimals.sh index bdfa9dac3d5..ef545a5539f 100755 --- a/tests/queries/0_stateless/02483_capnp_decimals.sh +++ b/tests/queries/0_stateless/02483_capnp_decimals.sh @@ -5,10 +5,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') touch $USER_FILES_PATH/data.capnp -SCHEMADIR=$(clickhouse-client --query "select * from file('data.capnp', 'CapnProto', 'val1 char') settings format_schema='nonexist:Message'" 2>&1 | grep Exception | grep -oP "file \K.*(?=/nonexist.capnp)") +SCHEMADIR=$($CLICKHOUSE_CLIENT_BINARY --query "select * from file('data.capnp', 'CapnProto', 'val1 char') settings format_schema='nonexist:Message'" 2>&1 | grep Exception | grep -oP "file \K.*(?=/nonexist.capnp)") CLIENT_SCHEMADIR=$CURDIR/format_schemas SERVER_SCHEMADIR=test_02483 mkdir -p $SCHEMADIR/$SERVER_SCHEMADIR @@ -21,4 +21,3 @@ $CLICKHOUSE_CLIENT -q "select * from file(02483_data.capnp, auto, 'decimal64 Dec rm $USER_FILES_PATH/data.capnp rm $USER_FILES_PATH/02483_data.capnp - diff --git a/tests/queries/0_stateless/02483_test_reverse_dns_resolution.reference b/tests/queries/0_stateless/02483_test_reverse_dns_resolution.reference deleted file mode 100644 index 2bae467069f..00000000000 --- a/tests/queries/0_stateless/02483_test_reverse_dns_resolution.reference +++ /dev/null @@ -1,14 +0,0 @@ --- { echoOn } --- Expect dns.google on both queries -select reverseDNSQuery('8.8.8.8'); -['dns.google'] -select reverseDNSQuery('2001:4860:4860::8888'); -['dns.google'] --- Expect empty response -select reverseDNSQuery(''); -[] --- Expect error, invalid column type -select reverseDNSQuery(1); -- {serverError 36} --- Expect error, wrong number of arguments -select reverseDNSQuery(); -- {serverError 42} -select reverseDNSQuery(1, 2); -- {serverError 42} diff --git a/tests/queries/0_stateless/02483_test_reverse_dns_resolution.sql b/tests/queries/0_stateless/02483_test_reverse_dns_resolution.sql deleted file mode 100644 index d9576c0641a..00000000000 --- a/tests/queries/0_stateless/02483_test_reverse_dns_resolution.sql +++ /dev/null @@ -1,14 +0,0 @@ --- { echoOn } --- Expect dns.google on both queries -select reverseDNSQuery('8.8.8.8'); -select reverseDNSQuery('2001:4860:4860::8888'); - --- Expect empty response -select reverseDNSQuery(''); - --- Expect error, invalid column type -select reverseDNSQuery(1); -- {serverError 36} - --- Expect error, wrong number of arguments -select reverseDNSQuery(); -- {serverError 42} -select reverseDNSQuery(1, 2); -- {serverError 42} diff --git a/tests/queries/0_stateless/02487_create_index_normalize_functions.reference b/tests/queries/0_stateless/02487_create_index_normalize_functions.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02487_create_index_normalize_functions.sql b/tests/queries/0_stateless/02487_create_index_normalize_functions.sql new file mode 100644 index 00000000000..2155f5d6665 --- /dev/null +++ b/tests/queries/0_stateless/02487_create_index_normalize_functions.sql @@ -0,0 +1,6 @@ + +create table rmt (n int, ts DateTime64(8, 'UTC')) engine=ReplicatedMergeTree('/test/02487/{database}/rmt', '1') order by n; +alter table rmt add index idx1 date(ts) TYPE MinMax GRANULARITY 1; +create index idx2 on rmt date(ts) TYPE MinMax GRANULARITY 1; +system restart replica rmt; +create table rmt2 (n int, ts DateTime64(8, 'UTC'), index idx1 date(ts) TYPE MinMax GRANULARITY 1, index 
idx2 date(ts) TYPE MinMax GRANULARITY 1) engine=ReplicatedMergeTree('/test/02487/{database}/rmt', '2') order by n; diff --git a/tests/queries/0_stateless/02488_zero_copy_detached_parts_drop_table.reference b/tests/queries/0_stateless/02488_zero_copy_detached_parts_drop_table.reference new file mode 100644 index 00000000000..00c825f598a --- /dev/null +++ b/tests/queries/0_stateless/02488_zero_copy_detached_parts_drop_table.reference @@ -0,0 +1,3 @@ +0 +broken-on-start broken-on-start_all_0_0_0 +42 diff --git a/tests/queries/0_stateless/02488_zero_copy_detached_parts_drop_table.sh b/tests/queries/0_stateless/02488_zero_copy_detached_parts_drop_table.sh new file mode 100755 index 00000000000..b01f16e1cad --- /dev/null +++ b/tests/queries/0_stateless/02488_zero_copy_detached_parts_drop_table.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, zookeeper + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "create table rmt1 (n int) engine=ReplicatedMergeTree('/test/02488/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '1') order by n + settings min_bytes_for_wide_part=0, allow_remote_fs_zero_copy_replication=1, storage_policy='s3_cache'" +$CLICKHOUSE_CLIENT -q "create table rmt2 (n int) engine=ReplicatedMergeTree('/test/02488/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '2') order by n + settings min_bytes_for_wide_part=0, allow_remote_fs_zero_copy_replication=1, storage_policy='s3_cache'" + +$CLICKHOUSE_CLIENT --insert_keeper_fault_injection_probability=0 -q "insert into rmt2 values (42)" +$CLICKHOUSE_CLIENT -q "system sync replica rmt1" + +path=$($CLICKHOUSE_CLIENT -q "select path from system.parts where database='$CLICKHOUSE_DATABASE' and table='rmt2' and name='all_0_0_0'") +# ensure that path is absolute before removing +$CLICKHOUSE_CLIENT -q "select throwIf(substring('$path', 1, 1) != '/', 'Path is relative: $path')" || exit +rm -f $path/count.txt + +$CLICKHOUSE_CLIENT -q "detach table rmt2 sync" +$CLICKHOUSE_CLIENT --send_logs_level='fatal' -q "attach table rmt2" + +$CLICKHOUSE_CLIENT -q "select reason, name from system.detached_parts where database='$CLICKHOUSE_DATABASE' and table='rmt2'" + +$CLICKHOUSE_CLIENT -q "drop table rmt2 sync" + +$CLICKHOUSE_CLIENT -q "select * from rmt1" + +$CLICKHOUSE_CLIENT -q "drop table rmt1" diff --git a/tests/queries/0_stateless/02490_replacing_merge_tree_is_deleted_column.reference b/tests/queries/0_stateless/02490_replacing_merge_tree_is_deleted_column.reference index 04a2b75bb4f..c897004b4e3 100644 --- a/tests/queries/0_stateless/02490_replacing_merge_tree_is_deleted_column.reference +++ b/tests/queries/0_stateless/02490_replacing_merge_tree_is_deleted_column.reference @@ -91,6 +91,28 @@ d4 1 0 == (Replicas) Test settings == c2 1 0 c4 1 0 +no cleanup 1 d1 5 0 +no cleanup 1 d2 1 0 +no cleanup 1 d3 1 0 +no cleanup 1 d4 3 0 +no cleanup 1 d5 1 0 +no cleanup 2 d1 5 0 +no cleanup 2 d2 1 0 +no cleanup 2 d3 1 0 +no cleanup 2 d4 3 0 +no cleanup 2 d5 1 0 +no cleanup 2 d6 2 1 +no cleanup 3 d1 5 0 +no cleanup 3 d2 1 0 +no cleanup 3 d3 1 0 +no cleanup 3 d4 3 0 +no cleanup 3 d5 1 0 +no cleanup 4 d1 5 0 +no cleanup 4 d2 1 0 +no cleanup 4 d3 1 0 +no cleanup 4 d4 3 0 +no cleanup 4 d5 1 0 +no cleanup 4 d6 2 1 == Check cleanup & settings for other merge trees == d1 1 1 d1 1 1 diff --git a/tests/queries/0_stateless/02490_replacing_merge_tree_is_deleted_column.sql b/tests/queries/0_stateless/02490_replacing_merge_tree_is_deleted_column.sql index 8549300d49f..80c18ae308b 100644 --- 
a/tests/queries/0_stateless/02490_replacing_merge_tree_is_deleted_column.sql +++ b/tests/queries/0_stateless/02490_replacing_merge_tree_is_deleted_column.sql @@ -5,7 +5,7 @@ set allow_deprecated_syntax_for_merge_tree=0; -- Test the bahaviour without the is_deleted column DROP TABLE IF EXISTS test; -CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version) Order by (uid); +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version) Order by (uid) settings allow_experimental_replacing_merge_with_cleanup=1; INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0); SELECT '== Test SELECT ... FINAL - no is_deleted =='; select * from test FINAL order by uid; @@ -13,7 +13,7 @@ OPTIMIZE TABLE test FINAL CLEANUP; select * from test order by uid; DROP TABLE IF EXISTS test; -CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version) Order by (uid) SETTINGS clean_deleted_rows='Always'; +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version) Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1; INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0); SELECT '== Test SELECT ... FINAL - no is_deleted SETTINGS clean_deleted_rows=Always =='; select * from test FINAL order by uid; @@ -22,7 +22,7 @@ select * from test order by uid; -- Test the new behaviour DROP TABLE IF EXISTS test; -CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid); +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) settings allow_experimental_replacing_merge_with_cleanup=1; INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0); SELECT '== Test SELECT ... 
FINAL =='; select * from test FINAL order by uid; @@ -37,7 +37,7 @@ INSERT INTO test (*) VALUES ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, select * from test FINAL order by uid; DROP TABLE IF EXISTS test; -CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid); +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) settings allow_experimental_replacing_merge_with_cleanup=1; -- Expect d6 to be version=3 is_deleted=false INSERT INTO test (*) VALUES ('d1', 1, 0), ('d1', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d1', 5, 0), ('d2', 1, 0), ('d3', 1, 0), ('d4', 1, 0), ('d5', 1, 0), ('d6', 1, 0), ('d6', 3, 0); @@ -56,7 +56,7 @@ OPTIMIZE TABLE test FINAL CLEANUP; select * from test order by uid; DROP TABLE IF EXISTS test; -CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) SETTINGS clean_deleted_rows='Always'; +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1; SELECT '== Test of the SETTINGS clean_deleted_rows as Always =='; INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0); @@ -66,7 +66,7 @@ select * from test order by uid; OPTIMIZE TABLE test FINAL; -- d6 has to be removed since we set clean_deleted_rows as 'Always' -select * from test order by uid; +select * from test where is_deleted=0 order by uid; SELECT '== Test of the SETTINGS clean_deleted_rows as Never =='; ALTER TABLE test MODIFY SETTING clean_deleted_rows='Never'; @@ -80,7 +80,7 @@ DROP TABLE IF EXISTS testCleanupR1; CREATE TABLE testCleanupR1 (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/{database}/tables/test_cleanup/', 'r1', version, is_deleted) - ORDER BY uid; + ORDER BY uid settings allow_experimental_replacing_merge_with_cleanup=1; INSERT INTO testCleanupR1 (*) VALUES ('d1', 1, 0),('d2', 1, 0),('d3', 1, 0),('d4', 1, 0); @@ -101,7 +101,7 @@ DROP TABLE IF EXISTS testSettingsR1; CREATE TABLE testSettingsR1 (col1 String, version UInt32, is_deleted UInt8) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/{database}/tables/test_setting/', 'r1', version, is_deleted) ORDER BY col1 - SETTINGS clean_deleted_rows = 'Always'; + SETTINGS clean_deleted_rows = 'Always', allow_experimental_replacing_merge_with_cleanup=1; INSERT INTO testSettingsR1 (*) VALUES ('c1', 1, 1),('c2', 1, 0),('c3', 1, 1),('c4', 1, 0); SYSTEM SYNC REPLICA testSettingsR1; -- Avoid "Cannot select parts for optimization: Entry for part all_2_2_0 hasn't been read from the replication log yet" @@ -110,13 +110,13 @@ OPTIMIZE TABLE testSettingsR1 FINAL; -- Only d3 to d5 remain SELECT '== (Replicas) Test settings =='; -SELECT * FROM testSettingsR1 order by col1; +SELECT * FROM testSettingsR1 where is_deleted=0 order by col1; ------------------------------ -- Check errors DROP TABLE IF EXISTS test; -CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid); +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) settings 
allow_experimental_replacing_merge_with_cleanup=1; -- is_deleted == 0/1 INSERT INTO test (*) VALUES ('d1', 1, 2); -- { serverError INCORRECT_DATA } @@ -125,35 +125,49 @@ DROP TABLE IF EXISTS test; -- checkis_deleted type CREATE TABLE test (uid String, version UInt32, is_deleted String) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid); -- { serverError BAD_TYPE_OF_FIELD } +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid); +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0); +select 'no cleanup 1', * from test FINAL order by uid; +OPTIMIZE TABLE test FINAL CLEANUP; -- { serverError SUPPORT_IS_DISABLED } +select 'no cleanup 2', * from test order by uid; +DROP TABLE test; + +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/{database}/tables/no_cleanup/', 'r1', version, is_deleted) Order by (uid); +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0); +select 'no cleanup 3', * from test FINAL order by uid; +OPTIMIZE TABLE test FINAL CLEANUP; -- { serverError SUPPORT_IS_DISABLED } +select 'no cleanup 4', * from test order by uid; +DROP TABLE test; + -- is_deleted column for other mergeTrees - ErrorCodes::LOGICAL_ERROR) -- Check clean_deleted_rows='Always' for other MergeTrees SELECT '== Check cleanup & settings for other merge trees =='; -CREATE TABLE testMT (uid String, version UInt32, is_deleted UInt8) ENGINE = MergeTree() Order by (uid) SETTINGS clean_deleted_rows='Always'; +CREATE TABLE testMT (uid String, version UInt32, is_deleted UInt8) ENGINE = MergeTree() Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1; INSERT INTO testMT (*) VALUES ('d1', 1, 1); OPTIMIZE TABLE testMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE } OPTIMIZE TABLE testMT FINAL; SELECT * FROM testMT order by uid; -CREATE TABLE testSummingMT (uid String, version UInt32, is_deleted UInt8) ENGINE = SummingMergeTree() Order by (uid) SETTINGS clean_deleted_rows='Always'; +CREATE TABLE testSummingMT (uid String, version UInt32, is_deleted UInt8) ENGINE = SummingMergeTree() Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1; INSERT INTO testSummingMT (*) VALUES ('d1', 1, 1); OPTIMIZE TABLE testSummingMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE } OPTIMIZE TABLE testSummingMT FINAL; SELECT * FROM testSummingMT order by uid; -CREATE TABLE testAggregatingMT (uid String, version UInt32, is_deleted UInt8) ENGINE = AggregatingMergeTree() Order by (uid) SETTINGS clean_deleted_rows='Always'; +CREATE TABLE testAggregatingMT (uid String, version UInt32, is_deleted UInt8) ENGINE = AggregatingMergeTree() Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1; INSERT INTO testAggregatingMT (*) VALUES ('d1', 1, 1); OPTIMIZE TABLE testAggregatingMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE } OPTIMIZE TABLE testAggregatingMT FINAL; SELECT * FROM testAggregatingMT order by uid; -CREATE TABLE testCollapsingMT (uid String, version UInt32, is_deleted UInt8, sign Int8) ENGINE = 
CollapsingMergeTree(sign) Order by (uid) SETTINGS clean_deleted_rows='Always'; +CREATE TABLE testCollapsingMT (uid String, version UInt32, is_deleted UInt8, sign Int8) ENGINE = CollapsingMergeTree(sign) Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1; INSERT INTO testCollapsingMT (*) VALUES ('d1', 1, 1, 1); OPTIMIZE TABLE testCollapsingMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE } OPTIMIZE TABLE testCollapsingMT FINAL; SELECT * FROM testCollapsingMT order by uid; -CREATE TABLE testVersionedCMT (uid String, version UInt32, is_deleted UInt8, sign Int8) ENGINE = VersionedCollapsingMergeTree(sign, version) Order by (uid) SETTINGS clean_deleted_rows='Always'; +CREATE TABLE testVersionedCMT (uid String, version UInt32, is_deleted UInt8, sign Int8) ENGINE = VersionedCollapsingMergeTree(sign, version) Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1; INSERT INTO testVersionedCMT (*) VALUES ('d1', 1, 1, 1); OPTIMIZE TABLE testVersionedCMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE } OPTIMIZE TABLE testVersionedCMT FINAL; diff --git a/tests/queries/0_stateless/02494_analyzer_compound_expression_crash_fix.sql b/tests/queries/0_stateless/02494_analyzer_compound_expression_crash_fix.sql index 4eef7792180..b8d43acbef2 100644 --- a/tests/queries/0_stateless/02494_analyzer_compound_expression_crash_fix.sql +++ b/tests/queries/0_stateless/02494_analyzer_compound_expression_crash_fix.sql @@ -3,7 +3,7 @@ SET allow_experimental_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table ( fingerprint UInt16, - fields Array(Tuple(name Array(UInt32), value String)) + fields Nested(name Array(UInt32), value String) ) ENGINE = MergeTree ORDER BY fingerprint; diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting.reference b/tests/queries/0_stateless/02496_remove_redundant_sorting.reference index b318157835d..e7c169cf45e 100644 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting.reference +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting.reference @@ -113,27 +113,26 @@ FROM ) ORDER BY number DESC ) AS t2 +ORDER BY t1.number, t2.number -- explain -Expression ((Projection + Before ORDER BY)) - Join (JOIN FillRightFirst) - Expression ((Before JOIN + Projection)) - Sorting (Sorting for ORDER BY) - Expression ((Before ORDER BY + (Projection + Before ORDER BY))) +Expression (Projection) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Join (JOIN FillRightFirst) + Expression ((Before JOIN + (Projection + (Before ORDER BY + (Projection + Before ORDER BY))))) ReadFromSystemNumbers - Expression ((Joined actions + (Rename joined columns + Projection))) - Sorting (Sorting for ORDER BY) - Expression ((Before ORDER BY + (Projection + Before ORDER BY))) + Expression ((Joined actions + (Rename joined columns + (Projection + (Before ORDER BY + (Projection + Before ORDER BY)))))) ReadFromSystemNumbers -- execute -0 2 -0 1 0 0 -1 2 -1 1 +0 1 +0 2 1 0 -2 2 -2 1 +1 1 +1 2 2 0 +2 1 +2 2 -- CROSS JOIN with subqueries, ORDER BY in main query -> all ORDER BY clauses will be removed in subqueries -- query SELECT * @@ -193,15 +192,18 @@ FROM ORDER BY number DESC ) GROUP BY number +ORDER BY number -- explain -Expression ((Projection + Before ORDER BY)) - Aggregating - Expression ((Before GROUP BY + (Projection + (Before ORDER BY + (Projection + Before ORDER BY))))) - ReadFromSystemNumbers +Expression (Projection) + Sorting (Sorting for ORDER 
BY) + Expression (Before ORDER BY) + Aggregating + Expression ((Before GROUP BY + (Projection + (Before ORDER BY + (Projection + Before ORDER BY))))) + ReadFromSystemNumbers -- execute 0 -2 1 +2 -- GROUP BY with aggregation function which depends on order -> keep ORDER BY in first subquery, and eliminate in second subquery -- query SELECT any(number) @@ -217,15 +219,18 @@ FROM ORDER BY number DESC ) GROUP BY number +ORDER BY number -- explain -Expression ((Projection + Before ORDER BY)) - Aggregating - Expression ((Before GROUP BY + (Projection + (Before ORDER BY + (Projection + Before ORDER BY))))) - ReadFromSystemNumbers +Expression (Projection) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Aggregating + Expression ((Before GROUP BY + (Projection + (Before ORDER BY + (Projection + Before ORDER BY))))) + ReadFromSystemNumbers -- execute 0 -2 1 +2 -- query with aggregation function but w/o GROUP BY -> remove sorting -- query SELECT sum(number) @@ -315,15 +320,18 @@ FROM GROUP BY number ) WHERE a > 0 +ORDER BY a -- explain -Expression ((Projection + (Before ORDER BY + ))) - Aggregating - Filter - Filter (( + (Before GROUP BY + (Projection + (Before ORDER BY + (Projection + Before ORDER BY)))))) - ReadFromSystemNumbers +Expression (Projection) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + )) + Aggregating + Filter + Filter (( + (Before GROUP BY + (Projection + (Before ORDER BY + (Projection + Before ORDER BY)))))) + ReadFromSystemNumbers -- execute -2 1 +2 -- GROUP BY in most inner query makes execution parallelized, and removing inner sorting steps will keep it that way. But need to correctly update data streams sorting properties after removing sorting steps -- query SELECT * diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting.sh b/tests/queries/0_stateless/02496_remove_redundant_sorting.sh index 8b529c26d93..c676e0340b1 100755 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting.sh +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting.sh @@ -96,7 +96,8 @@ FROM ORDER BY number ASC ) ORDER BY number DESC -) AS t2" +) AS t2 +ORDER BY t1.number, t2.number" run_query "$query" echo "-- CROSS JOIN with subqueries, ORDER BY in main query -> all ORDER BY clauses will be removed in subqueries" @@ -138,7 +139,8 @@ FROM ) ORDER BY number DESC ) -GROUP BY number" +GROUP BY number +ORDER BY number" run_query "$query" echo "-- GROUP BY with aggregation function which depends on order -> keep ORDER BY in first subquery, and eliminate in second subquery" @@ -154,7 +156,8 @@ FROM ) ORDER BY number DESC ) -GROUP BY number" +GROUP BY number +ORDER BY number" run_query "$query" echo "-- query with aggregation function but w/o GROUP BY -> remove sorting" @@ -218,7 +221,8 @@ FROM ) GROUP BY number ) -WHERE a > 0" +WHERE a > 0 +ORDER BY a" run_query "$query" echo "-- GROUP BY in most inner query makes execution parallelized, and removing inner sorting steps will keep it that way. 
But need to correctly update data streams sorting properties after removing sorting steps" diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference b/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference index ee2099c62ba..16d3327b9c2 100644 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference @@ -113,27 +113,26 @@ FROM ) ORDER BY number DESC ) AS t2 +ORDER BY t1.number, t2.number -- explain -Expression ((Project names + (Projection + DROP unused columns after JOIN))) - Join (JOIN FillRightFirst) - Expression ((Change column names to column identifiers + Project names)) - Sorting (Sorting for ORDER BY) - Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + Change column names to column identifiers))))))) +Expression (Project names) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + DROP unused columns after JOIN))) + Join (JOIN FillRightFirst) + Expression ((Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + Change column names to column identifiers))))))))) ReadFromSystemNumbers - Expression ((Change column names to column identifiers + Project names)) - Sorting (Sorting for ORDER BY) - Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + Change column names to column identifiers))))))) + Expression ((Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + Change column names to column identifiers))))))))) ReadFromSystemNumbers -- execute -0 2 -0 1 0 0 -1 2 -1 1 +0 1 +0 2 1 0 -2 2 -2 1 +1 1 +1 2 2 0 +2 1 +2 2 -- CROSS JOIN with subqueries, ORDER BY in main query -> all ORDER BY clauses will be removed in subqueries -- query SELECT * @@ -193,15 +192,18 @@ FROM ORDER BY number DESC ) GROUP BY number +ORDER BY number -- explain -Expression ((Project names + Projection)) - Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + Change column names to column identifiers)))))))))) - ReadFromSystemNumbers +Expression (Project names) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + Projection)) + Aggregating + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + Change column names to column identifiers)))))))))) + ReadFromSystemNumbers -- execute 0 -2 1 +2 -- GROUP BY with aggregation function which depends on order -> keep ORDER BY in first subquery, and eliminate in second subquery -- query SELECT any(number) @@ -217,17 +219,20 @@ FROM ORDER BY number DESC ) GROUP BY number +ORDER BY number -- explain -Expression ((Project names + Projection)) - Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + Project names))) - Sorting (Sorting for ORDER BY) - Expression ((Before ORDER 
BY + (Projection + (Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + Change column names to column identifiers))))))) - ReadFromSystemNumbers +Expression (Project names) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + Projection)) + Aggregating + Expression ((Before GROUP BY + (Change column names to column identifiers + Project names))) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + Change column names to column identifiers))))))) + ReadFromSystemNumbers -- execute 0 -2 1 +2 -- query with aggregation function but w/o GROUP BY -> remove sorting -- query SELECT sum(number) @@ -319,17 +324,20 @@ FROM GROUP BY number ) WHERE a > 0 +ORDER BY a -- explain -Expression ((Project names + Projection)) - Filter ((WHERE + (Change column names to column identifiers + (Project names + Projection)))) - Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + Project names))) - Sorting (Sorting for ORDER BY) - Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + Change column names to column identifiers))))))) - ReadFromSystemNumbers +Expression (Project names) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + Projection)) + Filter ((WHERE + (Change column names to column identifiers + (Project names + Projection)))) + Aggregating + Expression ((Before GROUP BY + (Change column names to column identifiers + Project names))) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + Change column names to column identifiers))))))) + ReadFromSystemNumbers -- execute -2 1 +2 -- GROUP BY in most inner query makes execution parallelized, and removing inner sorting steps will keep it that way. 
But need to correctly update data streams sorting properties after removing sorting steps -- query SELECT * diff --git a/tests/queries/0_stateless/02498_analyzer_settings_push_down.sql b/tests/queries/0_stateless/02498_analyzer_settings_push_down.sql index 91bdce2cca9..67623869f0a 100644 --- a/tests/queries/0_stateless/02498_analyzer_settings_push_down.sql +++ b/tests/queries/0_stateless/02498_analyzer_settings_push_down.sql @@ -1,4 +1,5 @@ SET allow_experimental_analyzer = 1; +SET optimize_functions_to_subcolumns = 0; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table (id UInt64, value Tuple(a UInt64)) ENGINE=MergeTree ORDER BY id; diff --git a/tests/queries/0_stateless/02500_prevent_drop_nested_if_empty_part.sql b/tests/queries/0_stateless/02500_prevent_drop_nested_if_empty_part.sql index 529f574d32d..d8564546b0e 100644 --- a/tests/queries/0_stateless/02500_prevent_drop_nested_if_empty_part.sql +++ b/tests/queries/0_stateless/02500_prevent_drop_nested_if_empty_part.sql @@ -2,41 +2,19 @@ DROP TABLE IF EXISTS 02500_nested; SET flatten_nested = 1; -CREATE TABLE 02500_nested(arr Array(Tuple(a Int32, b Int32))) Engine=MergeTree ORDER BY tuple(); -INSERT INTO 02500_nested(arr.a, arr.b) VALUES ([1], [2]); -ALTER TABLE 02500_nested ADD COLUMN z Int32; -ALTER TABLE 02500_nested DROP COLUMN arr; -- { serverError BAD_ARGUMENTS } -DROP TABLE 02500_nested; - -CREATE TABLE 02500_nested(arr Array(Tuple(a Int32, b Int32)), z Int32) Engine=MergeTree ORDER BY tuple(); -INSERT INTO 02500_nested(arr.a, arr.b, z) VALUES ([1], [2], 2); -ALTER TABLE 02500_nested DROP COLUMN arr; -DROP TABLE 02500_nested; - CREATE TABLE 02500_nested(nes Nested(a Int32, b Int32)) Engine=MergeTree ORDER BY tuple(); INSERT INTO 02500_nested(nes.a, nes.b) VALUES ([1], [2]); ALTER TABLE 02500_nested ADD COLUMN z Int32; ALTER TABLE 02500_nested DROP COLUMN nes; -- { serverError BAD_ARGUMENTS } DROP TABLE 02500_nested; -CREATE TABLE 02500_nested(nes Array(Tuple(a Int32, b Int32)), z Int32) Engine=MergeTree ORDER BY tuple(); +CREATE TABLE 02500_nested(nes Nested(a Int32, b Int32), z Int32) Engine=MergeTree ORDER BY tuple(); INSERT INTO 02500_nested(nes.a, nes.b, z) VALUES ([1], [2], 2); ALTER TABLE 02500_nested DROP COLUMN nes; DROP TABLE 02500_nested; SET flatten_nested = 0; -CREATE TABLE 02500_nested(arr Array(Tuple(a Int32, b Int32))) Engine=MergeTree ORDER BY tuple(); -INSERT INTO 02500_nested(arr) VALUES ([(1, 2)]); -ALTER TABLE 02500_nested ADD COLUMN z Int32; -ALTER TABLE 02500_nested DROP COLUMN arr; -- { serverError BAD_ARGUMENTS } -DROP TABLE 02500_nested; - -CREATE TABLE 02500_nested(arr Array(Tuple(a Int32, b Int32)), z Int32) Engine=MergeTree ORDER BY tuple(); -INSERT INTO 02500_nested(arr, z) VALUES ([(1, 2)], 2); -ALTER TABLE 02500_nested DROP COLUMN arr; -DROP TABLE 02500_nested; - CREATE TABLE 02500_nested(nes Nested(a Int32, b Int32)) Engine=MergeTree ORDER BY tuple(); INSERT INTO 02500_nested(nes) VALUES ([(1, 2)]); ALTER TABLE 02500_nested ADD COLUMN z Int32; diff --git a/tests/queries/0_stateless/02500_remove_redundant_distinct.reference b/tests/queries/0_stateless/02500_remove_redundant_distinct.reference index 3f580763dba..d7623cd5541 100644 --- a/tests/queries/0_stateless/02500_remove_redundant_distinct.reference +++ b/tests/queries/0_stateless/02500_remove_redundant_distinct.reference @@ -73,22 +73,24 @@ FROM SELECT DISTINCT number AS n FROM numbers(2) ) as y +ORDER BY x.n, y.n -- explain Expression (Projection) Distinct - Distinct (Preliminary DISTINCT) - Expression (Before ORDER BY) - Join (JOIN 
FillRightFirst) - Expression ((Before JOIN + Projection)) - Distinct - Distinct (Preliminary DISTINCT) - Expression (Before ORDER BY) - ReadFromSystemNumbers - Expression ((Joined actions + (Rename joined columns + Projection))) - Distinct - Distinct (Preliminary DISTINCT) - Expression (Before ORDER BY) - ReadFromSystemNumbers + Sorting (Sorting for ORDER BY) + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + Join (JOIN FillRightFirst) + Expression ((Before JOIN + Projection)) + Distinct + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + ReadFromSystemNumbers + Expression ((Joined actions + (Rename joined columns + Projection))) + Distinct + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + ReadFromSystemNumbers -- execute 0 0 0 1 @@ -106,12 +108,15 @@ FROM FROM numbers(3) ) ) +ORDER BY a, b -- explain -Expression ((Projection + (Before ORDER BY + (Projection + (Before ORDER BY + Projection))))) - Distinct - Distinct (Preliminary DISTINCT) - Expression (Before ORDER BY) - ReadFromSystemNumbers +Expression (Projection) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + (Before ORDER BY + Projection)))) + Distinct + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + ReadFromSystemNumbers -- execute 0 0 1 2 @@ -128,12 +133,15 @@ FROM FROM numbers(3) ) ) +ORDER BY a, b -- explain -Expression ((Projection + (Before ORDER BY + (Projection + (Before ORDER BY + Projection))))) - Distinct - Distinct (Preliminary DISTINCT) - Expression (Before ORDER BY) - ReadFromSystemNumbers +Expression (Projection) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + (Before ORDER BY + Projection)))) + Distinct + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + ReadFromSystemNumbers -- execute 2 0 0 2 1 2 @@ -147,21 +155,23 @@ FROM FROM VALUES('Hello', 'World', 'Goodbye') ) AS words ARRAY JOIN [0, 1] AS arr +ORDER BY c1, arr -- explain Expression (Projection) Distinct - Distinct (Preliminary DISTINCT) - Expression (Before ORDER BY) - ArrayJoin (ARRAY JOIN) - Expression ((Before ARRAY JOIN + Projection)) - Distinct - Distinct (Preliminary DISTINCT) - Expression (Before ORDER BY) - ReadFromStorage (Values) + Sorting (Sorting for ORDER BY) + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + ArrayJoin (ARRAY JOIN) + Expression ((Before ARRAY JOIN + Projection)) + Distinct + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + ReadFromStorage (Values) -- execute +Goodbye Hello World -Goodbye -- WITH FILL: do _not_ remove outer DISTINCT because new rows are generated between inner and outer DISTINCTs -- query SELECT DISTINCT * @@ -194,16 +204,18 @@ FROM SELECT DISTINCT ['Istanbul', 'Berlin', 'Bensheim'] AS cities ) WHERE arrayJoin(cities) IN ['Berlin', 'Bensheim'] +ORDER BY cities -- explain Expression (( + Projection)) Distinct - Distinct (Preliminary DISTINCT) - Expression (Before ORDER BY) - Filter ((WHERE + Projection)) - Distinct - Distinct (Preliminary DISTINCT) - Expression (Before ORDER BY) - ReadFromStorage (SystemOne) + Sorting (Sorting for ORDER BY) + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + Filter ((WHERE + Projection)) + Distinct + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + ReadFromStorage (SystemOne) -- execute ['Istanbul','Berlin','Bensheim'] -- GROUP BY before DISTINCT with on the same columns => remove DISTINCT @@ -222,20 +234,23 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a + ORDER 
BY a ) -- explain -Expression ((Projection + (Before ORDER BY + (Projection + Before ORDER BY)))) - Aggregating - Expression ((Before GROUP BY + (Projection + Before ORDER BY))) - Join (JOIN FillRightFirst) - Expression (Before JOIN) - ReadFromSystemNumbers - Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) - ReadFromSystemNumbers +Expression ((Projection + (Before ORDER BY + Projection))) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Aggregating + Expression ((Before GROUP BY + (Projection + Before ORDER BY))) + Join (JOIN FillRightFirst) + Expression (Before JOIN) + ReadFromSystemNumbers + Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) + ReadFromSystemNumbers -- execute 0 -2 1 +2 -- GROUP BY before DISTINCT with on different columns => do _not_ remove DISTINCT -- query SELECT DISTINCT c @@ -252,19 +267,22 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a + ORDER BY a ) -- explain Expression (Projection) Distinct Distinct (Preliminary DISTINCT) - Expression ((Before ORDER BY + (Projection + Before ORDER BY))) - Aggregating - Expression ((Before GROUP BY + (Projection + Before ORDER BY))) - Join (JOIN FillRightFirst) - Expression (Before JOIN) - ReadFromSystemNumbers - Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) - ReadFromSystemNumbers + Expression ((Before ORDER BY + Projection)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Aggregating + Expression ((Before GROUP BY + (Projection + Before ORDER BY))) + Join (JOIN FillRightFirst) + Expression (Before JOIN) + ReadFromSystemNumbers + Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) + ReadFromSystemNumbers -- execute 12 -- GROUP BY WITH ROLLUP before DISTINCT with on different columns => do _not_ remove DISTINCT @@ -283,20 +301,23 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH ROLLUP + ORDER BY a ) -- explain Expression (Projection) Distinct Distinct (Preliminary DISTINCT) - Expression ((Before ORDER BY + (Projection + Before ORDER BY))) - Rollup - Aggregating - Expression ((Before GROUP BY + (Projection + Before ORDER BY))) - Join (JOIN FillRightFirst) - Expression (Before JOIN) - ReadFromSystemNumbers - Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) - ReadFromSystemNumbers + Expression ((Before ORDER BY + Projection)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Rollup + Aggregating + Expression ((Before GROUP BY + (Projection + Before ORDER BY))) + Join (JOIN FillRightFirst) + Expression (Before JOIN) + ReadFromSystemNumbers + Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) + ReadFromSystemNumbers -- execute 12 36 @@ -316,22 +337,25 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH ROLLUP + ORDER BY a ) -- explain -Expression ((Projection + (Before ORDER BY + (Projection + Before ORDER BY)))) - Rollup - Aggregating - Expression ((Before GROUP BY + (Projection + Before ORDER BY))) - Join (JOIN FillRightFirst) - Expression (Before JOIN) - ReadFromSystemNumbers - Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) - ReadFromSystemNumbers +Expression ((Projection + (Before ORDER BY + Projection))) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Rollup + Aggregating + Expression ((Before GROUP BY + (Projection + Before ORDER BY))) + 
Join (JOIN FillRightFirst) + Expression (Before JOIN) + ReadFromSystemNumbers + Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) + ReadFromSystemNumbers -- execute 0 -2 -1 0 +1 +2 -- GROUP BY WITH CUBE before DISTINCT with on different columns => do _not_ remove DISTINCT -- query SELECT DISTINCT c @@ -348,20 +372,23 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH CUBE + ORDER BY a ) -- explain Expression (Projection) Distinct Distinct (Preliminary DISTINCT) - Expression ((Before ORDER BY + (Projection + Before ORDER BY))) - Cube - Aggregating - Expression ((Before GROUP BY + (Projection + Before ORDER BY))) - Join (JOIN FillRightFirst) - Expression (Before JOIN) - ReadFromSystemNumbers - Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) - ReadFromSystemNumbers + Expression ((Before ORDER BY + Projection)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Cube + Aggregating + Expression ((Before GROUP BY + (Projection + Before ORDER BY))) + Join (JOIN FillRightFirst) + Expression (Before JOIN) + ReadFromSystemNumbers + Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) + ReadFromSystemNumbers -- execute 12 36 @@ -381,22 +408,25 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH CUBE + ORDER BY a ) -- explain -Expression ((Projection + (Before ORDER BY + (Projection + Before ORDER BY)))) - Cube - Aggregating - Expression ((Before GROUP BY + (Projection + Before ORDER BY))) - Join (JOIN FillRightFirst) - Expression (Before JOIN) - ReadFromSystemNumbers - Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) - ReadFromSystemNumbers +Expression ((Projection + (Before ORDER BY + Projection))) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Cube + Aggregating + Expression ((Before GROUP BY + (Projection + Before ORDER BY))) + Join (JOIN FillRightFirst) + Expression (Before JOIN) + ReadFromSystemNumbers + Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) + ReadFromSystemNumbers -- execute 0 -2 -1 0 +1 +2 -- GROUP BY WITH TOTALS before DISTINCT with on different columns => do _not_ remove DISTINCT -- query SELECT DISTINCT c @@ -413,20 +443,23 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH TOTALS + ORDER BY a ) -- explain Expression (Projection) Distinct Distinct (Preliminary DISTINCT) - Expression ((Before ORDER BY + (Projection + Before ORDER BY))) - TotalsHaving - Aggregating - Expression ((Before GROUP BY + (Projection + Before ORDER BY))) - Join (JOIN FillRightFirst) - Expression (Before JOIN) - ReadFromSystemNumbers - Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) - ReadFromSystemNumbers + Expression ((Before ORDER BY + Projection)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + TotalsHaving + Aggregating + Expression ((Before GROUP BY + (Projection + Before ORDER BY))) + Join (JOIN FillRightFirst) + Expression (Before JOIN) + ReadFromSystemNumbers + Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) + ReadFromSystemNumbers -- execute 12 @@ -447,21 +480,24 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH TOTALS + ORDER BY a ) -- explain -Expression ((Projection + (Before ORDER BY + (Projection + Before ORDER BY)))) - TotalsHaving - Aggregating - Expression ((Before GROUP BY + (Projection + Before ORDER 
BY))) - Join (JOIN FillRightFirst) - Expression (Before JOIN) - ReadFromSystemNumbers - Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) - ReadFromSystemNumbers +Expression ((Projection + (Before ORDER BY + Projection))) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + TotalsHaving + Aggregating + Expression ((Before GROUP BY + (Projection + Before ORDER BY))) + Join (JOIN FillRightFirst) + Expression (Before JOIN) + ReadFromSystemNumbers + Expression ((Joined actions + (Rename joined columns + (Projection + Before ORDER BY)))) + ReadFromSystemNumbers -- execute 0 -2 1 +2 0 -- DISTINCT COUNT() with GROUP BY => do _not_ remove DISTINCT @@ -488,21 +524,23 @@ FROM SELECT DISTINCT number FROM numbers(2) ) +ORDER BY number -- explain Expression (Projection) Distinct - Distinct (Preliminary DISTINCT) - Union - Expression ((Before ORDER BY + Projection)) - Distinct - Distinct (Preliminary DISTINCT) - Expression (Before ORDER BY) - ReadFromSystemNumbers - Expression (( + Projection)) - Distinct - Distinct (Preliminary DISTINCT) - Expression (Before ORDER BY) - ReadFromSystemNumbers + Sorting (Sorting for ORDER BY) + Distinct (Preliminary DISTINCT) + Union + Expression ((Before ORDER BY + Projection)) + Distinct + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + ReadFromSystemNumbers + Expression (( + Projection)) + Distinct + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + ReadFromSystemNumbers -- execute 0 1 diff --git a/tests/queries/0_stateless/02500_remove_redundant_distinct.sh b/tests/queries/0_stateless/02500_remove_redundant_distinct.sh index f07cdca4b5a..c4f0994cd13 100755 --- a/tests/queries/0_stateless/02500_remove_redundant_distinct.sh +++ b/tests/queries/0_stateless/02500_remove_redundant_distinct.sh @@ -59,7 +59,8 @@ FROM ( SELECT DISTINCT number AS n FROM numbers(2) -) as y" +) as y +ORDER BY x.n, y.n" run_query "$query" echo "-- DISTINCT duplicates with several columns" @@ -72,7 +73,8 @@ FROM SELECT DISTINCT number as a, 2*number as b FROM numbers(3) ) -)" +) +ORDER BY a, b" run_query "$query" echo "-- DISTINCT duplicates with constant columns" @@ -85,7 +87,8 @@ FROM SELECT DISTINCT 1, number as a, 2*number as b FROM numbers(3) ) -)" +) +ORDER BY a, b" run_query "$query" echo "-- ARRAY JOIN: do _not_ remove outer DISTINCT because new rows are generated between inner and outer DISTINCTs" @@ -95,7 +98,8 @@ FROM SELECT DISTINCT * FROM VALUES('Hello', 'World', 'Goodbye') ) AS words -ARRAY JOIN [0, 1] AS arr" +ARRAY JOIN [0, 1] AS arr +ORDER BY c1, arr" run_query "$query" echo "-- WITH FILL: do _not_ remove outer DISTINCT because new rows are generated between inner and outer DISTINCTs" @@ -114,7 +118,8 @@ FROM ( SELECT DISTINCT ['Istanbul', 'Berlin', 'Bensheim'] AS cities ) -WHERE arrayJoin(cities) IN ['Berlin', 'Bensheim']" +WHERE arrayJoin(cities) IN ['Berlin', 'Bensheim'] +ORDER BY cities" run_query "$query" echo "-- GROUP BY before DISTINCT with on the same columns => remove DISTINCT" @@ -132,6 +137,7 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a + ORDER BY a )" run_query "$query" @@ -150,6 +156,7 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a + ORDER BY a )" run_query "$query" @@ -168,6 +175,7 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH ROLLUP + ORDER BY a )" run_query "$query" @@ -186,6 +194,7 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH ROLLUP + ORDER BY a )" run_query "$query" @@ -204,6 +213,7 @@ FROM FROM 
numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH CUBE + ORDER BY a )" run_query "$query" @@ -222,6 +232,7 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH CUBE + ORDER BY a )" run_query "$query" @@ -240,6 +251,7 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH TOTALS + ORDER BY a )" run_query "$query" @@ -258,6 +270,7 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH TOTALS + ORDER BY a )" run_query "$query" @@ -274,5 +287,6 @@ FROM UNION ALL SELECT DISTINCT number FROM numbers(2) -)" +) +ORDER BY number" run_query "$query" diff --git a/tests/queries/0_stateless/02500_remove_redundant_distinct_analyzer.reference b/tests/queries/0_stateless/02500_remove_redundant_distinct_analyzer.reference index a5f2c3e5ca3..b79f6310166 100644 --- a/tests/queries/0_stateless/02500_remove_redundant_distinct_analyzer.reference +++ b/tests/queries/0_stateless/02500_remove_redundant_distinct_analyzer.reference @@ -74,22 +74,25 @@ FROM SELECT DISTINCT number AS n FROM numbers(2) ) as y +ORDER BY x.n, y.n -- explain Expression (Project names) Distinct (DISTINCT) - Distinct (Preliminary DISTINCT) - Expression ((Projection + DROP unused columns after JOIN)) - Join (JOIN FillRightFirst) - Expression ((Change column names to column identifiers + Project names)) - Distinct (DISTINCT) - Distinct (Preliminary DISTINCT) - Expression ((Projection + Change column names to column identifiers)) - ReadFromSystemNumbers - Expression ((Change column names to column identifiers + Project names)) - Distinct (DISTINCT) - Distinct (Preliminary DISTINCT) - Expression ((Projection + Change column names to column identifiers)) - ReadFromSystemNumbers + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Distinct (Preliminary DISTINCT) + Expression ((Projection + DROP unused columns after JOIN)) + Join (JOIN FillRightFirst) + Expression ((Change column names to column identifiers + Project names)) + Distinct (DISTINCT) + Distinct (Preliminary DISTINCT) + Expression ((Projection + Change column names to column identifiers)) + ReadFromSystemNumbers + Expression ((Change column names to column identifiers + Project names)) + Distinct (DISTINCT) + Distinct (Preliminary DISTINCT) + Expression ((Projection + Change column names to column identifiers)) + ReadFromSystemNumbers -- execute 0 0 0 1 @@ -107,12 +110,15 @@ FROM FROM numbers(3) ) ) +ORDER BY a, b -- explain -Expression ((Project names + (Projection + (Change column names to column identifiers + (Project names + (Projection + (Change column names to column identifiers + Project names))))))) - Distinct (DISTINCT) - Distinct (Preliminary DISTINCT) - Expression ((Projection + Change column names to column identifiers)) - ReadFromSystemNumbers +Expression (Project names) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Projection + (Change column names to column identifiers + Project names))))))) + Distinct (DISTINCT) + Distinct (Preliminary DISTINCT) + Expression ((Projection + Change column names to column identifiers)) + ReadFromSystemNumbers -- execute 0 0 1 2 @@ -129,12 +135,15 @@ FROM FROM numbers(3) ) ) +ORDER BY a, b -- explain -Expression ((Project names + (Projection + (Change column names to column identifiers + (Project names + (Projection + (Change column names to column identifiers + Project names))))))) - Distinct (DISTINCT) - Distinct (Preliminary DISTINCT) - Expression ((Projection + Change column names to 
column identifiers)) - ReadFromSystemNumbers +Expression (Project names) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Projection + (Change column names to column identifiers + Project names))))))) + Distinct (DISTINCT) + Distinct (Preliminary DISTINCT) + Expression ((Projection + Change column names to column identifiers)) + ReadFromSystemNumbers -- execute 2 0 0 2 1 2 @@ -148,21 +157,24 @@ FROM FROM VALUES('Hello', 'World', 'Goodbye') ) AS words ARRAY JOIN [0, 1] AS arr +ORDER BY c1, arr -- explain Expression (Project names) Distinct (DISTINCT) - Distinct (Preliminary DISTINCT) - Expression (Projection) - ArrayJoin (ARRAY JOIN) - Expression ((DROP unused columns before ARRAY JOIN + (ARRAY JOIN actions + (Change column names to column identifiers + Project names)))) - Distinct (DISTINCT) - Distinct (Preliminary DISTINCT) - Expression ((Projection + Change column names to column identifiers)) - ReadFromStorage (Values) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Distinct (Preliminary DISTINCT) + Expression (Projection) + ArrayJoin (ARRAY JOIN) + Expression ((DROP unused columns before ARRAY JOIN + (ARRAY JOIN actions + (Change column names to column identifiers + Project names)))) + Distinct (DISTINCT) + Distinct (Preliminary DISTINCT) + Expression ((Projection + Change column names to column identifiers)) + ReadFromStorage (Values) -- execute +Goodbye Hello World -Goodbye -- WITH FILL: do _not_ remove outer DISTINCT because new rows are generated between inner and outer DISTINCTs -- query SELECT DISTINCT * @@ -196,16 +208,19 @@ FROM SELECT DISTINCT ['Istanbul', 'Berlin', 'Bensheim'] AS cities ) WHERE arrayJoin(cities) IN ['Berlin', 'Bensheim'] +ORDER BY cities -- explain Expression (Project names) Distinct (DISTINCT) - Distinct (Preliminary DISTINCT) - Expression (Projection) - Filter ((WHERE + (Change column names to column identifiers + Project names))) - Distinct (DISTINCT) - Distinct (Preliminary DISTINCT) - Expression ((Projection + Change column names to column identifiers)) - ReadFromStorage (SystemOne) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Distinct (Preliminary DISTINCT) + Expression (Projection) + Filter ((WHERE + (Change column names to column identifiers + Project names))) + Distinct (DISTINCT) + Distinct (Preliminary DISTINCT) + Expression ((Projection + Change column names to column identifiers)) + ReadFromStorage (SystemOne) -- execute ['Istanbul','Berlin','Bensheim'] -- GROUP BY before DISTINCT with on the same columns => remove DISTINCT @@ -224,20 +239,23 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a + ORDER BY a ) -- explain -Expression ((Project names + (Projection + (Change column names to column identifiers + (Project names + Projection))))) - Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) - Join (JOIN FillRightFirst) - Expression (Change column names to column identifiers) - ReadFromSystemNumbers - Expression (Change column names to column identifiers) - ReadFromSystemNumbers +Expression ((Project names + (Projection + (Change column names to column identifiers + Project names)))) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + Projection)) + Aggregating + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns 
after JOIN))))) + Join (JOIN FillRightFirst) + Expression (Change column names to column identifiers) + ReadFromSystemNumbers + Expression (Change column names to column identifiers) + ReadFromSystemNumbers -- execute 0 -2 1 +2 -- GROUP BY before DISTINCT with on different columns => do _not_ remove DISTINCT -- query SELECT DISTINCT c @@ -254,19 +272,22 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a + ORDER BY a ) -- explain Expression (Project names) Distinct (DISTINCT) Distinct (Preliminary DISTINCT) - Expression ((Projection + (Change column names to column identifiers + (Project names + Projection)))) - Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) - Join (JOIN FillRightFirst) - Expression (Change column names to column identifiers) - ReadFromSystemNumbers - Expression (Change column names to column identifiers) - ReadFromSystemNumbers + Expression ((Projection + (Change column names to column identifiers + Project names))) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + Projection)) + Aggregating + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) + Join (JOIN FillRightFirst) + Expression (Change column names to column identifiers) + ReadFromSystemNumbers + Expression (Change column names to column identifiers) + ReadFromSystemNumbers -- execute 12 -- GROUP BY WITH ROLLUP before DISTINCT with on different columns => do _not_ remove DISTINCT @@ -285,20 +306,23 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH ROLLUP + ORDER BY a ) -- explain Expression (Project names) Distinct (DISTINCT) Distinct (Preliminary DISTINCT) - Expression ((Projection + (Change column names to column identifiers + (Project names + Projection)))) - Rollup - Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) - Join (JOIN FillRightFirst) - Expression (Change column names to column identifiers) - ReadFromSystemNumbers - Expression (Change column names to column identifiers) - ReadFromSystemNumbers + Expression ((Projection + (Change column names to column identifiers + Project names))) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + Projection)) + Rollup + Aggregating + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) + Join (JOIN FillRightFirst) + Expression (Change column names to column identifiers) + ReadFromSystemNumbers + Expression (Change column names to column identifiers) + ReadFromSystemNumbers -- execute 12 36 @@ -318,22 +342,25 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH ROLLUP + ORDER BY a ) -- explain -Expression ((Project names + (Projection + (Change column names to column identifiers + (Project names + Projection))))) - Rollup - Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) - Join (JOIN FillRightFirst) - Expression (Change column names to column identifiers) - ReadFromSystemNumbers - Expression (Change column names to column identifiers) - ReadFromSystemNumbers +Expression ((Project names + (Projection + (Change column names to column identifiers + Project names)))) + Sorting (Sorting for ORDER BY) + 
Expression ((Before ORDER BY + Projection)) + Rollup + Aggregating + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) + Join (JOIN FillRightFirst) + Expression (Change column names to column identifiers) + ReadFromSystemNumbers + Expression (Change column names to column identifiers) + ReadFromSystemNumbers -- execute 0 -2 -1 0 +1 +2 -- GROUP BY WITH CUBE before DISTINCT with on different columns => do _not_ remove DISTINCT -- query SELECT DISTINCT c @@ -350,20 +377,23 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH CUBE + ORDER BY a ) -- explain Expression (Project names) Distinct (DISTINCT) Distinct (Preliminary DISTINCT) - Expression ((Projection + (Change column names to column identifiers + (Project names + Projection)))) - Cube - Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) - Join (JOIN FillRightFirst) - Expression (Change column names to column identifiers) - ReadFromSystemNumbers - Expression (Change column names to column identifiers) - ReadFromSystemNumbers + Expression ((Projection + (Change column names to column identifiers + Project names))) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + Projection)) + Cube + Aggregating + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) + Join (JOIN FillRightFirst) + Expression (Change column names to column identifiers) + ReadFromSystemNumbers + Expression (Change column names to column identifiers) + ReadFromSystemNumbers -- execute 12 36 @@ -383,22 +413,25 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH CUBE + ORDER BY a ) -- explain -Expression ((Project names + (Projection + (Change column names to column identifiers + (Project names + Projection))))) - Cube - Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) - Join (JOIN FillRightFirst) - Expression (Change column names to column identifiers) - ReadFromSystemNumbers - Expression (Change column names to column identifiers) - ReadFromSystemNumbers +Expression ((Project names + (Projection + (Change column names to column identifiers + Project names)))) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + Projection)) + Cube + Aggregating + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) + Join (JOIN FillRightFirst) + Expression (Change column names to column identifiers) + ReadFromSystemNumbers + Expression (Change column names to column identifiers) + ReadFromSystemNumbers -- execute 0 -2 -1 0 +1 +2 -- GROUP BY WITH TOTALS before DISTINCT with on different columns => do _not_ remove DISTINCT -- query SELECT DISTINCT c @@ -415,20 +448,23 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH TOTALS + ORDER BY a ) -- explain Expression (Project names) Distinct (DISTINCT) Distinct (Preliminary DISTINCT) - Expression ((Projection + (Change column names to column identifiers + (Project names + Projection)))) - TotalsHaving - Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) - Join (JOIN FillRightFirst) - 
Expression (Change column names to column identifiers) - ReadFromSystemNumbers - Expression (Change column names to column identifiers) - ReadFromSystemNumbers + Expression ((Projection + (Change column names to column identifiers + Project names))) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + Projection)) + TotalsHaving + Aggregating + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) + Join (JOIN FillRightFirst) + Expression (Change column names to column identifiers) + ReadFromSystemNumbers + Expression (Change column names to column identifiers) + ReadFromSystemNumbers -- execute 12 @@ -449,21 +485,24 @@ FROM FROM numbers(3) AS x, numbers(3, 3) AS y ) GROUP BY a WITH TOTALS + ORDER BY a ) -- explain -Expression ((Project names + (Projection + (Change column names to column identifiers + (Project names + Projection))))) - TotalsHaving - Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) - Join (JOIN FillRightFirst) - Expression (Change column names to column identifiers) - ReadFromSystemNumbers - Expression (Change column names to column identifiers) - ReadFromSystemNumbers +Expression ((Project names + (Projection + (Change column names to column identifiers + Project names)))) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + Projection)) + TotalsHaving + Aggregating + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) + Join (JOIN FillRightFirst) + Expression (Change column names to column identifiers) + ReadFromSystemNumbers + Expression (Change column names to column identifiers) + ReadFromSystemNumbers -- execute 0 -2 1 +2 0 -- DISTINCT COUNT() with GROUP BY => do _not_ remove DISTINCT @@ -490,21 +529,24 @@ FROM SELECT DISTINCT number FROM numbers(2) ) +ORDER BY number -- explain Expression (Project names) Distinct (DISTINCT) - Distinct (Preliminary DISTINCT) - Union - Expression ((Projection + (Change column names to column identifiers + Project names))) - Distinct (DISTINCT) - Distinct (Preliminary DISTINCT) - Expression ((Projection + Change column names to column identifiers)) - ReadFromSystemNumbers - Expression (( + ( + Project names))) - Distinct (DISTINCT) - Distinct (Preliminary DISTINCT) - Expression ((Projection + Change column names to column identifiers)) - ReadFromSystemNumbers + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Distinct (Preliminary DISTINCT) + Union + Expression ((Projection + (Change column names to column identifiers + Project names))) + Distinct (DISTINCT) + Distinct (Preliminary DISTINCT) + Expression ((Projection + Change column names to column identifiers)) + ReadFromSystemNumbers + Expression (( + ( + Project names))) + Distinct (DISTINCT) + Distinct (Preliminary DISTINCT) + Expression ((Projection + Change column names to column identifiers)) + ReadFromSystemNumbers -- execute 0 1 diff --git a/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh b/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh index 6e386360d60..d3a8743b880 100755 --- a/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh +++ b/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p $user_files_path/test_02504 diff --git a/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh b/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh index 34ea8b9bfbe..7211372f2f7 100755 --- a/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh +++ b/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p $USER_FILES_PATH/test_02504 @@ -77,7 +77,7 @@ system reload dictionary regexp_dict1; -- { serverError 489 } " cat > "$yaml" <= 40000 SETTINGS mutations_sync = 1; SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test_02521' AND active; -Rows in parts 40000 +Rows in parts 15000 SELECT 'Count', count() FROM lwd_test_02521; Count 15000 OPTIMIZE TABLE lwd_test_02521 FINAL SETTINGS mutations_sync = 1; diff --git a/tests/queries/0_stateless/02567_and_consistency.sql b/tests/queries/0_stateless/02567_and_consistency.sql index 8ad06bd68cb..0eeab99e539 100644 --- a/tests/queries/0_stateless/02567_and_consistency.sql +++ b/tests/queries/0_stateless/02567_and_consistency.sql @@ -5,6 +5,7 @@ FROM ) GROUP BY number HAVING 1 AND sin(sum(number)) +ORDER BY ALL SETTINGS enable_optimize_predicate_expression = 0; SELECT '====='; @@ -16,6 +17,7 @@ FROM ) GROUP BY number HAVING 1 AND sin(1) +ORDER BY ALL SETTINGS enable_optimize_predicate_expression = 0; SELECT '====='; @@ -27,6 +29,7 @@ FROM ) GROUP BY number HAVING x AND sin(sum(number)) +ORDER BY ALL SETTINGS enable_optimize_predicate_expression = 1; SELECT '====='; @@ -38,6 +41,7 @@ FROM ) GROUP BY number HAVING 1 AND sin(sum(number)) +ORDER BY ALL SETTINGS enable_optimize_predicate_expression = 0; SELECT '====='; @@ -57,6 +61,7 @@ FROM ) GROUP BY number HAVING 1 AND sin(sum(number)) +ORDER BY ALL SETTINGS enable_optimize_predicate_expression = 1; select '#45440'; @@ -72,14 +77,18 @@ SELECT NOT h, h IS NULL FROM t2 AS left -GROUP BY g; -select '='; +GROUP BY g +ORDER BY g DESC; + +SELECT '='; + SELECT MAX(left.c0), min2(left.c0, -(-left.c0) * (radians(left.c0) - radians(left.c0))) as g, (((-1925024212 IS NOT NULL) IS NOT NULL) != radians(tan(1216286224))) AND cos(lcm(MAX(left.c0), -1966575216) OR (MAX(left.c0) * 1180517420)) as h, not h, h is null FROM t2 AS left - GROUP BY g HAVING h SETTINGS enable_optimize_predicate_expression = 0; -select '='; + GROUP BY g HAVING h ORDER BY g DESC SETTINGS enable_optimize_predicate_expression = 0; +SELECT '='; + SELECT MAX(left.c0), min2(left.c0, -(-left.c0) * (radians(left.c0) - radians(left.c0))) as g, (((-1925024212 IS NOT NULL) IS NOT NULL) != radians(tan(1216286224))) AND cos(lcm(MAX(left.c0), 
-1966575216) OR (MAX(left.c0) * 1180517420)) as h, not h, h is null FROM t2 AS left - GROUP BY g HAVING h SETTINGS enable_optimize_predicate_expression = 1; + GROUP BY g HAVING h ORDER BY g DESC SETTINGS enable_optimize_predicate_expression = 1; DROP TABLE IF EXISTS t2; diff --git a/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.reference b/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.reference index 60ff2d76995..089d1849eb4 100644 --- a/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.reference +++ b/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.reference @@ -87,4 +87,40 @@ QUERY id: 0 LIST id: 6, nodes: 2 COLUMN id: 7, column_name: a, result_type: Int32, source_id: 3 CONSTANT id: 8, constant_value: UInt64_2, constant_value_type: UInt8 +2 test2 +QUERY id: 0 + PROJECTION COLUMNS + a Int32 + b LowCardinality(String) + PROJECTION + LIST id: 1, nodes: 2 + COLUMN id: 2, column_name: a, result_type: Int32, source_id: 3 + COLUMN id: 4, column_name: b, result_type: LowCardinality(String), source_id: 3 + JOIN TREE + TABLE id: 3, table_name: default.02668_logical_optimizer + WHERE + FUNCTION id: 5, function_name: notIn, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 6, nodes: 2 + COLUMN id: 7, column_name: a, result_type: Int32, source_id: 3 + CONSTANT id: 8, constant_value: Tuple_(UInt64_1, UInt64_3), constant_value_type: Tuple(UInt8, UInt8) +2 test2 +3 another +QUERY id: 0 + PROJECTION COLUMNS + a Int32 + b LowCardinality(String) + PROJECTION + LIST id: 1, nodes: 2 + COLUMN id: 2, column_name: a, result_type: Int32, source_id: 3 + COLUMN id: 4, column_name: b, result_type: LowCardinality(String), source_id: 3 + JOIN TREE + TABLE id: 3, table_name: default.02668_logical_optimizer + WHERE + FUNCTION id: 5, function_name: notEquals, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 6, nodes: 2 + COLUMN id: 7, column_name: a, result_type: Int32, source_id: 3 + CONSTANT id: 8, constant_value: UInt64_1, constant_value_type: UInt8 +1 1 diff --git a/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql b/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql index eebea322dbf..7d624195df9 100644 --- a/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql +++ b/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql @@ -8,6 +8,7 @@ ENGINE=Memory; INSERT INTO 02668_logical_optimizer VALUES (1, 'test'), (2, 'test2'), (3, 'another'); +-- Chain of OR equals SET optimize_min_equality_disjunction_chain_length = 2; SELECT * FROM 02668_logical_optimizer WHERE a = 1 OR 3 = a OR 1 = a; @@ -16,6 +17,7 @@ EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a = 1 OR 3 = a OR SELECT * FROM 02668_logical_optimizer WHERE a = 1 OR 1 = a; EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a = 1 OR 1 = a; +-- Chain of AND equals SELECT * FROM 02668_logical_optimizer WHERE a = 1 AND 2 = a; EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a = 1 AND 2 = a; @@ -25,4 +27,15 @@ EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a = 3 AND b = 'an SELECT * FROM 02668_logical_optimizer WHERE a = 2 AND 2 = a; EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a = 2 AND 2 = a; +-- Chain of AND notEquals +SET optimize_min_inequality_conjunction_chain_length = 2; + +SELECT * FROM 02668_logical_optimizer WHERE a <> 1 AND 3 <> a 
AND 1 <> a; +EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a <> 1 AND 3 <> a AND 1 <> a; + +SELECT * FROM 02668_logical_optimizer WHERE a <> 1 AND 1 <> a; +EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a <> 1 AND 1 <> a; + +SELECT a FROM 02668_logical_optimizer WHERE (b = 'test') AND ('test' = b); + SELECT (k = 3) OR ( (k = 1) OR (k = 2) OR ( (NULL OR 1) = k ) ) FROM ( SELECT materialize(1) AS k ); diff --git a/tests/queries/0_stateless/02702_logical_optimizer_with_nulls.reference b/tests/queries/0_stateless/02702_logical_optimizer_with_nulls.reference index eb79bbc842a..e7f46a974e6 100644 --- a/tests/queries/0_stateless/02702_logical_optimizer_with_nulls.reference +++ b/tests/queries/0_stateless/02702_logical_optimizer_with_nulls.reference @@ -75,3 +75,5 @@ QUERY id: 0 LIST id: 6, nodes: 2 COLUMN id: 7, column_name: a, result_type: Nullable(Int32), source_id: 3 CONSTANT id: 8, constant_value: Tuple_(UInt64_1, UInt64_3, UInt64_2), constant_value_type: Tuple(UInt8, UInt8, UInt8) +1 +1 diff --git a/tests/queries/0_stateless/02702_logical_optimizer_with_nulls.sql b/tests/queries/0_stateless/02702_logical_optimizer_with_nulls.sql index 07d0b170a02..72ab507f541 100644 --- a/tests/queries/0_stateless/02702_logical_optimizer_with_nulls.sql +++ b/tests/queries/0_stateless/02702_logical_optimizer_with_nulls.sql @@ -29,4 +29,7 @@ INSERT INTO 02702_logical_optimizer_with_null_column VALUES (1, 'test'), (2, 'te SELECT * FROM 02702_logical_optimizer_with_null_column WHERE a = 1 OR 3 = a OR 2 = a; EXPLAIN QUERY TREE SELECT * FROM 02702_logical_optimizer_with_null_column WHERE a = 1 OR 3 = a OR 2 = a; +SELECT materialize(1) AS k WHERE NULL OR (0 OR (k = 2) OR (k = CAST(1, 'Nullable(UInt8)') OR k = 3)); +SELECT (k = 2) OR (k = 1) OR ((NULL OR 1) = k) FROM (SELECT 1 AS k); + DROP TABLE 02702_logical_optimizer_with_null_column; diff --git a/tests/queries/0_stateless/02718_array_fold.reference b/tests/queries/0_stateless/02718_array_fold.reference index 4139232d145..e746cd1546c 100644 --- a/tests/queries/0_stateless/02718_array_fold.reference +++ b/tests/queries/0_stateless/02718_array_fold.reference @@ -1,5 +1,5 @@ -Negative tests -Const arrays +-- Negative tests +-- Const arrays 23 3 101 @@ -7,7 +7,7 @@ Const arrays [4,3,2,1] ([4,3,2,1],[1,2,3,4]) ([1,3,5],[2,4,6]) -Non-const arrays +-- Non-const arrays 0 1 3 @@ -23,3 +23,14 @@ Non-const arrays [1,0] [1,0,2] [3,1,0,2] +-- Bug 57458 +abcdef ['c'] 3 +ghijkl ['h','k'] 5 +mnopqr ['n'] 2 +xxx..yyy.. ['xxx','yyy'] 6 +.......... [] 0 +..xx..yyy. ['xx','yyy'] 7 +.......... [] 0 +xxx....... 
['xxx'] 1 + -- Bug 57816 +[] diff --git a/tests/queries/0_stateless/02718_array_fold.sql b/tests/queries/0_stateless/02718_array_fold.sql index 0486a5ce2e3..7dee33c4705 100644 --- a/tests/queries/0_stateless/02718_array_fold.sql +++ b/tests/queries/0_stateless/02718_array_fold.sql @@ -1,4 +1,4 @@ -SELECT 'Negative tests'; +SELECT '-- Negative tests'; SELECT arrayFold(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } SELECT arrayFold(1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } SELECT arrayFold(1, toUInt64(0)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } @@ -9,7 +9,7 @@ SELECT arrayFold( acc,x,y -> x, [0, 1], 'not an array', toUInt8(0)); -- { serve SELECT arrayFold( acc,x -> x, [0, 1], [2, 3], toUInt8(0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } SELECT arrayFold( acc,x,y -> x, [0, 1], [2, 3, 4], toUInt8(0)); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } -SELECT 'Const arrays'; +SELECT '-- Const arrays'; SELECT arrayFold( acc,x -> acc+x*2, [1, 2, 3, 4], toInt64(3)); SELECT arrayFold( acc,x -> acc+x*2, emptyArrayInt64(), toInt64(3)); SELECT arrayFold( acc,x,y -> acc+x*2+y*3, [1, 2, 3, 4], [5, 6, 7, 8], toInt64(3)); @@ -18,7 +18,43 @@ SELECT arrayFold( acc,x -> arrayPushFront(acc, x), [1, 2, 3, 4], emptyArrayInt6 SELECT arrayFold( acc,x -> (arrayPushFront(acc.1, x),arrayPushBack(acc.2, x)), [1, 2, 3, 4], (emptyArrayInt64(), emptyArrayInt64())); SELECT arrayFold( acc,x -> x%2 ? (arrayPushBack(acc.1, x), acc.2): (acc.1, arrayPushBack(acc.2, x)), [1, 2, 3, 4, 5, 6], (emptyArrayInt64(), emptyArrayInt64())); -SELECT 'Non-const arrays'; +SELECT '-- Non-const arrays'; SELECT arrayFold( acc,x -> acc+x, range(number), number) FROM system.numbers LIMIT 5; SELECT arrayFold( acc,x -> arrayPushFront(acc,x), range(number), emptyArrayUInt64()) FROM system.numbers LIMIT 5; SELECT arrayFold( acc,x -> x%2 ? 
arrayPushFront(acc,x) : arrayPushBack(acc,x), range(number), emptyArrayUInt64()) FROM system.numbers LIMIT 5; + +SELECT '-- Bug 57458'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab (line String, patterns Array(String)) ENGINE = MergeTree ORDER BY line; +INSERT INTO tab VALUES ('abcdef', ['c']), ('ghijkl', ['h', 'k']), ('mnopqr', ['n']); + +SELECT + line, + patterns, + arrayFold(acc, pat -> position(line, pat), patterns, 0::UInt64) +FROM tab +ORDER BY line; + +DROP TABLE tab; + +CREATE TABLE tab (line String) ENGINE = Memory(); +INSERT INTO tab VALUES ('xxx..yyy..'), ('..........'), ('..xx..yyy.'), ('..........'), ('xxx.......'); + +SELECT + line, + splitByNonAlpha(line), + arrayFold( + (acc, str) -> position(line, str), + splitByNonAlpha(line), + 0::UInt64 + ) +FROM + tab; + +DROP TABLE tab; + +SELECT ' -- Bug 57816'; + +SELECT arrayFold(acc, x -> arrayIntersect(acc, x), [['qwe', 'asd'], ['qwe','asde']], []); diff --git a/tests/queries/0_stateless/02719_aggregate_with_empty_string_key.sql b/tests/queries/0_stateless/02719_aggregate_with_empty_string_key.sql index 7930b2ca0cc..12572982ddd 100644 --- a/tests/queries/0_stateless/02719_aggregate_with_empty_string_key.sql +++ b/tests/queries/0_stateless/02719_aggregate_with_empty_string_key.sql @@ -2,6 +2,6 @@ drop table if exists test ; create table test(str Nullable(String), i Int64) engine=Memory(); insert into test values(null, 1),('', 2),('s', 1); select '-----------String------------'; -select str ,max(i) from test group by str; +select str, max(i) from test group by str order by str nulls first; drop table test; diff --git a/tests/queries/0_stateless/02721_parquet_field_not_found.sh b/tests/queries/0_stateless/02721_parquet_field_not_found.sh index 72925afec6e..8b366d0e9a2 100755 --- a/tests/queries/0_stateless/02721_parquet_field_not_found.sh +++ b/tests/queries/0_stateless/02721_parquet_field_not_found.sh @@ -5,5 +5,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL -q "select 42 as x format Parquet" | $CLICKHOUSE_LOCAL --input-format Parquet --structure "x UInt32, y UInt32" -q "select * from table" 2>&1 | grep -c "THERE_IS_NO_COLUMN" +$CLICKHOUSE_LOCAL -q "select 42 as x format Parquet" | $CLICKHOUSE_LOCAL --input-format Parquet --structure "x UInt32, y UInt32" --input_format_parquet_allow_missing_columns=0 -q "select * from table" 2>&1 | grep -c "THERE_IS_NO_COLUMN" diff --git a/tests/queries/0_stateless/02722_database_filesystem.sh b/tests/queries/0_stateless/02722_database_filesystem.sh index c21b1af2ff4..f3af7abcbb3 100755 --- a/tests/queries/0_stateless/02722_database_filesystem.sh +++ b/tests/queries/0_stateless/02722_database_filesystem.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh # see 01658_read_file_to_stringcolumn.sh -CLICKHOUSE_USER_FILES_PATH=$(clickhouse-client --query "select _path, _file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +CLICKHOUSE_USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path, _file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') # Prepare data unique_name=${CLICKHOUSE_TEST_UNIQUE_NAME} diff --git a/tests/queries/0_stateless/02724_decompress_filename_exception.sh b/tests/queries/0_stateless/02724_decompress_filename_exception.sh index bbc2b8d066b..e413910b934 100755 --- a/tests/queries/0_stateless/02724_decompress_filename_exception.sh +++ b/tests/queries/0_stateless/02724_decompress_filename_exception.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') FILENAME="${USER_FILES_PATH}/corrupted_file.tsv.xx" echo 'corrupted file' > $FILENAME; diff --git a/tests/queries/0_stateless/02731_parallel_replicas_join_subquery.sql b/tests/queries/0_stateless/02731_parallel_replicas_join_subquery.sql index 29c20980c14..a117378b0bf 100644 --- a/tests/queries/0_stateless/02731_parallel_replicas_join_subquery.sql +++ b/tests/queries/0_stateless/02731_parallel_replicas_join_subquery.sql @@ -1,5 +1,7 @@ -- Tags: zookeeper +DROP TABLE IF EXISTS join_inner_table SYNC; + CREATE TABLE join_inner_table ( id UUID, @@ -23,7 +25,6 @@ SET allow_experimental_analyzer = 0; SET max_parallel_replicas = 3; SET prefer_localhost_replica = 1; SET cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; -SET use_hedged_requests = 0; SET joined_subquery_requires_alias = 0; SELECT '=============== INNER QUERY (NO PARALLEL) ==============='; @@ -78,6 +79,8 @@ ORDER BY is_initial_query, c, query; ---- Query with JOIN +DROP TABLE IF EXISTS join_outer_table SYNC; + CREATE TABLE join_outer_table ( id UUID, diff --git a/tests/queries/0_stateless/02732_rename_after_processing.sh b/tests/queries/0_stateless/02732_rename_after_processing.sh index cdbc9892bc7..9d44ff9fc34 100755 --- a/tests/queries/0_stateless/02732_rename_after_processing.sh +++ b/tests/queries/0_stateless/02732_rename_after_processing.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh # see 01658_read_file_to_stringcolumn.sh -CLICKHOUSE_USER_FILES_PATH=$(clickhouse-client --query "select _path, _file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +CLICKHOUSE_USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path, _file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') # Prepare data unique_name=${CLICKHOUSE_TEST_UNIQUE_NAME} diff --git a/tests/queries/0_stateless/02751_parallel_replicas_bug_chunkinfo_not_set.sql b/tests/queries/0_stateless/02751_parallel_replicas_bug_chunkinfo_not_set.sql index 2ea2cecc7b5..5ec0a1fcc31 100644 --- a/tests/queries/0_stateless/02751_parallel_replicas_bug_chunkinfo_not_set.sql +++ b/tests/queries/0_stateless/02751_parallel_replicas_bug_chunkinfo_not_set.sql @@ -18,7 +18,7 @@ INSERT INTO join_inner_table__fuzz_1 SELECT FROM generateRandom('number Int64, value1 String, value2 String, time Int64', 1, 10, 2) LIMIT 100; -SET max_parallel_replicas = 3, prefer_localhost_replica = 1, use_hedged_requests = 0, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_parallel_reading_from_replicas = 1; +SET max_parallel_replicas = 3, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_parallel_reading_from_replicas = 1; -- SELECT query will write a Warning to the logs SET send_logs_level='error'; diff --git a/tests/queries/0_stateless/02760_dictionaries_memory.sql.j2 b/tests/queries/0_stateless/02760_dictionaries_memory.sql.j2 index ea979506e07..67e8f098217 100644 --- a/tests/queries/0_stateless/02760_dictionaries_memory.sql.j2 +++ b/tests/queries/0_stateless/02760_dictionaries_memory.sql.j2 @@ -14,6 +14,7 @@ SET max_memory_usage='4Mi'; 'FLAT(INITIAL_ARRAY_SIZE 3_000_000 MAX_ARRAY_SIZE 3_000_000)', 'HASHED()', 'HASHED_ARRAY()', + 'HASHED_ARRAY(SHARDS 2)', 'SPARSE_HASHED()', 'SPARSE_HASHED(SHARDS 2 /* shards are special, they use threads */)', ] %} diff --git a/tests/queries/0_stateless/02764_parallel_replicas_plain_merge_tree.sql b/tests/queries/0_stateless/02764_parallel_replicas_plain_merge_tree.sql index aaf68dfd300..9caa6f76e89 100644 --- a/tests/queries/0_stateless/02764_parallel_replicas_plain_merge_tree.sql +++ b/tests/queries/0_stateless/02764_parallel_replicas_plain_merge_tree.sql @@ -1,7 +1,7 @@ CREATE TABLE IF NOT EXISTS parallel_replicas_plain (x String) ENGINE=MergeTree() ORDER BY x; INSERT INTO parallel_replicas_plain SELECT toString(number) FROM numbers(10); -SET max_parallel_replicas=3, allow_experimental_parallel_reading_from_replicas=1, use_hedged_requests=0, cluster_for_parallel_replicas='parallel_replicas'; +SET max_parallel_replicas=3, allow_experimental_parallel_reading_from_replicas=1, cluster_for_parallel_replicas='parallel_replicas'; SET send_logs_level='error'; SET parallel_replicas_for_non_replicated_merge_tree = 0; diff --git a/tests/queries/0_stateless/02765_parallel_replicas_final_modifier.sql b/tests/queries/0_stateless/02765_parallel_replicas_final_modifier.sql index f447051e1e5..6c121802b06 100644 --- a/tests/queries/0_stateless/02765_parallel_replicas_final_modifier.sql +++ b/tests/queries/0_stateless/02765_parallel_replicas_final_modifier.sql @@ -2,7 +2,7 @@ CREATE TABLE IF NOT EXISTS parallel_replicas_final (x String) ENGINE=ReplacingMe INSERT INTO parallel_replicas_final SELECT toString(number) FROM numbers(10); -SET 
max_parallel_replicas=3, allow_experimental_parallel_reading_from_replicas=1, use_hedged_requests=0, cluster_for_parallel_replicas='parallel_replicas'; +SET max_parallel_replicas=3, allow_experimental_parallel_reading_from_replicas=1, cluster_for_parallel_replicas='parallel_replicas'; SET parallel_replicas_for_non_replicated_merge_tree = 1; SELECT * FROM parallel_replicas_final FINAL FORMAT Null; diff --git a/tests/queries/0_stateless/02769_parallel_replicas_unavailable_shards.sql b/tests/queries/0_stateless/02769_parallel_replicas_unavailable_shards.sql index 020a429c109..38d592201e3 100644 --- a/tests/queries/0_stateless/02769_parallel_replicas_unavailable_shards.sql +++ b/tests/queries/0_stateless/02769_parallel_replicas_unavailable_shards.sql @@ -4,7 +4,7 @@ INSERT INTO test_parallel_replicas_unavailable_shards SELECT * FROM numbers(10); SYSTEM FLUSH LOGS; -SET allow_experimental_parallel_reading_from_replicas=2, max_parallel_replicas=11, use_hedged_requests=0, cluster_for_parallel_replicas='parallel_replicas', parallel_replicas_for_non_replicated_merge_tree=1; +SET allow_experimental_parallel_reading_from_replicas=2, max_parallel_replicas=11, cluster_for_parallel_replicas='parallel_replicas', parallel_replicas_for_non_replicated_merge_tree=1; SET send_logs_level='error'; SELECT count() FROM test_parallel_replicas_unavailable_shards WHERE NOT ignore(*); diff --git a/tests/queries/0_stateless/02771_multidirectory_globs_storage_file.sh b/tests/queries/0_stateless/02771_multidirectory_globs_storage_file.sh index 46ce3d97ba2..932837b83db 100755 --- a/tests/queries/0_stateless/02771_multidirectory_globs_storage_file.sh +++ b/tests/queries/0_stateless/02771_multidirectory_globs_storage_file.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # Data preparation. 
# Now we can get the user_files_path by using the file function, we can also get it by this query: # "insert into function file('exist.txt', 'CSV', 'val1 char') values ('aaaa'); select _path from file('exist.txt', 'CSV', 'val1 char')" -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') rm -rf ${user_files_path:?}/${CLICKHOUSE_TEST_UNIQUE_NAME:?}/* mkdir -p ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ diff --git a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference index f688db940d9..35573110550 100644 --- a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference +++ b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference @@ -8,5 +8,5 @@ 5935810273536892891 7885388429666205427 8124171311239967992 -1 1 -- Simple query with analyzer and pure parallel replicas\nSELECT number\nFROM join_inner_table__fuzz_146_replicated\n SETTINGS\n allow_experimental_analyzer = 1,\n max_parallel_replicas = 2,\n cluster_for_parallel_replicas = \'test_cluster_one_shard_three_replicas_localhost\',\n allow_experimental_parallel_reading_from_replicas = 1,\n use_hedged_requests = 0; -0 2 SELECT `join_inner_table__fuzz_146_replicated`.`number` AS `number` FROM `default`.`join_inner_table__fuzz_146_replicated` SETTINGS allow_experimental_analyzer = 1, max_parallel_replicas = 2, cluster_for_parallel_replicas = \'test_cluster_one_shard_three_replicas_localhost\', allow_experimental_parallel_reading_from_replicas = 1, use_hedged_requests = 0 +1 1 -- Simple query with analyzer and pure parallel replicas\nSELECT number\nFROM join_inner_table__fuzz_146_replicated\n SETTINGS\n allow_experimental_analyzer = 1,\n max_parallel_replicas = 2,\n cluster_for_parallel_replicas = \'test_cluster_one_shard_three_replicas_localhost\',\n allow_experimental_parallel_reading_from_replicas = 1; +0 2 SELECT `join_inner_table__fuzz_146_replicated`.`number` AS `number` FROM `default`.`join_inner_table__fuzz_146_replicated` SETTINGS allow_experimental_analyzer = 1, max_parallel_replicas = 2, cluster_for_parallel_replicas = \'test_cluster_one_shard_three_replicas_localhost\', allow_experimental_parallel_reading_from_replicas = 1 diff --git a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql index 35089c0cedb..88a0d2163d6 100644 --- a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql +++ b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql @@ -24,8 +24,7 @@ FROM join_inner_table__fuzz_146_replicated allow_experimental_analyzer = 1, max_parallel_replicas = 2, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', - allow_experimental_parallel_reading_from_replicas = 1, - use_hedged_requests = 0; + allow_experimental_parallel_reading_from_replicas = 1; SYSTEM FLUSH LOGS; -- There should be 2 different queries diff --git a/tests/queries/0_stateless/02782_uniq_exact_parallel_merging_bug.sh b/tests/queries/0_stateless/02782_uniq_exact_parallel_merging_bug.sh index a7f71eacf0f..060efe86602 100755 --- a/tests/queries/0_stateless/02782_uniq_exact_parallel_merging_bug.sh +++ 
b/tests/queries/0_stateless/02782_uniq_exact_parallel_merging_bug.sh @@ -8,12 +8,12 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -clickhouse-client -q " +$CLICKHOUSE_CLIENT -q " CREATE TABLE ${CLICKHOUSE_DATABASE}.t(s String) ENGINE = MergeTree ORDER BY tuple(); " -clickhouse-client -q "insert into ${CLICKHOUSE_DATABASE}.t select number%10==0 ? toString(number) : '' from numbers_mt(1e7)" +$CLICKHOUSE_CLIENT -q "insert into ${CLICKHOUSE_DATABASE}.t select number%10==0 ? toString(number) : '' from numbers_mt(1e7)" -clickhouse-benchmark -q "select count(distinct s) from ${CLICKHOUSE_DATABASE}.t settings max_memory_usage = '50Mi'" --ignore-error -c 16 -i 1000 2>/dev/null +$CLICKHOUSE_BENCHMARK -q "select count(distinct s) from ${CLICKHOUSE_DATABASE}.t settings max_memory_usage = '50Mi'" --ignore-error -c 16 -i 1000 2>/dev/null diff --git a/tests/queries/0_stateless/02783_parallel_replicas_trivial_count_optimization.sh b/tests/queries/0_stateless/02783_parallel_replicas_trivial_count_optimization.sh index 9cfd3a392c8..bafab249b47 100755 --- a/tests/queries/0_stateless/02783_parallel_replicas_trivial_count_optimization.sh +++ b/tests/queries/0_stateless/02783_parallel_replicas_trivial_count_optimization.sh @@ -1,4 +1,6 @@ #!/usr/bin/env bash +# Tags: no-replicated-database +# Tag no-replicated-database: CREATE AS SELECT is disabled CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -30,7 +32,6 @@ function run_query_with_pure_parallel_replicas () { --query_id "${1}_pure" \ --max_parallel_replicas 3 \ --prefer_localhost_replica 1 \ - --use_hedged_requests 0 \ --cluster_for_parallel_replicas 'test_cluster_one_shard_three_replicas_localhost' \ --allow_experimental_parallel_reading_from_replicas 1 \ --allow_experimental_analyzer 0 @@ -40,7 +41,6 @@ function run_query_with_pure_parallel_replicas () { --query_id "${1}_pure_analyzer" \ --max_parallel_replicas 3 \ --prefer_localhost_replica 1 \ - --use_hedged_requests 0 \ --cluster_for_parallel_replicas 'test_cluster_one_shard_three_replicas_localhost' \ --allow_experimental_parallel_reading_from_replicas 1 \ --allow_experimental_analyzer 1 @@ -56,7 +56,6 @@ function run_query_with_custom_key_parallel_replicas () { --query "$2" \ --query_id "${1}_custom_key" \ --max_parallel_replicas 3 \ - --use_hedged_requests 0 \ --parallel_replicas_custom_key_filter_type 'default' \ --parallel_replicas_custom_key "$2" \ --allow_experimental_analyzer 0 @@ -65,7 +64,6 @@ function run_query_with_custom_key_parallel_replicas () { --query "$2" \ --query_id "${1}_custom_key_analyzer" \ --max_parallel_replicas 3 \ - --use_hedged_requests 0 \ --parallel_replicas_custom_key_filter_type 'default' \ --parallel_replicas_custom_key "$2" \ --allow_experimental_analyzer 1 diff --git a/tests/queries/0_stateless/02784_parallel_replicas_automatic_decision.sh b/tests/queries/0_stateless/02784_parallel_replicas_automatic_decision.sh index 741b51284fe..8a3b34e5cfa 100755 --- a/tests/queries/0_stateless/02784_parallel_replicas_automatic_decision.sh +++ b/tests/queries/0_stateless/02784_parallel_replicas_automatic_decision.sh @@ -49,7 +49,6 @@ function run_query_with_pure_parallel_replicas () { --query_id "${1}_pure" \ --max_parallel_replicas 3 \ --prefer_localhost_replica 1 \ - --use_hedged_requests 0 \ --cluster_for_parallel_replicas "parallel_replicas" \ --allow_experimental_parallel_reading_from_replicas 1 \ --parallel_replicas_for_non_replicated_merge_tree 1 \ diff --git 
a/tests/queries/0_stateless/02784_parallel_replicas_automatic_decision_join.sh b/tests/queries/0_stateless/02784_parallel_replicas_automatic_decision_join.sh index ed68a304b85..baeeb820da5 100755 --- a/tests/queries/0_stateless/02784_parallel_replicas_automatic_decision_join.sh +++ b/tests/queries/0_stateless/02784_parallel_replicas_automatic_decision_join.sh @@ -64,7 +64,6 @@ function run_query_with_pure_parallel_replicas () { --query_id "${1}_pure" \ --max_parallel_replicas 3 \ --prefer_localhost_replica 1 \ - --use_hedged_requests 0 \ --cluster_for_parallel_replicas "parallel_replicas" \ --allow_experimental_parallel_reading_from_replicas 1 \ --parallel_replicas_for_non_replicated_merge_tree 1 \ diff --git a/tests/queries/0_stateless/02802_clickhouse_disks_s3_copy.reference b/tests/queries/0_stateless/02802_clickhouse_disks_s3_copy.reference index 96860a2f90a..8da02f5bef6 100644 --- a/tests/queries/0_stateless/02802_clickhouse_disks_s3_copy.reference +++ b/tests/queries/0_stateless/02802_clickhouse_disks_s3_copy.reference @@ -2,3 +2,5 @@ s3_plain_native_copy Single operation copy has completed. s3_plain_no_native_copy Single part upload has completed. +copy from s3_plain_native_copy to s3_plain_another +Single operation copy has completed. diff --git a/tests/queries/0_stateless/02802_clickhouse_disks_s3_copy.sh b/tests/queries/0_stateless/02802_clickhouse_disks_s3_copy.sh index 33321607728..2b9e5296a05 100755 --- a/tests/queries/0_stateless/02802_clickhouse_disks_s3_copy.sh +++ b/tests/queries/0_stateless/02802_clickhouse_disks_s3_copy.sh @@ -24,5 +24,20 @@ function run_test_for_disk() clickhouse-disks -C "$config" --disk "$disk" remove $CLICKHOUSE_DATABASE/test.copy } +function run_test_copy_from_s3_to_s3(){ + local disk_src=$1 && shift + local disk_dest=$1 && shift + + echo "copy from $disk_src to $disk_dest" + clickhouse-disks -C "$config" --disk "$disk_src" write --input "$config" $CLICKHOUSE_DATABASE/test + + clickhouse-disks -C "$config" --log-level test copy --disk-from "$disk_src" --disk-to "$disk_dest" $CLICKHOUSE_DATABASE/test $CLICKHOUSE_DATABASE/test.copy |& { + grep -o -e "Single part upload has completed." -e "Single operation copy has completed." 
+ } + clickhouse-disks -C "$config" --disk "$disk_dest" remove $CLICKHOUSE_DATABASE/test.copy/test + clickhouse-disks -C "$config" --disk "$disk_dest" remove $CLICKHOUSE_DATABASE/test.copy +} + run_test_for_disk s3_plain_native_copy run_test_for_disk s3_plain_no_native_copy +run_test_copy_from_s3_to_s3 s3_plain_native_copy s3_plain_another diff --git a/tests/queries/0_stateless/02802_clickhouse_disks_s3_copy.xml b/tests/queries/0_stateless/02802_clickhouse_disks_s3_copy.xml index d4235a70903..4b45815a125 100644 --- a/tests/queries/0_stateless/02802_clickhouse_disks_s3_copy.xml +++ b/tests/queries/0_stateless/02802_clickhouse_disks_s3_copy.xml @@ -8,6 +8,13 @@ clickhouse true + + s3_plain + http://localhost:11111/test/clickhouse-disks/ + clickhouse + clickhouse + true + s3_plain diff --git a/tests/queries/0_stateless/02811_parallel_replicas_prewhere_count.sql b/tests/queries/0_stateless/02811_parallel_replicas_prewhere_count.sql index 374d73d7d03..14edeecf57e 100644 --- a/tests/queries/0_stateless/02811_parallel_replicas_prewhere_count.sql +++ b/tests/queries/0_stateless/02811_parallel_replicas_prewhere_count.sql @@ -13,7 +13,6 @@ SET skip_unavailable_shards=1, allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, -use_hedged_requests=0, cluster_for_parallel_replicas='parallel_replicas', parallel_replicas_for_non_replicated_merge_tree=1, parallel_replicas_min_number_of_rows_per_replica=1000; diff --git a/tests/queries/0_stateless/02813_series_period_detect.reference b/tests/queries/0_stateless/02813_series_period_detect.reference new file mode 100644 index 00000000000..e32a57d0935 --- /dev/null +++ b/tests/queries/0_stateless/02813_series_period_detect.reference @@ -0,0 +1,12 @@ +14 +3 +3 +3 +0 +62 +6 +6 +nan +3 +0 +nan diff --git a/tests/queries/0_stateless/02813_series_period_detect.sql b/tests/queries/0_stateless/02813_series_period_detect.sql new file mode 100644 index 00000000000..ef3479d321c --- /dev/null +++ b/tests/queries/0_stateless/02813_series_period_detect.sql @@ -0,0 +1,22 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS tb1; + +CREATE TABLE tb1 (n UInt32, a Array(Int32)) engine=Memory; +INSERT INTO tb1 VALUES (1, [10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30]), (2, [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]), (3, [6, 3, 4]); + +SELECT seriesPeriodDetectFFT([139, 87, 110, 68, 54, 50, 51, 53, 133, 86, 141, 97, 156, 94, 149, 95, 140, 77, 61, 50, 54, 47, 133, 72, 152, 94, 148, 105, 162, 101, 160, 87, 63, 53, 55, 54, 151, 103, 189, 108, 183, 113, 175, 113, 178, 90, 71, 62, 62, 65, 165, 109, 181, 115, 182, 121, 178, 114, 170]); +SELECT seriesPeriodDetectFFT([10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30]); +SELECT seriesPeriodDetectFFT([10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34]); +SELECT seriesPeriodDetectFFT([10.1, 10, 400, 10.1, 10, 400, 10.1, 10, 400, 10.1, 10, 400, 10.1, 10, 400, 10.1, 10, 400, 10.1, 10, 400, 10.1, 10, 400]); +SELECT seriesPeriodDetectFFT([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]); +SELECT seriesPeriodDetectFFT(arrayMap(x -> sin(x / 10), range(1000))); +SELECT seriesPeriodDetectFFT(arrayMap(x -> abs((x % 6) - 3), range(1000))); +SELECT seriesPeriodDetectFFT(arrayMap(x -> if((x % 6) < 3, 3, 0), range(1000))); +SELECT seriesPeriodDetectFFT([1,2,3]); +SELECT seriesPeriodDetectFFT(a) FROM tb1; +DROP TABLE IF EXISTS tb1; +SELECT seriesPeriodDetectFFT(); --{ 
serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT seriesPeriodDetectFFT([]); -- { serverError ILLEGAL_COLUMN} +SELECT seriesPeriodDetectFFT([NULL, NULL, NULL]); -- { serverError ILLEGAL_COLUMN} +SELECT seriesPeriodDetectFFT([10, 20, 30, 10, 202, 30, NULL]); -- { serverError ILLEGAL_COLUMN } \ No newline at end of file diff --git a/tests/queries/0_stateless/02817_structure_to_schema.reference b/tests/queries/0_stateless/02817_structure_to_schema.reference index 1f39a8ed50e..9fe41d838e7 100644 --- a/tests/queries/0_stateless/02817_structure_to_schema.reference +++ b/tests/queries/0_stateless/02817_structure_to_schema.reference @@ -189,7 +189,7 @@ struct Message } entries @0 : List(Entry); } - e1 @0 : List(E1); + e1 @0 : E1; struct E2 { struct Entry @@ -230,9 +230,9 @@ struct Message } entries @0 : List(Entry); } - e2 @1 : List(E2); + e2 @1 : E2; } - c1 @0 : C1; + c1 @0 : List(C1); } Read/write with no schema 0 @@ -400,49 +400,41 @@ message Message { message C1 { - message E1 + message E1Value { message E1Value { - message E1Value - { - repeated uint32 e1Value = 1; - } - repeated E1Value e1Value = 1; + repeated uint32 e1Value = 1; } - map e1 = 1; + repeated E1Value e1Value = 1; } - repeated E1 e1 = 1; - message E2 + map e1 = 1; + message E2Value { - message E2Value + message E1 { - message E1 - { - repeated bytes e1 = 1; - } - repeated E1 e1 = 1; + repeated bytes e1 = 1; + } + repeated E1 e1 = 1; + message E2 + { + uint32 e1 = 1; message E2 { - uint32 e1 = 1; - message E2 + message E1 { - message E1 - { - repeated bytes e1 = 1; - } - repeated E1 e1 = 1; - uint32 e2 = 2; + repeated bytes e1 = 1; } - E2 e2 = 2; + repeated E1 e1 = 1; + uint32 e2 = 2; } - repeated E2 e2 = 2; + E2 e2 = 2; } - map e2 = 1; + repeated E2 e2 = 2; } - repeated E2 e2 = 2; + map e2 = 2; } - C1 c1 = 1; + repeated C1 c1 = 1; } Read/write with no schema 0 diff --git a/tests/queries/0_stateless/02833_local_with_dialect.reference b/tests/queries/0_stateless/02833_local_with_dialect.reference index dbb67375997..573541ac970 100644 --- a/tests/queries/0_stateless/02833_local_with_dialect.reference +++ b/tests/queries/0_stateless/02833_local_with_dialect.reference @@ -1,2 +1 @@ 0 -[?2004h[?2004lBye. diff --git a/tests/queries/0_stateless/02833_local_with_dialect.sh b/tests/queries/0_stateless/02833_local_with_dialect.sh index 012a6d91269..de009961cba 100755 --- a/tests/queries/0_stateless/02833_local_with_dialect.sh +++ b/tests/queries/0_stateless/02833_local_with_dialect.sh @@ -6,4 +6,5 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CUR_DIR"/../shell_config.sh -echo "exit" | ${CLICKHOUSE_LOCAL} --query "from s\"SELECT * FROM numbers(1)\"" --dialect prql --interactive +# Remove last line since the good bye message changes depending on the date +echo "exit" | ${CLICKHOUSE_LOCAL} --query "from s\"SELECT * FROM numbers(1)\"" --dialect prql --interactive | head -n -1 diff --git a/tests/queries/0_stateless/02835_parallel_replicas_over_distributed.sql b/tests/queries/0_stateless/02835_parallel_replicas_over_distributed.sql index 60aa5748575..1e6f9304c0c 100644 --- a/tests/queries/0_stateless/02835_parallel_replicas_over_distributed.sql +++ b/tests/queries/0_stateless/02835_parallel_replicas_over_distributed.sql @@ -14,13 +14,13 @@ insert into test select *, today() from numbers(100); SELECT count(), min(id), max(id), avg(id) FROM test_d -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1, use_hedged_requests=0; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1; insert into test select *, today() from numbers(100); SELECT count(), min(id), max(id), avg(id) FROM test_d -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1, use_hedged_requests=0; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1; -- 2 shards @@ -38,10 +38,10 @@ insert into test2 select *, today() from numbers(100); SELECT count(), min(id), max(id), avg(id) FROM test2_d -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1, use_hedged_requests=0; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1; insert into test2 select *, today() from numbers(100); SELECT count(), min(id), max(id), avg(id) FROM test2_d -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1, use_hedged_requests=0; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1; diff --git a/tests/queries/0_stateless/02841_parallel_replicas_summary.sh b/tests/queries/0_stateless/02841_parallel_replicas_summary.sh index 792c45b06d6..c82d2c8b0c0 100755 --- a/tests/queries/0_stateless/02841_parallel_replicas_summary.sh +++ b/tests/queries/0_stateless/02841_parallel_replicas_summary.sh @@ -36,7 +36,6 @@ echo " cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_parallel_reading_from_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, - use_hedged_requests = 0, interactive_delay=0 "\ | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&query_id=${query_id_base}_interactive_0" --data-binary @- -vvv 2>&1 \ @@ -51,7 +50,6 @@ echo " cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_parallel_reading_from_replicas = 2, 
parallel_replicas_for_non_replicated_merge_tree = 1, - use_hedged_requests = 0, interactive_delay=99999999999 "\ | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&query_id=${query_id_base}_interactive_high" --data-binary @- -vvv 2>&1 \ diff --git a/tests/queries/0_stateless/02861_index_set_incorrect_args.sql b/tests/queries/0_stateless/02861_index_set_incorrect_args.sql index fa51f5c9abc..17b505cd051 100644 --- a/tests/queries/0_stateless/02861_index_set_incorrect_args.sql +++ b/tests/queries/0_stateless/02861_index_set_incorrect_args.sql @@ -2,5 +2,5 @@ DROP TABLE IF EXISTS set_index__fuzz_41; CREATE TABLE set_index__fuzz_41 (`a` Date, `b` Nullable(DateTime64(3)), INDEX b_set b TYPE set(0) GRANULARITY 1) ENGINE = MergeTree ORDER BY tuple(); INSERT INTO set_index__fuzz_41 (a) VALUES (today()); -SELECT b FROM set_index__fuzz_41 WHERE and(b = 256) SETTINGS force_data_skipping_indices = 'b_set', optimize_move_to_prewhere = 0, max_parallel_replicas=2, parallel_replicas_for_non_replicated_merge_tree=1, allow_experimental_parallel_reading_from_replicas=2, use_hedged_requests=0; -- { serverError TOO_FEW_ARGUMENTS_FOR_FUNCTION } +SELECT b FROM set_index__fuzz_41 WHERE and(b = 256) SETTINGS force_data_skipping_indices = 'b_set', optimize_move_to_prewhere = 0, max_parallel_replicas=2, parallel_replicas_for_non_replicated_merge_tree=1, allow_experimental_parallel_reading_from_replicas=2; -- { serverError TOO_FEW_ARGUMENTS_FOR_FUNCTION } DROP TABLE set_index__fuzz_41; diff --git a/tests/queries/0_stateless/02861_replacing_merge_tree_with_cleanup.sql b/tests/queries/0_stateless/02861_replacing_merge_tree_with_cleanup.sql index 7b78e2900e7..4cd44a131e3 100644 --- a/tests/queries/0_stateless/02861_replacing_merge_tree_with_cleanup.sql +++ b/tests/queries/0_stateless/02861_replacing_merge_tree_with_cleanup.sql @@ -2,7 +2,8 @@ DROP TABLE IF EXISTS test; CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) SETTINGS vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 0, min_rows_for_wide_part = 1, - min_bytes_for_wide_part = 1; + min_bytes_for_wide_part = 1, + allow_experimental_replacing_merge_with_cleanup=1; -- Expect d6 to be version=3 is_deleted=false INSERT INTO test (*) VALUES ('d1', 1, 0), ('d1', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d1', 5, 0), ('d2', 1, 0), ('d3', 1, 0), ('d4', 1, 0), ('d5', 1, 0), ('d6', 1, 0), ('d6', 3, 0); diff --git a/tests/queries/0_stateless/02862_index_inverted_incorrect_args.sql b/tests/queries/0_stateless/02862_index_inverted_incorrect_args.sql index 0678023f2f4..7ba122a7155 100644 --- a/tests/queries/0_stateless/02862_index_inverted_incorrect_args.sql +++ b/tests/queries/0_stateless/02862_index_inverted_incorrect_args.sql @@ -3,7 +3,7 @@ DROP TABLE IF EXISTS tab; SET allow_experimental_inverted_index=1; CREATE TABLE tab (`k` UInt64, `s` Map(String, String), INDEX af mapKeys(s) TYPE inverted(2) GRANULARITY 1) ENGINE = MergeTree ORDER BY k SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; INSERT INTO tab (k) VALUES (0); -SELECT * FROM tab PREWHERE (s[NULL]) = 'Click a03' SETTINGS allow_experimental_analyzer=1; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } +SELECT * FROM tab PREWHERE (s[NULL]) = 'Click a03' SETTINGS allow_experimental_analyzer=1; SELECT * FROM tab PREWHERE (s[1]) = 'Click a03' SETTINGS allow_experimental_analyzer=1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } SELECT * FROM tab PREWHERE (s['foo']) = 
'Click a03' SETTINGS allow_experimental_analyzer=1; DROP TABLE tab; diff --git a/tests/queries/0_stateless/02869_parallel_replicas_read_from_several.sql b/tests/queries/0_stateless/02869_parallel_replicas_read_from_several.sql index 9559b46fa08..e040fae1fa6 100644 --- a/tests/queries/0_stateless/02869_parallel_replicas_read_from_several.sql +++ b/tests/queries/0_stateless/02869_parallel_replicas_read_from_several.sql @@ -24,5 +24,5 @@ system sync replica t3; SELECT count(), min(k), max(k), avg(k) FROM t1 -SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 3, prefer_localhost_replica = 0, use_hedged_requests=0, +SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 3, prefer_localhost_replica = 0, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', parallel_replicas_single_task_marks_count_multiplier = 0.001; diff --git a/tests/queries/0_stateless/02874_array_random_sample.sh b/tests/queries/0_stateless/02874_array_random_sample.sh index fe136d6d5d2..b5bfc422b5a 100755 --- a/tests/queries/0_stateless/02874_array_random_sample.sh +++ b/tests/queries/0_stateless/02874_array_random_sample.sh @@ -11,7 +11,7 @@ passed_tests=0 # Test Function for Integer Arrays run_integer_test() { - query_result=$(clickhouse-client -q "SELECT arrayRandomSample([1,2,3], 2)") + query_result=$($CLICKHOUSE_CLIENT_BINARY -q "SELECT arrayRandomSample([1,2,3], 2)") mapfile -t sorted_result < <(echo "$query_result" | tr -d '[]' | tr ',' '\n' | sort -n) declare -A expected_outcomes expected_outcomes["1 2"]=1 @@ -34,7 +34,7 @@ run_integer_test() { # Test Function for String Arrays run_string_test() { - query_result=$(clickhouse-client -q "SELECT arrayRandomSample(['a','b','c'], 2)") + query_result=$($CLICKHOUSE_CLIENT_BINARY -q "SELECT arrayRandomSample(['a','b','c'], 2)") mapfile -t sorted_result < <(echo "$query_result" | tr -d "[]'" | tr ',' '\n' | sort) declare -A expected_outcomes expected_outcomes["a b"]=1 @@ -57,7 +57,7 @@ run_string_test() { # Test Function for Nested Arrays run_nested_array_test() { - query_result=$(clickhouse-client -q "SELECT arrayRandomSample([[7,2],[3,4],[7,6]], 2)") + query_result=$($CLICKHOUSE_CLIENT_BINARY -q "SELECT arrayRandomSample([[7,2],[3,4],[7,6]], 2)") # Convert to a space-separated string for easy sorting. 
converted_result=$(echo "$query_result" | tr -d '[]' | tr ',' ' ') @@ -87,7 +87,7 @@ run_nested_array_test() { # Test Function for K > array.size run_higher_k_test() { - query_result=$(clickhouse-client -q "SELECT arrayRandomSample([1,2,3], 5)") + query_result=$($CLICKHOUSE_CLIENT_BINARY -q "SELECT arrayRandomSample([1,2,3], 5)") mapfile -t sorted_result < <(echo "$query_result" | tr -d '[]' | tr ',' '\n' | sort -n) sorted_original=("1" "2" "3") @@ -111,7 +111,7 @@ run_higher_k_test() { # Test Function for Integer Arrays with samples = 0 run_integer_with_samples_0_test() { - query_result=$(clickhouse-client -q "SELECT arrayRandomSample([1,2,3], 0)") + query_result=$($CLICKHOUSE_CLIENT_BINARY -q "SELECT arrayRandomSample([1,2,3], 0)") mapfile -t sorted_result < <(echo "$query_result" | tr -d '[]' | tr ',' '\n' | sort -n) # An empty array should produce an empty string after transformations @@ -137,7 +137,7 @@ run_integer_with_samples_0_test() { # Test Function for Empty Array with K > 0 run_empty_array_with_k_test() { - query_result=$(clickhouse-client -q "SELECT arrayRandomSample([], 5)") + query_result=$($CLICKHOUSE_CLIENT_BINARY -q "SELECT arrayRandomSample([], 5)") if [[ "$query_result" == "[]" ]]; then echo "Empty Array with K > 0 Test: Passed" @@ -153,7 +153,7 @@ run_empty_array_with_k_test() { # Test Function for Non-Unsigned-Integer K run_non_unsigned_integer_k_test() { # Test with negative integer - query_result=$(clickhouse-client -q "SELECT arrayRandomSample([1, 2, 3], -5)" 2>&1) + query_result=$($CLICKHOUSE_CLIENT_BINARY -q "SELECT arrayRandomSample([1, 2, 3], -5)" 2>&1) if [[ "$query_result" == *"ILLEGAL_TYPE_OF_ARGUMENT"* ]]; then echo "Non-Unsigned-Integer K Test (Negative Integer): Passed" ((passed_tests++)) @@ -165,7 +165,7 @@ run_non_unsigned_integer_k_test() { ((total_tests++)) # Test with string - query_result=$(clickhouse-client -q "SELECT arrayRandomSample([1, 2, 3], 'a')" 2>&1) + query_result=$($CLICKHOUSE_CLIENT_BINARY -q "SELECT arrayRandomSample([1, 2, 3], 'a')" 2>&1) if [[ "$query_result" == *"ILLEGAL_TYPE_OF_ARGUMENT"* ]]; then echo "Non-Unsigned-Integer K Test (String): Passed" ((passed_tests++)) @@ -177,7 +177,7 @@ run_non_unsigned_integer_k_test() { ((total_tests++)) # Test with floating-point number - query_result=$(clickhouse-client -q "SELECT arrayRandomSample([1, 2, 3], 1.5)" 2>&1) + query_result=$($CLICKHOUSE_CLIENT_BINARY -q "SELECT arrayRandomSample([1, 2, 3], 1.5)" 2>&1) if [[ "$query_result" == *"ILLEGAL_TYPE_OF_ARGUMENT"* ]]; then echo "Non-Unsigned-Integer K Test (Floating-Point): Passed" ((passed_tests++)) @@ -193,18 +193,18 @@ run_non_unsigned_integer_k_test() { run_multi_row_scalar_k_test() { # Create a table. Use a random database name as tests potentially run in parallel. 
db=`tr -dc A-Za-z0-9 = '2023-09-19 00:00:00') AND (processed_at <= '2023-09-20 01:00:00'); +SELECT tid, processed_at, created_at, amount FROM t FINAL ORDER BY tid; + +SELECT sum(amount) FROM t FINAL WHERE (processed_at >= '2023-09-19 00:00:00') AND (processed_at <= '2023-09-20 01:00:00'); + +INSERT INTO t VALUES (5879429,'2023-07-01 03:50:35','2023-07-01 03:50:35',-278) (5881397,'2023-07-01 06:22:26','2023-07-01 06:22:27',2807) (5925060,'2023-07-04 00:24:03','2023-07-04 00:24:02',-12) (5936591,'2023-07-04 07:37:19','2023-07-04 07:37:18',-12) (5940709,'2023-07-04 09:13:35','2023-07-04 09:13:35',2820) (5942342,'2023-07-04 09:58:00','2023-07-04 09:57:59',-12) (5952231,'2023-07-04 22:33:24','2023-07-04 22:33:24',1692) (5959449,'2023-07-05 04:32:55','2023-07-05 04:32:54',-12) (5963240,'2023-07-05 06:37:08','2023-07-05 06:37:09',1709) (5965742,'2023-07-05 07:27:01','2023-07-05 07:27:02',1709) (5969948,'2023-07-05 08:44:36','2023-07-05 08:44:37',2278) (5971673,'2023-07-05 09:14:09','2023-07-05 09:14:09',5695) (6012987,'2023-07-06 20:52:28','2023-07-06 20:52:27',-536); + +SELECT tid, processed_at, created_at, amount FROM t FINAL ORDER BY tid; + +SELECT sum(amount) FROM t FINAL WHERE (processed_at >= '2023-09-19 00:00:00') AND (processed_at <= '2023-09-20 01:00:00'); DROP TABLE t; diff --git a/tests/queries/0_stateless/02875_parallel_replicas_cluster_all_replicas.sql b/tests/queries/0_stateless/02875_parallel_replicas_cluster_all_replicas.sql index 1201a156246..f59d38ceb04 100644 --- a/tests/queries/0_stateless/02875_parallel_replicas_cluster_all_replicas.sql +++ b/tests/queries/0_stateless/02875_parallel_replicas_cluster_all_replicas.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS tt; CREATE TABLE tt (n UInt64) ENGINE=MergeTree() ORDER BY tuple(); INSERT INTO tt SELECT * FROM numbers(10); -SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, use_hedged_requests=0, parallel_replicas_for_non_replicated_merge_tree=1; +SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1; SELECT count() FROM clusterAllReplicas('test_cluster_two_shard_three_replicas_localhost', currentDatabase(), tt) settings log_comment='02875_190aed82-2423-413b-ad4c-24dcca50f65b'; SYSTEM FLUSH LOGS; diff --git a/tests/queries/0_stateless/02875_parallel_replicas_remote.sql b/tests/queries/0_stateless/02875_parallel_replicas_remote.sql index f47fc559df9..5fbaf34b621 100644 --- a/tests/queries/0_stateless/02875_parallel_replicas_remote.sql +++ b/tests/queries/0_stateless/02875_parallel_replicas_remote.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS tt; CREATE TABLE tt (n UInt64) ENGINE=MergeTree() ORDER BY tuple(); INSERT INTO tt SELECT * FROM numbers(10); -SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, use_hedged_requests=0, parallel_replicas_for_non_replicated_merge_tree=1; +SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1; SELECT count() FROM remote('127.0.0.{1..6}', currentDatabase(), tt) settings log_comment='02875_89f3c39b-1919-48cb-b66e-ef9904e73146'; SYSTEM FLUSH LOGS; diff --git a/tests/queries/0_stateless/02876_formats_with_names_dont_use_header.sh b/tests/queries/0_stateless/02876_formats_with_names_dont_use_header.sh index ce06ff530b9..04df7f99589 100755 --- a/tests/queries/0_stateless/02876_formats_with_names_dont_use_header.sh +++ b/tests/queries/0_stateless/02876_formats_with_names_dont_use_header.sh @@ -8,3 +8,4 
@@ echo -e "a,b,c\n1,2,3" > $CLICKHOUSE_TEST_UNIQUE_NAME.csvwithnames $CLICKHOUSE_LOCAL -q "select b from file('$CLICKHOUSE_TEST_UNIQUE_NAME.csvwithnames') settings input_format_with_names_use_header=0" +rm $CLICKHOUSE_TEST_UNIQUE_NAME.csvwithnames diff --git a/tests/queries/0_stateless/02884_parallel_window_functions.sql b/tests/queries/0_stateless/02884_parallel_window_functions.sql index 3151b42f896..c5ab013a198 100644 --- a/tests/queries/0_stateless/02884_parallel_window_functions.sql +++ b/tests/queries/0_stateless/02884_parallel_window_functions.sql @@ -1,9 +1,11 @@ +-- Tags: long, no-tsan, no-asan, no-ubsan, no-msan, no-debug + CREATE TABLE window_funtion_threading Engine = MergeTree ORDER BY (ac, nw) AS SELECT toUInt64(toFloat32(number % 2) % 20000000) as ac, - toFloat32(1) as wg, + toFloat32(1) as wg, toUInt16(toFloat32(number % 3) % 400) as nw FROM numbers_mt(10000000); diff --git a/tests/queries/0_stateless/02884_parallel_window_functions_bug.reference b/tests/queries/0_stateless/02884_parallel_window_functions_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02884_parallel_window_functions_bug.sql b/tests/queries/0_stateless/02884_parallel_window_functions_bug.sql new file mode 100644 index 00000000000..84bc69e2310 --- /dev/null +++ b/tests/queries/0_stateless/02884_parallel_window_functions_bug.sql @@ -0,0 +1,84 @@ +CREATE TABLE IF NOT EXISTS posts +( + `page_id` LowCardinality(String), + `post_id` String CODEC(LZ4), + `host_id` UInt32 CODEC(T64, LZ4), + `path_id` UInt32, + `created` DateTime CODEC(T64, LZ4), + `as_of` DateTime CODEC(T64, LZ4) +) +ENGINE = ReplacingMergeTree(as_of) +PARTITION BY toStartOfMonth(created) +ORDER BY (page_id, post_id); + +CREATE TABLE IF NOT EXISTS post_metrics +( + `page_id` LowCardinality(String), + `post_id` String CODEC(LZ4), + `created` DateTime CODEC(T64, LZ4), + `impressions` UInt32 CODEC(T64, LZ4), + `clicks` UInt32 CODEC(T64, LZ4), + `as_of` DateTime CODEC(T64, LZ4) +) +ENGINE = ReplacingMergeTree(as_of) +PARTITION BY toStartOfMonth(created) +ORDER BY (page_id, post_id); + +INSERT INTO posts SELECT + repeat('a', (number % 10) + 1), + toString(number), + number % 10, + number, + now() - toIntervalMinute(number), + now() +FROM numbers(100000); + +INSERT INTO post_metrics SELECT + repeat('a', (number % 10) + 1), + toString(number), + now() - toIntervalMinute(number), + number * 100, + number * 10, + now() +FROM numbers(100000); + +SELECT + host_id, + path_id, + max(rank) AS rank +FROM +( + WITH + as_of_posts AS + ( + SELECT + *, + row_number() OVER (PARTITION BY (page_id, post_id) ORDER BY as_of DESC) AS row_num + FROM posts + WHERE (created >= subtractHours(now(), 24)) AND (host_id > 0) + ), + as_of_post_metrics AS + ( + SELECT + *, + row_number() OVER (PARTITION BY (page_id, post_id) ORDER BY as_of DESC) AS row_num + FROM post_metrics + WHERE created >= subtractHours(now(), 24) + ) + SELECT + page_id, + post_id, + host_id, + path_id, + impressions, + clicks, + ntile(20) OVER (PARTITION BY page_id ORDER BY clicks ASC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS rank + FROM as_of_posts + GLOBAL LEFT JOIN as_of_post_metrics USING (page_id, post_id, row_num) + WHERE (row_num = 1) AND (impressions > 0) +) AS t +WHERE t.rank > 18 +GROUP BY + host_id, + path_id +FORMAT Null; diff --git a/tests/queries/0_stateless/02887_insert_quorum_wo_keeper_retries.sql b/tests/queries/0_stateless/02887_insert_quorum_wo_keeper_retries.sql index 489d25d7433..3e75d415089 100644 --- 
a/tests/queries/0_stateless/02887_insert_quorum_wo_keeper_retries.sql +++ b/tests/queries/0_stateless/02887_insert_quorum_wo_keeper_retries.sql @@ -7,6 +7,7 @@ CREATE TABLE quorum1(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{d CREATE TABLE quorum2(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02887/quorum', '2') ORDER BY x; SET insert_keeper_fault_injection_probability=0; +SET insert_keeper_max_retries = 0; SET insert_quorum = 2; system enable failpoint replicated_merge_tree_insert_quorum_fail_0; diff --git a/tests/queries/0_stateless/02889_file_log_save_errors.sh b/tests/queries/0_stateless/02889_file_log_save_errors.sh index 62f876e13db..8ef7816d57d 100755 --- a/tests/queries/0_stateless/02889_file_log_save_errors.sh +++ b/tests/queries/0_stateless/02889_file_log_save_errors.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +user_files_path=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') ${CLICKHOUSE_CLIENT} --query "drop table if exists file_log;" ${CLICKHOUSE_CLIENT} --query "drop table if exists log_errors;" @@ -43,4 +43,3 @@ ${CLICKHOUSE_CLIENT} --query "drop table file_log;" ${CLICKHOUSE_CLIENT} --query "drop table log_errors;" rm -rf ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME:?} - diff --git a/tests/queries/0_stateless/02898_parallel_replicas_progress_bar.sql b/tests/queries/0_stateless/02898_parallel_replicas_progress_bar.sql index 70a1cedf663..d8bfec12b3a 100644 --- a/tests/queries/0_stateless/02898_parallel_replicas_progress_bar.sql +++ b/tests/queries/0_stateless/02898_parallel_replicas_progress_bar.sql @@ -2,9 +2,9 @@ DROP TABLE IF EXISTS t1 SYNC; DROP TABLE IF EXISTS t2 SYNC; DROP TABLE IF EXISTS t3 SYNC; -CREATE TABLE t1(k UInt32, v String) ENGINE ReplicatedMergeTree('/parallel_replicas/{database}/test_tbl', 'r1') ORDER BY k; -CREATE TABLE t2(k UInt32, v String) ENGINE ReplicatedMergeTree('/parallel_replicas/{database}/test_tbl', 'r2') ORDER BY k; -CREATE TABLE t3(k UInt32, v String) ENGINE ReplicatedMergeTree('/parallel_replicas/{database}/test_tbl', 'r3') ORDER BY k; +CREATE TABLE t1(k UInt32, v String) ENGINE ReplicatedMergeTree('/02898_parallel_replicas/{database}/test_tbl', 'r1') ORDER BY k; +CREATE TABLE t2(k UInt32, v String) ENGINE ReplicatedMergeTree('/02898_parallel_replicas/{database}/test_tbl', 'r2') ORDER BY k; +CREATE TABLE t3(k UInt32, v String) ENGINE ReplicatedMergeTree('/02898_parallel_replicas/{database}/test_tbl', 'r3') ORDER BY k; insert into t1 select number, toString(number) from numbers(1000, 1000); insert into t2 select number, toString(number) from numbers(2000, 1000); @@ -14,7 +14,7 @@ system sync replica t1; system sync replica t2; system sync replica t3; -SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, use_hedged_requests=0, parallel_replicas_for_non_replicated_merge_tree=1, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; +SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; -- default coordinator SELECT count(), min(k), max(k), 
avg(k) FROM t1 SETTINGS log_comment='02898_default_190aed82-2423-413b-ad4c-24dcca50f65b'; diff --git a/tests/queries/0_stateless/02900_clickhouse_local_drop_current_database.reference b/tests/queries/0_stateless/02900_clickhouse_local_drop_current_database.reference index 4a321380536..838bf18b937 100644 --- a/tests/queries/0_stateless/02900_clickhouse_local_drop_current_database.reference +++ b/tests/queries/0_stateless/02900_clickhouse_local_drop_current_database.reference @@ -5,6 +5,6 @@ SELECT 1; DROP DATABASE foo; SELECT 2; 2 -USE _local; +USE default; SELECT 3; 3 diff --git a/tests/queries/0_stateless/02900_clickhouse_local_drop_current_database.sh b/tests/queries/0_stateless/02900_clickhouse_local_drop_current_database.sh index 1af40f8778d..3250c70a268 100755 --- a/tests/queries/0_stateless/02900_clickhouse_local_drop_current_database.sh +++ b/tests/queries/0_stateless/02900_clickhouse_local_drop_current_database.sh @@ -10,6 +10,6 @@ ${CLICKHOUSE_LOCAL} --echo --multiquery " SELECT 1; DROP DATABASE foo; SELECT 2; - USE _local; + USE default; SELECT 3; " diff --git a/tests/queries/0_stateless/02900_union_schema_inference_mode.reference b/tests/queries/0_stateless/02900_union_schema_inference_mode.reference new file mode 100644 index 00000000000..864cd780ddb --- /dev/null +++ b/tests/queries/0_stateless/02900_union_schema_inference_mode.reference @@ -0,0 +1,33 @@ +a Nullable(Int64) +obj Tuple(f1 Nullable(Int64), f2 Nullable(String), f3 Nullable(Int64)) +b Nullable(Int64) +c Nullable(String) +{"a":"1","obj":{"f1":"1","f2":"2020-01-01","f3":null},"b":null,"c":null} +{"a":null,"obj":{"f1":null,"f2":"Some string","f3":"2"},"b":"2","c":null} +{"a":null,"obj":{"f1":null,"f2":null,"f3":null},"b":null,"c":"hello"} +UNION data1.jsonl a Nullable(Int64), obj Tuple(f1 Nullable(Int64), f2 Nullable(Date)) +UNION data2.jsonl b Nullable(Int64), obj Tuple(f2 Nullable(String), f3 Nullable(Int64)) +UNION data3.jsonl c Nullable(String) +c Nullable(String) +a Nullable(Int64) +obj Tuple(f1 Nullable(Int64), f2 Nullable(String), f3 Nullable(Int64)) +b Nullable(Int64) +c Nullable(String) +a Nullable(Int64) +obj Tuple(f1 Nullable(Int64), f2 Nullable(String), f3 Nullable(Int64)) +b Nullable(Int64) +c Nullable(String) +{"a":"1","obj":{"f1":"1","f2":"2020-01-01","f3":null},"b":null,"c":null} +{"a":null,"obj":{"f1":null,"f2":"Some string","f3":"2"},"b":"2","c":null} +{"a":null,"obj":{"f1":null,"f2":null,"f3":null},"b":null,"c":"hello"} +UNION archive.tar::data1.jsonl a Nullable(Int64), obj Tuple(f1 Nullable(Int64), f2 Nullable(Date)) +UNION archive.tar::data2.jsonl b Nullable(Int64), obj Tuple(f2 Nullable(String), f3 Nullable(Int64)) +UNION archive.tar::data3.jsonl c Nullable(String) +c Nullable(String) +a Nullable(Int64) +obj Tuple(f1 Nullable(Int64), f2 Nullable(String), f3 Nullable(Int64)) +b Nullable(Int64) +c Nullable(String) +1 +1 +1 diff --git a/tests/queries/0_stateless/02900_union_schema_inference_mode.sh b/tests/queries/0_stateless/02900_union_schema_inference_mode.sh new file mode 100755 index 00000000000..dc0dd8ae1f4 --- /dev/null +++ b/tests/queries/0_stateless/02900_union_schema_inference_mode.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-msan, no-ubsan + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +mkdir -p $CLICKHOUSE_TEST_UNIQUE_NAME +echo '{"a" : 1, "obj" : {"f1" : 1, "f2" : "2020-01-01"}}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data1.jsonl +echo '{"b" : 2, "obj" : {"f3" : 2, "f2" : "Some string"}}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data2.jsonl +echo '{"c" : "hello"}' > $CLICKHOUSE_TEST_UNIQUE_NAME/data3.jsonl + +$CLICKHOUSE_LOCAL -nm -q " +set schema_inference_mode = 'union'; +desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl'); +select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl') order by tuple(*) format JSONEachRow; +select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache order by file; +" + +$CLICKHOUSE_LOCAL -nm -q " +set schema_inference_mode = 'union'; +desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data3.jsonl'); +desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3}.jsonl'); +" + +cd $CLICKHOUSE_TEST_UNIQUE_NAME/ && tar -cf archive.tar data1.jsonl data2.jsonl data3.jsonl && cd .. + +$CLICKHOUSE_LOCAL -nm -q " +set schema_inference_mode = 'union'; +desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl'); +select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl') order by tuple(*) format JSONEachRow; +select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache order by file; +" + +$CLICKHOUSE_LOCAL -nm -q " +set schema_inference_mode = 'union'; +desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data3.jsonl'); +desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/archive.tar :: data{1,2,3}.jsonl'); +" + +echo 'Error' > $CLICKHOUSE_TEST_UNIQUE_NAME/data4.jsonl +$CLICKHOUSE_LOCAL -q "desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3,4}.jsonl') settings schema_inference_mode='union'" 2>&1 | grep -c -F "Cannot extract table structure" + +$CLICKHOUSE_LOCAL -nm -q " +set schema_inference_mode = 'union'; +desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{2,3}.jsonl'); +desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2,3,4}.jsonl'); +" 2>&1 | grep -c -F "Cannot extract table structure" + +echo 42 > $CLICKHOUSE_TEST_UNIQUE_NAME/data1.csv +echo 42, 43 > $CLICKHOUSE_TEST_UNIQUE_NAME/data2.csv + +$CLICKHOUSE_LOCAL -q "desc file('$CLICKHOUSE_TEST_UNIQUE_NAME/data{1,2}.csv') settings schema_inference_mode='union'" 2>&1 | grep -c -F "BAD_ARGUMENTS"; + +rm -rf ${CLICKHOUSE_TEST_UNIQUE_NAME} + diff --git a/tests/queries/0_stateless/02901_parallel_replicas_rollup.sh b/tests/queries/0_stateless/02901_parallel_replicas_rollup.sh index f23b80348c1..029b4d07ee2 100755 --- a/tests/queries/0_stateless/02901_parallel_replicas_rollup.sh +++ b/tests/queries/0_stateless/02901_parallel_replicas_rollup.sh @@ -29,8 +29,7 @@ $CLICKHOUSE_CLIENT \ --query_id "${query_id}" \ --max_parallel_replicas 3 \ --prefer_localhost_replica 1 \ - --use_hedged_requests 0 \ - --cluster_for_parallel_replicas "parallel_replicas" \ + --cluster_for_parallel_replicas "test_cluster_one_shard_three_replicas_localhost" \ --allow_experimental_parallel_reading_from_replicas 1 \ --parallel_replicas_for_non_replicated_merge_tree 1 \ --parallel_replicas_min_number_of_rows_per_replica 0 \ @@ -63,8 +62,7 @@ $CLICKHOUSE_CLIENT \ --query_id "${query_id}" \ --max_parallel_replicas 3 \ --prefer_localhost_replica 1 \ - --use_hedged_requests 0 \ - --cluster_for_parallel_replicas "parallel_replicas" \ + --cluster_for_parallel_replicas "test_cluster_one_shard_three_replicas_localhost" \ --allow_experimental_parallel_reading_from_replicas 1 \ 
--parallel_replicas_for_non_replicated_merge_tree 1 \ --parallel_replicas_min_number_of_rows_per_replica 0 \ diff --git a/tests/queries/0_stateless/02906_flatten_only_true_nested.reference b/tests/queries/0_stateless/02906_flatten_only_true_nested.reference new file mode 100644 index 00000000000..e7a96da8db9 --- /dev/null +++ b/tests/queries/0_stateless/02906_flatten_only_true_nested.reference @@ -0,0 +1,3 @@ +data.x Array(UInt32) +data.y Array(UInt32) +data Array(Tuple(x UInt64, y UInt64)) diff --git a/tests/queries/0_stateless/02906_flatten_only_true_nested.sql b/tests/queries/0_stateless/02906_flatten_only_true_nested.sql new file mode 100644 index 00000000000..e930b46bd70 --- /dev/null +++ b/tests/queries/0_stateless/02906_flatten_only_true_nested.sql @@ -0,0 +1,9 @@ +set flatten_nested = 1; +drop table if exists test_nested; +create table test_nested (data Nested(x UInt32, y UInt32)) engine=Memory; +desc test_nested; +drop table test_nested; +drop table if exists test_array_tuple; +create table test_array_tuple (data Array(Tuple(x UInt64, y UInt64))) engine=Memory; +desc test_array_tuple; +drop table test_array_tuple; diff --git a/tests/queries/0_stateless/02906_force_optimize_projection_name.sql b/tests/queries/0_stateless/02906_force_optimize_projection_name.sql index 773f8cc514d..952ef8178b7 100644 --- a/tests/queries/0_stateless/02906_force_optimize_projection_name.sql +++ b/tests/queries/0_stateless/02906_force_optimize_projection_name.sql @@ -8,7 +8,8 @@ CREATE TABLE test ) ) ENGINE = MergeTree() -ORDER BY id; +ORDER BY id +SETTINGS index_granularity_bytes = 10000; INSERT INTO test SELECT number, 'test' FROM numbers(1, 100); diff --git a/tests/queries/0_stateless/02910_replicated_merge_parameters_must_consistent.reference b/tests/queries/0_stateless/02910_replicated_merge_parameters_must_consistent.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02910_replicated_merge_parameters_must_consistent.sql b/tests/queries/0_stateless/02910_replicated_merge_parameters_must_consistent.sql new file mode 100644 index 00000000000..3c1bec4fb3f --- /dev/null +++ b/tests/queries/0_stateless/02910_replicated_merge_parameters_must_consistent.sql @@ -0,0 +1,80 @@ +-- Tags: zookeeper, no-replicated-database +CREATE TABLE t +( + `id` UInt64, + `val` String, + `legacy_ver` UInt64, +) +ENGINE = ReplicatedReplacingMergeTree('/tables/{database}/t/', 'r1', legacy_ver) +ORDER BY id; + +CREATE TABLE t_r +( + `id` UInt64, + `val` String, + `legacy_ver` UInt64 +) +ENGINE = ReplicatedReplacingMergeTree('/tables/{database}/t/', 'r2') +ORDER BY id; -- { serverError METADATA_MISMATCH } + +CREATE TABLE t2 +( + `id` UInt64, + `val` String, + `legacy_ver` UInt64, + `deleted` UInt8 +) +ENGINE = ReplicatedReplacingMergeTree('/tables/{database}/t2/', 'r1', legacy_ver) +ORDER BY id; + +CREATE TABLE t2_r +( + `id` UInt64, + `val` String, + `legacy_ver` UInt64, + `deleted` UInt8 +) +ENGINE = ReplicatedReplacingMergeTree('/tables/{database}/t2/', 'r2', legacy_ver, deleted) +ORDER BY id; -- { serverError METADATA_MISMATCH } + +CREATE TABLE t3 +( + `key` UInt64, + `metrics1` UInt64, + `metrics2` UInt64 +) +ENGINE = ReplicatedSummingMergeTree('/tables/{database}/t3/', 'r1', metrics1) +ORDER BY key; + +CREATE TABLE t3_r +( + `key` UInt64, + `metrics1` UInt64, + `metrics2` UInt64 +) +ENGINE = ReplicatedSummingMergeTree('/tables/{database}/t3/', 'r2', metrics2) +ORDER BY key; -- { serverError METADATA_MISMATCH } + +CREATE TABLE t4 +( + `key` UInt32, + `Path` String, + `Time` 
DateTime('UTC'), + `Value` Float64, + `Version` UInt32, + `col` UInt64 +) +ENGINE = ReplicatedGraphiteMergeTree('/tables/{database}/t4/', 'r1', 'graphite_rollup') +ORDER BY key; + +CREATE TABLE t4_r +( + `key` UInt32, + `Path` String, + `Time` DateTime('UTC'), + `Value` Float64, + `Version` UInt32, + `col` UInt64 +) +ENGINE = ReplicatedGraphiteMergeTree('/tables/{database}/t4/', 'r2', 'graphite_rollup_alternative') +ORDER BY key; -- { serverError METADATA_MISMATCH } diff --git a/tests/queries/0_stateless/02916_csv_infer_numbers_from_strings.reference b/tests/queries/0_stateless/02916_csv_infer_numbers_from_strings.reference new file mode 100644 index 00000000000..f64557f1b70 --- /dev/null +++ b/tests/queries/0_stateless/02916_csv_infer_numbers_from_strings.reference @@ -0,0 +1,6 @@ +c1 Nullable(Int64) +c2 Nullable(Float64) +c3 Nullable(Bool) +c1 Nullable(String) +c2 Nullable(String) +c3 Nullable(String) diff --git a/tests/queries/0_stateless/02916_csv_infer_numbers_from_strings.sql b/tests/queries/0_stateless/02916_csv_infer_numbers_from_strings.sql new file mode 100644 index 00000000000..713d3d7190c --- /dev/null +++ b/tests/queries/0_stateless/02916_csv_infer_numbers_from_strings.sql @@ -0,0 +1,4 @@ +set input_format_csv_try_infer_numbers_from_strings=1; +desc format(CSV, '"42","42.42","True"'); +desc format(CSV, '"42","42.42","True"\n"abc","def","ghk"'); + diff --git a/tests/queries/0_stateless/02916_distributed_skip_unavailable_shards.reference b/tests/queries/0_stateless/02916_distributed_skip_unavailable_shards.reference new file mode 100644 index 00000000000..77fc99a2f2f --- /dev/null +++ b/tests/queries/0_stateless/02916_distributed_skip_unavailable_shards.reference @@ -0,0 +1 @@ +1234 abcd 1 diff --git a/tests/queries/0_stateless/02916_distributed_skip_unavailable_shards.sql b/tests/queries/0_stateless/02916_distributed_skip_unavailable_shards.sql new file mode 100644 index 00000000000..48a1294982d --- /dev/null +++ b/tests/queries/0_stateless/02916_distributed_skip_unavailable_shards.sql @@ -0,0 +1,28 @@ +-- Tags: shard, no-fasttest + +DROP TABLE IF EXISTS table_02916; +DROP TABLE IF EXISTS table_02916_distributed; + +CREATE TABLE table_02916 +( + `ID` UInt32, + `Name` String +) +ENGINE = MergeTree +ORDER BY ID; + +INSERT INTO table_02916 VALUES (1234, 'abcd'); + +CREATE TABLE table_02916_distributed +( + `ID` UInt32, + `Name` String +) +ENGINE = Distributed(test_unavailable_shard, currentDatabase(), table_02916, rand()) +SETTINGS skip_unavailable_shards = 1; + +SET send_logs_level='fatal'; +SELECT *, _shard_num FROM table_02916_distributed; + +DROP TABLE table_02916_distributed; +DROP TABLE table_02916; diff --git a/tests/queries/0_stateless/02918_fuzzjson_table_function.reference b/tests/queries/0_stateless/02918_fuzzjson_table_function.reference index 1b5c6f46f77..8ad9e886b49 100644 --- a/tests/queries/0_stateless/02918_fuzzjson_table_function.reference +++ b/tests/queries/0_stateless/02918_fuzzjson_table_function.reference @@ -150,3 +150,4 @@ {} 730 200 +50 diff --git a/tests/queries/0_stateless/02918_fuzzjson_table_function.sql b/tests/queries/0_stateless/02918_fuzzjson_table_function.sql index 6db0c69dbac..398b3572587 100644 --- a/tests/queries/0_stateless/02918_fuzzjson_table_function.sql +++ b/tests/queries/0_stateless/02918_fuzzjson_table_function.sql @@ -92,15 +92,70 @@ SELECT * FROM fuzzJSON(02918_json_fuzzer, max_key_length=10, min_key_length=0) L SELECT * FROM fuzzJSON(02918_json_fuzzer, max_key_length=10, min_key_length=11) LIMIT 10; -- { serverError BAD_ARGUMENTS } 
-- -DROP TABLE IF EXISTS 02918_table_obj; -CREATE TABLE 02918_table_obj (json_obj Object('json')) Engine=Memory; +DROP TABLE IF EXISTS 02918_table_obj1; +CREATE TABLE 02918_table_obj1 (json_obj Object('json')) Engine=Memory; -INSERT INTO 02918_table_obj SELECT * FROM fuzzJSON( +INSERT INTO 02918_table_obj1 SELECT * FROM fuzzJSON( 02918_json_fuzzer, json_str='{"name": "John Doe", "age": 27, "address": {"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}', random_seed=12345) LIMIT 200; -SELECT count() FROM 02918_table_obj; -DROP TABLE IF EXISTS 02918_table_obj; +SELECT count() FROM 02918_table_obj1; + +DROP TABLE IF EXISTS 02918_table_obj1; + +-- +DROP TABLE IF EXISTS 02918_table_obj2; +CREATE TABLE 02918_table_obj2 (json_obj Object('json')) Engine=Memory; + +INSERT INTO 02918_table_obj2 SELECT * FROM fuzzJSON( + 02918_json_fuzzer, + json_str= + '{ + "name": { + "first": "Joan", + "last": "of Arc" + }, + "birth": {"date": "January 6, 1412", "place": "Domremy, France"}, + "death": {"date": "May 30, 1431", "place": "Rouen, France"}, + "occupation": "Military Leader", + "achievements": ["Lifted Siege of Orleans", "Assisted in Charles VII\'s Coronation"], + "legacy": { + "honors": ["Canonized Saint", "National Heroine of France"], + "memorials": [ + {"name": "Joan of Arc Memorial", "location": "Domremy"}, + {"name": "Place Jeanne d\'Arc", "location": "Rouen"} + ] + } + }', + random_seed=12345, + max_output_length=1024) LIMIT 50; + +INSERT INTO 02918_table_obj2 SELECT * FROM fuzzJSON( + 02918_json_fuzzer, + json_str= + '{ + "name": { + "first": "Joan", + "last": "of Arc" + }, + "birth": {"date": "January 6, 1412", "place": "Domremy, France"}, + "death": {"date": "May 30, 1431", "place": "Rouen, France"}, + "occupation": "Military Leader", + "achievements": ["Lifted Siege of Orleans", "Assisted in Charles VII\'s Coronation"], + "legacy": { + "honors": ["Canonized Saint", "National Heroine of France"], + "memorials": [ + {"name": "Joan of Arc Memorial", "location": "Domremy"}, + {"name": "Place Jeanne d\'Arc", "location": "Rouen"} + ] + } + }', + random_seed=12345, + max_output_length=1024, malform_output=true) LIMIT 50; -- {serverError INCORRECT_DATA } + +SELECT count() FROM 02918_table_obj2; + +DROP TABLE IF EXISTS 02918_table_obj2; DROP NAMED COLLECTION IF EXISTS 02918_json_fuzzer; diff --git a/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.reference b/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.reference new file mode 100644 index 00000000000..3f5700b6d63 --- /dev/null +++ b/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.reference @@ -0,0 +1,9 @@ +2 +Expression ((Projection + Before ORDER BY)) + MergingAggregated + ReadFromPreparedSource (Optimized trivial count) +3 +Expression ((Projection + Before ORDER BY)) + Aggregating + Expression (Before GROUP BY) + ReadFromMerge diff --git a/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.sql b/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.sql new file mode 100644 index 00000000000..9feb2aa2ad6 --- /dev/null +++ b/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.sql @@ -0,0 +1,32 @@ +-- Tests that Merge-engine (not: MergeTree!) 
tables support the trivial count +-- optimization if all underlying tables support it + +DROP TABLE IF EXISTS mt1; +DROP TABLE IF EXISTS mt2; +DROP TABLE IF EXISTS merge; + +CREATE TABLE mt1 (id UInt64) ENGINE = MergeTree ORDER BY id; +CREATE TABLE mt2 (id UInt64) ENGINE = MergeTree ORDER BY id; +CREATE TABLE merge (id UInt64) ENGINE = Merge(currentDatabase(), '^mt[0-9]+$'); + +INSERT INTO mt1 VALUES (1); +INSERT INTO mt2 VALUES (1); + +SELECT count() FROM merge; + +-- can use the trivial count optimization +EXPLAIN SELECT count() FROM merge settings allow_experimental_analyzer=0; + +CREATE TABLE mt3 (id UInt64) ENGINE = TinyLog; + +INSERT INTO mt2 VALUES (2); + +SELECT count() FROM merge; + +-- can't use the trivial count optimization as TinyLog doesn't support it +EXPLAIN SELECT count() FROM merge settings allow_experimental_analyzer=0; + +DROP TABLE IF EXISTS mt1; +DROP TABLE IF EXISTS mt2; +DROP TABLE IF EXISTS mt3; +DROP TABLE IF EXISTS merge; diff --git a/tests/queries/0_stateless/02918_template_format_deadlock.reference b/tests/queries/0_stateless/02918_template_format_deadlock.reference new file mode 100644 index 00000000000..83f5da32cc7 --- /dev/null +++ b/tests/queries/0_stateless/02918_template_format_deadlock.reference @@ -0,0 +1 @@ +42 43 diff --git a/tests/queries/0_stateless/02918_template_format_deadlock.sh b/tests/queries/0_stateless/02918_template_format_deadlock.sh new file mode 100755 index 00000000000..344a8b55b0d --- /dev/null +++ b/tests/queries/0_stateless/02918_template_format_deadlock.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +DATA_FILE=$CLICKHOUSE_TEST_UNIQUE_NAME +TEMPLATE_FILE=$CLICKHOUSE_TEST_UNIQUE_NAME.template + +echo "42 | 43 +Error line" > $DATA_FILE +echo '${a:CSV} | ${b:CSV}' > $TEMPLATE_FILE + +$CLICKHOUSE_LOCAL -q "select * from file('$DATA_FILE', Template, 'a UInt32, b UInt32') settings format_template_row='$TEMPLATE_FILE', input_format_allow_errors_num=1" + +rm $DATA_FILE +rm $TEMPLATE_FILE + diff --git a/tests/queries/0_stateless/02919_insert_meet_eternal_hardware_error.reference b/tests/queries/0_stateless/02919_insert_meet_eternal_hardware_error.reference new file mode 100644 index 00000000000..a9463b5a7b0 --- /dev/null +++ b/tests/queries/0_stateless/02919_insert_meet_eternal_hardware_error.reference @@ -0,0 +1 @@ +[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] diff --git a/tests/queries/0_stateless/02919_insert_meet_eternal_hardware_error.sql b/tests/queries/0_stateless/02919_insert_meet_eternal_hardware_error.sql new file mode 100644 index 00000000000..05602b42c6a --- /dev/null +++ b/tests/queries/0_stateless/02919_insert_meet_eternal_hardware_error.sql @@ -0,0 +1,26 @@ +-- Tags: zookeeper, no-parallel + +DROP TABLE IF EXISTS t_hardware_error NO DELAY; + +CREATE TABLE t_hardware_error ( + KeyID UInt32 +) Engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/{database}/t_async_insert_dedup', '{replica}') +ORDER BY (KeyID); + +insert into t_hardware_error values (1), (2), (3), (4), (5); + +-- Data is written to ZK but the connection fails right after and we can't recover it +system enable failpoint replicated_merge_tree_commit_zk_fail_after_op; +system enable failpoint replicated_merge_tree_commit_zk_fail_when_recovering_from_hw_fault; + +insert into t_hardware_error values (6), (7), (8), (9), (10); -- {serverError UNKNOWN_STATUS_OF_INSERT} + +system disable failpoint replicated_merge_tree_commit_zk_fail_after_op;
+system disable failpoint replicated_merge_tree_commit_zk_fail_when_recovering_from_hw_fault; + +insert into t_hardware_error values (11), (12), (13), (14), (15); + +-- All 3 commits have been written correctly. The unknown status is ok (since it failed after the operation) +Select arraySort(groupArray(KeyID)) FROM t_hardware_error; + +DROP TABLE t_hardware_error NO DELAY; diff --git a/tests/queries/0_stateless/02919_skip_lots_of_parsing_errors.reference b/tests/queries/0_stateless/02919_skip_lots_of_parsing_errors.reference new file mode 100644 index 00000000000..4b4c9812f09 --- /dev/null +++ b/tests/queries/0_stateless/02919_skip_lots_of_parsing_errors.reference @@ -0,0 +1,4 @@ +42 +100000 +42 +100000 diff --git a/tests/queries/0_stateless/02919_skip_lots_of_parsing_errors.sh b/tests/queries/0_stateless/02919_skip_lots_of_parsing_errors.sh new file mode 100755 index 00000000000..7ddb55fb39b --- /dev/null +++ b/tests/queries/0_stateless/02919_skip_lots_of_parsing_errors.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-cpu-aarch64 + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +FILE=$CLICKHOUSE_TEST_UNIQUE_NAME +ERRORS_FILE=$CLICKHOUSE_TEST_UNIQUE_NAME.errors + +$CLICKHOUSE_LOCAL -q "select 'Error' from numbers(100000) format TSVRaw" > $FILE +echo -e "42" >> $FILE + +$CLICKHOUSE_LOCAL -q "select * from file('$FILE', CSV, 'x UInt32') settings input_format_allow_errors_ratio=1, max_block_size=10000, input_format_parallel_parsing=0, input_format_record_errors_file_path='$ERRORS_FILE'"; +$CLICKHOUSE_LOCAL -q "select count() from file('$ERRORS_FILE', CSV)" +rm $ERRORS_FILE + +$CLICKHOUSE_LOCAL -q "select * from file('$FILE', CSV, 'x UInt32') settings input_format_allow_errors_ratio=1, max_block_size=10000, input_format_parallel_parsing=1, input_format_record_errors_file_path='$ERRORS_FILE'"; +$CLICKHOUSE_LOCAL -q "select count() from file('$ERRORS_FILE', CSV)" +rm $ERRORS_FILE + +rm $FILE + diff --git a/tests/queries/0_stateless/02919_storage_fuzzjson.reference b/tests/queries/0_stateless/02919_storage_fuzzjson.reference index a134ce52c11..8f4ee4a5615 100644 --- a/tests/queries/0_stateless/02919_storage_fuzzjson.reference +++ b/tests/queries/0_stateless/02919_storage_fuzzjson.reference @@ -1,3 +1,4 @@ 100 100 100 +100 100 diff --git a/tests/queries/0_stateless/02919_storage_fuzzjson.sql b/tests/queries/0_stateless/02919_storage_fuzzjson.sql index 80b4a406a08..bf473f4b6b8 100644 --- a/tests/queries/0_stateless/02919_storage_fuzzjson.sql +++ b/tests/queries/0_stateless/02919_storage_fuzzjson.sql @@ -42,3 +42,24 @@ CREATE TABLE 02919_test_table_reuse_args(str String) ENGINE = FuzzJSON( SELECT count() FROM (SELECT * FROM 02919_test_table_reuse_args LIMIT 100); DROP TABLE IF EXISTS 02919_test_table_reuse_args; + +-- +DROP TABLE IF EXISTS 02919_test_table_invalid_col_type; +CREATE TABLE 02919_test_table_invalid_col_type +( + str Nullable(Int64) +) +ENGINE = FuzzJSON('{"pet":"rat"}', NULL); -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS 02919_test_table_invalid_col_type; + +-- +DROP TABLE IF EXISTS 02919_test_multi_col; +CREATE TABLE 02919_test_multi_col +( + str1 String, + str2 String +) ENGINE = FuzzJSON('{"pet":"rat"}', 999); + +SELECT count(str1), count(str2) FROM (SELECT str1, str2 FROM 02919_test_multi_col LIMIT 100); +DROP TABLE IF EXISTS 02919_test_multi_col; diff --git a/tests/queries/0_stateless/02920_unary_operators_functions.reference 
b/tests/queries/0_stateless/02920_unary_operators_functions.reference new file mode 100644 index 00000000000..0cfbf08886f --- /dev/null +++ b/tests/queries/0_stateless/02920_unary_operators_functions.reference @@ -0,0 +1 @@ +2 diff --git a/tests/queries/0_stateless/02920_unary_operators_functions.sql b/tests/queries/0_stateless/02920_unary_operators_functions.sql new file mode 100644 index 00000000000..3f3c3a1618a --- /dev/null +++ b/tests/queries/0_stateless/02920_unary_operators_functions.sql @@ -0,0 +1 @@ +SELECT NOT (0) + NOT (0); \ No newline at end of file diff --git a/tests/queries/0_stateless/02922_analyzer_aggregate_nothing_type.sql b/tests/queries/0_stateless/02922_analyzer_aggregate_nothing_type.sql index 987515527f0..a064c091df0 100644 --- a/tests/queries/0_stateless/02922_analyzer_aggregate_nothing_type.sql +++ b/tests/queries/0_stateless/02922_analyzer_aggregate_nothing_type.sql @@ -11,7 +11,7 @@ SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=2, use_hedged_requests=0, - cluster_for_parallel_replicas='parallel_replicas', + cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree=1 ; diff --git a/tests/queries/0_stateless/02931_file_cluster.sh b/tests/queries/0_stateless/02931_file_cluster.sh index e628687a42a..8566e2ab08e 100755 --- a/tests/queries/0_stateless/02931_file_cluster.sh +++ b/tests/queries/0_stateless/02931_file_cluster.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +USER_FILES_PATH=$($CLICKHOUSE_CLIENT_BINARY --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') mkdir -p "${USER_FILES_PATH}"/"${CLICKHOUSE_TEST_UNIQUE_NAME}"/ diff --git a/tests/queries/0_stateless/02931_max_num_to_warn.reference b/tests/queries/0_stateless/02931_max_num_to_warn.reference new file mode 100644 index 00000000000..c0ad7354039 --- /dev/null +++ b/tests/queries/0_stateless/02931_max_num_to_warn.reference @@ -0,0 +1,3 @@ +The number of attached tables is more than 10 +The number of attached databases is more than 10 +The number of active parts is more than 10 diff --git a/tests/queries/0_stateless/02931_max_num_to_warn.sql b/tests/queries/0_stateless/02931_max_num_to_warn.sql new file mode 100644 index 00000000000..49b981fc355 --- /dev/null +++ b/tests/queries/0_stateless/02931_max_num_to_warn.sql @@ -0,0 +1,53 @@ +-- Tags: no-parallel + +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_02931; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_1 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_2 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_3 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_4 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_5 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_6 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS 
test_max_num_to_warn_02931.test_max_num_to_warn_7 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_8 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_9 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_10 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_11 (id Int32, str String) Engine=Memory; + +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_1; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_2; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_3; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_4; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_5; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_6; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_7; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_8; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_9; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_10; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_11; + +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_1 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_2 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_3 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_4 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_5 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_6 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_7 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_8 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_9 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_10 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_11 VALUES (1, 'Hello'); + +SELECT * FROM system.warnings where message in ('The number of attached tables is more than 10', 'The number of attached databases is more than 10', 'The number of active parts is more than 10'); + +DROP DATABASE IF EXISTS test_max_num_to_warn_02931; +DROP DATABASE IF EXISTS test_max_num_to_warn_1; +DROP DATABASE IF EXISTS test_max_num_to_warn_2; +DROP DATABASE IF EXISTS test_max_num_to_warn_3; +DROP DATABASE IF EXISTS test_max_num_to_warn_4; +DROP DATABASE IF EXISTS test_max_num_to_warn_5; +DROP DATABASE IF EXISTS test_max_num_to_warn_6; +DROP DATABASE IF EXISTS test_max_num_to_warn_7; +DROP DATABASE IF EXISTS test_max_num_to_warn_8; +DROP DATABASE IF EXISTS test_max_num_to_warn_9; +DROP DATABASE IF EXISTS test_max_num_to_warn_10; +DROP DATABASE IF EXISTS test_max_num_to_warn_11; diff --git a/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh b/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh index 27ef26dd9a5..d9e4a2c8f8b 100755 --- a/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh +++ b/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh @@ -10,4 +10,4 @@ create table test (x UInt64, y UInt32, size UInt64) engine=Memory; insert into test select c1, c2, _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') settings use_structure_from_insertion_table_in_table_functions=1; select * 
from test; " - +rm $CLICKHOUSE_TEST_UNIQUE_NAME.csv diff --git a/tests/queries/0_stateless/02932_apply_deleted_mask.reference b/tests/queries/0_stateless/02932_apply_deleted_mask.reference new file mode 100644 index 00000000000..22499472f84 --- /dev/null +++ b/tests/queries/0_stateless/02932_apply_deleted_mask.reference @@ -0,0 +1,15 @@ +Inserted +100 4950 +10 100 0 +Lightweight deleted +86 4271 +10 100 10 +Mask applied +86 4271 +10 86 0 +Lightweight deleted +72 3578 +10 86 10 +Mask applied in partition +72 3578 +10 84 9 diff --git a/tests/queries/0_stateless/02932_apply_deleted_mask.sql b/tests/queries/0_stateless/02932_apply_deleted_mask.sql new file mode 100644 index 00000000000..0ada0640a8f --- /dev/null +++ b/tests/queries/0_stateless/02932_apply_deleted_mask.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS t_materialize_delete; + +CREATE TABLE t_materialize_delete (id UInt64, v UInt64) +ENGINE = MergeTree ORDER BY id PARTITION BY id % 10; + +SET mutations_sync = 2; + +INSERT INTO t_materialize_delete SELECT number, number FROM numbers(100); + +SELECT 'Inserted'; + +SELECT count(), sum(v) FROM t_materialize_delete; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_materialize_delete' AND active; + +SELECT 'Lightweight deleted'; + +DELETE FROM t_materialize_delete WHERE id % 7 = 3; + +SELECT count(), sum(v) FROM t_materialize_delete; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_materialize_delete' AND active; + +SELECT 'Mask applied'; + +ALTER TABLE t_materialize_delete APPLY DELETED MASK; + +SELECT count(), sum(v) FROM t_materialize_delete; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_materialize_delete' AND active; + +SELECT 'Lightweight deleted'; + +DELETE FROM t_materialize_delete WHERE id % 7 = 4; + +SELECT count(), sum(v) FROM t_materialize_delete; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_materialize_delete' AND active; + +SELECT 'Mask applied in partition'; + +ALTER TABLE t_materialize_delete APPLY DELETED MASK IN PARTITION 5; + +SELECT count(), sum(v) FROM t_materialize_delete; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_materialize_delete' AND active; + +DROP TABLE t_materialize_delete; diff --git a/tests/queries/0_stateless/02932_kill_query_sleep.reference b/tests/queries/0_stateless/02932_kill_query_sleep.reference new file mode 100644 index 00000000000..9c19635a83f --- /dev/null +++ b/tests/queries/0_stateless/02932_kill_query_sleep.reference @@ -0,0 +1,2 @@ +Cancelling query +QUERY_WAS_CANCELLED diff --git a/tests/queries/0_stateless/02932_kill_query_sleep.sh b/tests/queries/0_stateless/02932_kill_query_sleep.sh new file mode 100755 index 00000000000..08c375b875d --- /dev/null +++ b/tests/queries/0_stateless/02932_kill_query_sleep.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + + +function wait_query_started() +{ + local query_id="$1" + $CLICKHOUSE_CLIENT --query "SYSTEM FLUSH LOGS" + while [[ $($CLICKHOUSE_CLIENT --query="SELECT count() FROM system.query_log WHERE query_id='$query_id' AND current_database = currentDatabase()") == 0 ]]; do + sleep 0.1; + $CLICKHOUSE_CLIENT --query "SYSTEM FLUSH LOGS;" + done +} + +function kill_query() +{ + local query_id="$1" + $CLICKHOUSE_CLIENT --query "KILL QUERY WHERE query_id='$query_id'" >/dev/null + while [[ $($CLICKHOUSE_CLIENT --query="SELECT count() FROM system.processes WHERE query_id='$query_id'") != 0 ]]; do sleep 0.1; done +} + + +sleep_query_id="sleep_query_id_02932_kill_query_sleep_${CLICKHOUSE_DATABASE}_$RANDOM" + +# This sleep query wants to sleep for 1000 seconds (which is too long). +# We're going to cancel this query later. +sleep_query="SELECT sleep(1000)" + +$CLICKHOUSE_CLIENT --query_id="$sleep_query_id" --function_sleep_max_microseconds_per_block="1000000000" --query "$sleep_query" >/dev/null 2>&1 & +wait_query_started "$sleep_query_id" + +echo "Cancelling query" +kill_query "$sleep_query_id" + +$CLICKHOUSE_CLIENT --query "SYSTEM FLUSH LOGS;" +$CLICKHOUSE_CLIENT --query "SELECT exception FROM system.query_log WHERE query_id='$sleep_query_id' AND current_database = currentDatabase()" | grep -oF "QUERY_WAS_CANCELLED" diff --git a/tests/queries/0_stateless/02932_lwd_and_mutations.reference b/tests/queries/0_stateless/02932_lwd_and_mutations.reference new file mode 100644 index 00000000000..dc0d3536b8f --- /dev/null +++ b/tests/queries/0_stateless/02932_lwd_and_mutations.reference @@ -0,0 +1,14 @@ +900 0 [1,2,3,4,5,6,7,8,9] +1 1000 1 +800 200 [2,3,4,5,6,7,8,9] +1 800 0 +700 150 [3,4,5,6,7,8,9] +1 800 1 +600 300 [4,5,6,7,8,9] +1 600 0 +400 200 [6,7,8,9] +1 500 1 +200 100 [8,9] +1 300 1 +200 100 [8,9] +1 200 0 diff --git a/tests/queries/0_stateless/02932_lwd_and_mutations.sql b/tests/queries/0_stateless/02932_lwd_and_mutations.sql new file mode 100644 index 00000000000..a68aca91764 --- /dev/null +++ b/tests/queries/0_stateless/02932_lwd_and_mutations.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS t_lwd_mutations; + +CREATE TABLE t_lwd_mutations(id UInt64, v UInt64) ENGINE = MergeTree ORDER BY id; +INSERT INTO t_lwd_mutations SELECT number, 0 FROM numbers(1000); + +SET mutations_sync = 2; + +DELETE FROM t_lwd_mutations WHERE id % 10 = 0; + +SELECT count(), sum(v), arraySort(groupUniqArray(id % 10)) FROM t_lwd_mutations; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwd_mutations' AND active; + +ALTER TABLE t_lwd_mutations UPDATE v = 1 WHERE id % 4 = 0, DELETE WHERE id % 10 = 1; + +SELECT count(), sum(v), arraySort(groupUniqArray(id % 10)) FROM t_lwd_mutations; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwd_mutations' AND active; + +DELETE FROM t_lwd_mutations WHERE id % 10 = 2; + +SELECT count(), sum(v), arraySort(groupUniqArray(id % 10)) FROM t_lwd_mutations; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwd_mutations' AND active; + +ALTER TABLE t_lwd_mutations UPDATE v = 1 WHERE id % 4 = 1, DELETE WHERE id % 10 = 3; + +SELECT count(), sum(v), arraySort(groupUniqArray(id % 10)) FROM t_lwd_mutations; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwd_mutations' AND active; + 
+ALTER TABLE t_lwd_mutations UPDATE _row_exists = 0 WHERE id % 10 = 4, DELETE WHERE id % 10 = 5; + +SELECT count(), sum(v), arraySort(groupUniqArray(id % 10)) FROM t_lwd_mutations; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwd_mutations' AND active; + +ALTER TABLE t_lwd_mutations DELETE WHERE id % 10 = 6, UPDATE _row_exists = 0 WHERE id % 10 = 7; + +SELECT count(), sum(v), arraySort(groupUniqArray(id % 10)) FROM t_lwd_mutations; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwd_mutations' AND active; + +ALTER TABLE t_lwd_mutations APPLY DELETED MASK; + +SELECT count(), sum(v), arraySort(groupUniqArray(id % 10)) FROM t_lwd_mutations; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwd_mutations' AND active; + +DROP TABLE IF EXISTS t_lwd_mutations; diff --git a/tests/queries/0_stateless/02932_non_ready_set_stuck.reference b/tests/queries/0_stateless/02932_non_ready_set_stuck.reference new file mode 100644 index 00000000000..fc39e7c9b45 --- /dev/null +++ b/tests/queries/0_stateless/02932_non_ready_set_stuck.reference @@ -0,0 +1,2 @@ + +0 0 diff --git a/tests/queries/0_stateless/02932_non_ready_set_stuck.sql b/tests/queries/0_stateless/02932_non_ready_set_stuck.sql new file mode 100644 index 00000000000..c04f8f18751 --- /dev/null +++ b/tests/queries/0_stateless/02932_non_ready_set_stuck.sql @@ -0,0 +1,2 @@ +CREATE TABLE tab (item_id UInt64, price_sold Nullable(Float32), date Date) ENGINE = MergeTree ORDER BY item_id; +SELECT * FROM (SELECT item_id FROM tab GROUP BY item_id WITH TOTALS ORDER BY '922337203.6854775806' IN (SELECT NULL)) AS l RIGHT JOIN (SELECT item_id FROM tab) AS r ON l.item_id = r.item_id WHERE NULL; diff --git a/tests/queries/0_stateless/02932_parallel_replicas_fuzzer.reference b/tests/queries/0_stateless/02932_parallel_replicas_fuzzer.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02932_parallel_replicas_fuzzer.sql b/tests/queries/0_stateless/02932_parallel_replicas_fuzzer.sql new file mode 100644 index 00000000000..3daaf36188a --- /dev/null +++ b/tests/queries/0_stateless/02932_parallel_replicas_fuzzer.sql @@ -0,0 +1,38 @@ +SET parallel_replicas_for_non_replicated_merge_tree=1; + +-- https://github.com/ClickHouse/ClickHouse/issues/49559 +CREATE TABLE join_inner_table__fuzz_146 (`id` UUID, `key` String, `number` Int64, `value1` String, `value2` String, `time` Nullable(Int64)) ENGINE = MergeTree ORDER BY (id, number, key); +INSERT INTO join_inner_table__fuzz_146 SELECT CAST('833c9e22-c245-4eb5-8745-117a9a1f26b1', 'UUID') AS id, CAST(rowNumberInAllBlocks(), 'String') AS key, * FROM generateRandom('number Int64, value1 String, value2 String, time Int64', 1, 10, 2) LIMIT 100; +SELECT key, value1, value2, toUInt64(min(time)) AS start_ts FROM join_inner_table__fuzz_146 GROUP BY key, value1, value2 WITH CUBE ORDER BY key ASC NULLS LAST, value2 DESC NULLS LAST LIMIT 9223372036854775806 + FORMAT Null + SETTINGS + max_parallel_replicas = 3, + prefer_localhost_replica = 1, + cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', + allow_experimental_parallel_reading_from_replicas = 1, + use_hedged_requests = 0; + + +-- https://github.com/ClickHouse/ClickHouse/issues/48496 +CREATE TABLE t_02709__fuzz_23 (`key` Nullable(UInt8), `sign` Int8, `date` DateTime64(3)) ENGINE = 
CollapsingMergeTree(sign) PARTITION BY date ORDER BY key SETTINGS allow_nullable_key=1; +INSERT INTO t_02709__fuzz_23 values (1, 1, '2023-12-01 00:00:00.000'); +SELECT NULL FROM t_02709__fuzz_23 FINAL +GROUP BY sign, '1023' +ORDER BY nan DESC, [0, NULL, NULL, NULL, NULL] DESC +FORMAT Null +SETTINGS + max_parallel_replicas = 3, + allow_experimental_parallel_reading_from_replicas = 1, + use_hedged_requests = 0, + cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; + +SELECT _CAST(NULL, 'Nullable(Nothing)') AS `NULL` +FROM t_02709__fuzz_23 FINAL +GROUP BY + t_02709__fuzz_23.sign, + '1023' +ORDER BY + nan DESC, + _CAST([0, NULL, NULL, NULL, NULL], 'Array(Nullable(UInt8))') DESC +FORMAT Null +SETTINGS receive_timeout = 10., receive_data_timeout_ms = 10000, use_hedged_requests = 0, allow_suspicious_low_cardinality_types = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, log_queries = 1, table_function_remote_max_addresses = 200, allow_experimental_analyzer = 1; diff --git a/tests/queries/0_stateless/02932_query_settings_max_size_drop.reference b/tests/queries/0_stateless/02932_query_settings_max_size_drop.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02932_query_settings_max_size_drop.sql b/tests/queries/0_stateless/02932_query_settings_max_size_drop.sql new file mode 100644 index 00000000000..1685861bd2e --- /dev/null +++ b/tests/queries/0_stateless/02932_query_settings_max_size_drop.sql @@ -0,0 +1,31 @@ +CREATE TABLE test_max_size_drop +Engine = MergeTree() +ORDER BY number +AS SELECT number +FROM numbers(1000) +; + +DROP TABLE test_max_size_drop SETTINGS max_table_size_to_drop = 1; -- { serverError 359 } +DROP TABLE test_max_size_drop; + +CREATE TABLE test_max_size_drop +Engine = MergeTree() +ORDER BY number +AS SELECT number +FROM numbers(1000) +; + +ALTER TABLE test_max_size_drop DROP PARTITION tuple() SETTINGS max_partition_size_to_drop = 1; -- { serverError 359 } +ALTER TABLE test_max_size_drop DROP PARTITION tuple(); +DROP TABLE test_max_size_drop; + +CREATE TABLE test_max_size_drop +Engine = MergeTree() +ORDER BY number +AS SELECT number +FROM numbers(1000) +; + +ALTER TABLE test_max_size_drop DROP PART 'all_1_1_0' SETTINGS max_partition_size_to_drop = 1; -- { serverError 359 } +ALTER TABLE test_max_size_drop DROP PART 'all_1_1_0'; +DROP TABLE test_max_size_drop; diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.reference b/tests/queries/0_stateless/02932_refreshable_materialized_views.reference new file mode 100644 index 00000000000..4c5b678cfa5 --- /dev/null +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.reference @@ -0,0 +1,44 @@ +<1: created view> a [] 1 +CREATE MATERIALIZED VIEW default.a\nREFRESH AFTER 1 SECOND\n(\n `x` UInt64\n)\nENGINE = Memory AS\nSELECT number AS x\nFROM numbers(2)\nUNION ALL\nSELECT rand64() AS x +<2: refreshed> 3 1 1 +<3: time difference at least> 500 +<4: next refresh in> 1 +<4.5: altered> Scheduled Finished 2052-01-01 00:00:00 +CREATE MATERIALIZED VIEW default.a\nREFRESH EVERY 2 YEAR\n(\n `x` Int16\n)\nENGINE = Memory AS\nSELECT x * 2 AS x\nFROM default.src +<5: no refresh> 3 +<6: refreshed> 2 +<7: refreshed> Scheduled Finished 2054-01-01 00:00:00 +CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR DEPENDS ON default.a\n(\n `y` Int32\n)\nENGINE = 
MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192 AS\nSELECT x * 10 AS y\nFROM default.a +<8: refreshed> 20 +<9: refreshed> a Scheduled Finished 2054-01-01 00:00:00 +<9: refreshed> b Scheduled Finished 2054-01-01 00:00:00 +<10: waiting> a Scheduled [] 2054-01-01 00:00:00 +<10: waiting> b WaitingForDependencies ['default.a'] 2054-01-01 00:00:00 +<11: chain-refreshed a> 4 +<12: chain-refreshed b> 40 +<13: chain-refreshed> a Scheduled [] Finished 2054-01-01 00:00:01 2056-01-01 00:00:00 +<13: chain-refreshed> b Scheduled ['default.a'] Finished 2054-01-24 23:22:21 2056-01-01 00:00:00 +<14: waiting for next cycle> a Scheduled [] 2058-01-01 00:00:00 +<14: waiting for next cycle> b WaitingForDependencies ['default.a'] 2060-01-01 00:00:00 +<15: chain-refreshed a> 6 +<16: chain-refreshed b> 60 +<17: chain-refreshed> a Scheduled 2062-01-01 00:00:00 +<17: chain-refreshed> b Scheduled 2062-01-01 00:00:00 +<18: removed dependency> b Scheduled [] 2062-03-03 03:03:03 2064-01-01 00:00:00 5 +CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192 AS\nSELECT x * 10 AS y\nFROM default.a +<19: exception> 1 +<20: unexception> 1 +<21: rename> 1 +<22: rename> d Finished +<23: simple refresh> 1 +<24: rename during refresh> 1 +<25: rename during refresh> f Running +<27: cancelled> f Scheduled +CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory AS\nSELECT 42 +<29: randomize> 1 1 +CREATE MATERIALIZED VIEW default.h\nREFRESH EVERY 1 SECOND TO default.dest\n(\n `x` Int64\n) AS\nSELECT x * 10 AS x\nFROM default.src +<30: to existing table> 10 +<31: to existing table> 10 +<31: to existing table> 20 +<32: empty> i Scheduled Unknown +<32: empty> j Scheduled Finished diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh new file mode 100755 index 00000000000..8daea063fc5 --- /dev/null +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh @@ -0,0 +1,303 @@ +#!/usr/bin/env bash +# Tags: atomic-database + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# Set session timezone to UTC to make all DateTime formatting and parsing use UTC, because refresh +# scheduling is done in UTC. +CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`" +CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --session_timezone Etc/UTC"`" + +$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" + + +# Basic refreshing. +$CLICKHOUSE_CLIENT -nq " + create materialized view a + refresh after 1 second + engine Memory + empty + as select number as x from numbers(2) union all select rand64() as x" +$CLICKHOUSE_CLIENT -nq "select '<1: created view>', view, remaining_dependencies, exception, last_refresh_result in ('Unknown', 'Finished') from refreshes"; +$CLICKHOUSE_CLIENT -nq "show create a" +# Wait for any refresh. (xargs trims the string and turns \t and \n into spaces) +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ] +do + sleep 0.1 +done +# Check table contents. 
+$CLICKHOUSE_CLIENT -nq "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a" +# Wait for table contents to change. +res1="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values'`" +while : +do + res2="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" + [ "$res2" == "$res1" ] || break + sleep 0.1 +done +time2="`$CLICKHOUSE_CLIENT -nq "select reinterpret(now64(), 'Int64')"`" +# Wait for another change. +while : +do + res3="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" + [ "$res3" == "$res2" ] || break + sleep 0.1 +done +# Check that the two changes were at least 500ms apart, in particular that we're not refreshing +# like crazy. This is potentially flaky, but we need at least one test that uses non-mocked timer +# to make sure the clock+timer code works at all. If it turns out flaky, increase refresh period above. +$CLICKHOUSE_CLIENT -nq " + select '<3: time difference at least>', min2(reinterpret(now64(), 'Int64') - $time2, 500); + select '<4: next refresh in>', next_refresh_time-last_refresh_time from refreshes;" + +# Create a source table from which views will read. +$CLICKHOUSE_CLIENT -nq " + create table src (x Int8) engine Memory as select 1" + +# Switch to fake clock, change refresh schedule, change query. +$CLICKHOUSE_CLIENT -nq " + system test view a set fake time '2050-01-01 00:00:01';" +while [ "`$CLICKHOUSE_CLIENT -nq "select status, last_refresh_time, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2050-01-01 00:00:01 2050-01-01 00:00:02' ] +do + sleep 0.1 +done +$CLICKHOUSE_CLIENT -nq " + alter table a modify refresh every 2 year; + alter table a modify query select x*2 as x from src; + select '<4.5: altered>', status, last_refresh_result, next_refresh_time from refreshes; + show create a;" +# Advance time to trigger the refresh. +$CLICKHOUSE_CLIENT -nq " + select '<5: no refresh>', count() from a; + system test view a set fake time '2052-02-03 04:05:06';" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ] +do + sleep 0.1 +done +$CLICKHOUSE_CLIENT -nq " + select '<6: refreshed>', * from a; + select '<7: refreshed>', status, last_refresh_result, next_refresh_time from refreshes;" + +# Create a dependent view, refresh it once. +$CLICKHOUSE_CLIENT -nq " + create materialized view b refresh every 2 year depends on a (y Int32) engine MergeTree order by y empty as select x*10 as y from a; + show create b; + system test view b set fake time '2052-11-11 11:11:11'; + system refresh view b;" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2052-11-11 11:11:11' ] +do + sleep 0.1 +done +# Next refresh shouldn't start until the dependency refreshes. +$CLICKHOUSE_CLIENT -nq " + select '<8: refreshed>', * from b; + select '<9: refreshed>', view, status, last_refresh_result, next_refresh_time from refreshes; + system test view b set fake time '2054-01-24 23:22:21';" +while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ] +do + sleep 0.1 +done +# Update source table (by dropping and re-creating it - to test that tables are looked up by name +# rather than uuid), kick off refresh of the dependency. 
+$CLICKHOUSE_CLIENT -nq " + select '<10: waiting>', view, status, remaining_dependencies, next_refresh_time from refreshes; + drop table src; + create table src (x Int16) engine Memory as select 2; + system test view a set fake time '2054-01-01 00:00:01';" +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ] +do + sleep 0.1 +done +# Both tables should've refreshed. +$CLICKHOUSE_CLIENT -nq " + select '<11: chain-refreshed a>', * from a; + select '<12: chain-refreshed b>', * from b; + select '<13: chain-refreshed>', view, status, remaining_dependencies, last_refresh_result, last_refresh_time, next_refresh_time, exception from refreshes;" + +# Make the dependent table run ahead by one refresh cycle, make sure it waits for the dependency to +# catch up to the same cycle. +$CLICKHOUSE_CLIENT -nq " + system test view b set fake time '2059-01-01 00:00:00'; + system refresh view b;" +while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ] +do + sleep 0.1 +done +$CLICKHOUSE_CLIENT -nq " + system test view b set fake time '2061-01-01 00:00:00'; + system test view a set fake time '2057-01-01 00:00:00';" +while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ] +do + sleep 0.1 +done +sleep 1 +$CLICKHOUSE_CLIENT -nq " + select '<14: waiting for next cycle>', view, status, remaining_dependencies, next_refresh_time from refreshes; + truncate src; + insert into src values (3); + system test view a set fake time '2060-02-02 02:02:02';" +while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ] +do + sleep 0.1 +done +$CLICKHOUSE_CLIENT -nq " + select '<15: chain-refreshed a>', * from a; + select '<16: chain-refreshed b>', * from b; + select '<17: chain-refreshed>', view, status, next_refresh_time from refreshes;" + +# Get to WaitingForDependencies state and remove the depencency. +$CLICKHOUSE_CLIENT -nq " + system test view b set fake time '2062-03-03 03:03:03'" +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ] +do + sleep 0.1 +done +$CLICKHOUSE_CLIENT -nq " + alter table b modify refresh every 2 year" +while [ "`$CLICKHOUSE_CLIENT -nq "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ] +do + sleep 0.1 +done +$CLICKHOUSE_CLIENT -nq " + select '<18: removed dependency>', view, status, remaining_dependencies, last_refresh_time,next_refresh_time, refresh_count from refreshes where view = 'b'; + show create b;" + +# Select from a table that doesn't exist, get an exception. +$CLICKHOUSE_CLIENT -nq " + drop table a; + drop table b; + create materialized view c refresh every 1 second (x Int64) engine Memory empty as select * from src; + drop table src;" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Exception' ] +do + sleep 0.1 +done +# Check exception, create src, expect successful refresh. 
+$CLICKHOUSE_CLIENT -nq " + select '<19: exception>', exception ilike '%UNKNOWN_TABLE%' from refreshes; + create table src (x Int64) engine Memory as select 1; + system refresh view c;" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] +do + sleep 0.1 +done +# Rename table. +$CLICKHOUSE_CLIENT -nq " + select '<20: unexception>', * from c; + rename table c to d; + select '<21: rename>', * from d; + select '<22: rename>', view, last_refresh_result from refreshes;" + +# Do various things during a refresh. +# First make a nonempty view. +$CLICKHOUSE_CLIENT -nq " + drop table d; + truncate src; + insert into src values (1) + create materialized view e refresh every 1 second (x Int64) engine MergeTree order by x empty as select x + sleepEachRow(1) as x from src settings max_block_size = 1;" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] +do + sleep 0.1 +done +# Stop refreshes. +$CLICKHOUSE_CLIENT -nq " + select '<23: simple refresh>', * from e; + system stop view e;" +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Disabled' ] +do + sleep 0.1 +done +# Make refreshes slow, wait for a slow refresh to start. (We stopped refreshes first to make sure +# we wait for a slow refresh, not a previous fast one.) +$CLICKHOUSE_CLIENT -nq " + insert into src select * from numbers(1000) settings max_block_size=1; + system start view e;" +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Running' ] +do + sleep 0.1 +done +# Rename. +$CLICKHOUSE_CLIENT -nq " + rename table e to f; + select '<24: rename during refresh>', * from f; + select '<25: rename during refresh>', view, status from refreshes; + alter table f modify refresh after 10 year;" +sleep 2 # make it likely that at least one row was processed +# Cancel. +$CLICKHOUSE_CLIENT -nq " + system cancel view f;" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Cancelled' ] +do + sleep 0.1 +done +# Check that another refresh doesn't immediately start after the cancelled one. +sleep 1 +$CLICKHOUSE_CLIENT -nq " + select '<27: cancelled>', view, status from refreshes; + system refresh view f;" +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Running' ] +do + sleep 0.1 +done +# Drop. +$CLICKHOUSE_CLIENT -nq " + drop table f; + select '<28: drop during refresh>', view, status from refreshes;" + +# Try OFFSET and RANDOMIZE FOR. +$CLICKHOUSE_CLIENT -nq " + create materialized view g refresh every 1 week offset 3 day 4 hour randomize for 4 day 1 hour (x Int64) engine Memory empty as select 42; + show create g; + system test view g set fake time '2050-02-03 15:30:13';" +while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ] +do + sleep 0.1 +done +$CLICKHOUSE_CLIENT -nq " + with '2050-02-10 04:00:00'::DateTime as expected + select '<29: randomize>', abs(next_refresh_time::Int64 - expected::Int64) <= 3600*(24*4+1), next_refresh_time != expected from refreshes;" + +# Send data 'TO' an existing table. 
+$CLICKHOUSE_CLIENT -nq " + drop table g; + create table dest (x Int64) engine MergeTree order by x; + truncate src; + insert into src values (1); + create materialized view h refresh every 1 second to dest empty as select x*10 as x from src; + show create h;" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] +do + sleep 0.1 +done +$CLICKHOUSE_CLIENT -nq " + select '<30: to existing table>', * from dest; + insert into src values (2);" +while [ "`$CLICKHOUSE_CLIENT -nq "select count() from dest -- $LINENO" | xargs`" != '2' ] +do + sleep 0.1 +done +$CLICKHOUSE_CLIENT -nq " + select '<31: to existing table>', * from dest; + drop table dest; + drop table src; + drop table h;" + +# EMPTY +$CLICKHOUSE_CLIENT -nq " + create materialized view i refresh after 1 year engine Memory empty as select number as x from numbers(2); + create materialized view j refresh after 1 year engine Memory as select number as x from numbers(2)" +while [ "`$CLICKHOUSE_CLIENT -nq "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ] +do + sleep 0.1 +done +$CLICKHOUSE_CLIENT -nq " + select '<32: empty>', view, status, last_refresh_result from refreshes order by view; + drop table i; + drop table j" + +$CLICKHOUSE_CLIENT -nq " + drop table refreshes;" diff --git a/tests/queries/0_stateless/02933_change_cache_setting_without_restart.reference b/tests/queries/0_stateless/02933_change_cache_setting_without_restart.reference new file mode 100644 index 00000000000..d4dd4da0c5d --- /dev/null +++ b/tests/queries/0_stateless/02933_change_cache_setting_without_restart.reference @@ -0,0 +1,7 @@ +134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 0 0 1 +134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 10 1000 0 1 +134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 5 1000 0 1 +134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 15 1000 0 1 +134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 2 1000 0 1 +134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 1000 0 1 +134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 0 0 1 diff --git a/tests/queries/0_stateless/02933_change_cache_setting_without_restart.sh b/tests/queries/0_stateless/02933_change_cache_setting_without_restart.sh new file mode 100755 index 00000000000..ddad7a1904b --- /dev/null +++ b/tests/queries/0_stateless/02933_change_cache_setting_without_restart.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-parallel, no-s3-storage + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +disk_name="s3_cache_02933" +$CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" + +config_path=/etc/clickhouse-server/config.d/storage_conf.xml +config_path_tmp=$config_path.tmp + +cat $config_path \ +| sed "s|0<\/background_download_threads>|10<\/background_download_threads>|" \ +> $config_path_tmp +mv $config_path_tmp $config_path + +cat $config_path \ +| sed "s|0<\/background_download_queue_size_limit>|1000<\/background_download_queue_size_limit>|" \ +> $config_path_tmp +mv $config_path_tmp $config_path + +$CLICKHOUSE_CLIENT --query "SYSTEM RELOAD CONFIG" +$CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" + +cat $config_path \ +| sed "s|10<\/background_download_threads>|5<\/background_download_threads>|" \ +> $config_path_tmp +mv $config_path_tmp $config_path + +$CLICKHOUSE_CLIENT --query "SYSTEM RELOAD CONFIG" +$CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" + +cat $config_path \ +| sed "s|5<\/background_download_threads>|15<\/background_download_threads>|" \ +> $config_path_tmp +mv $config_path_tmp $config_path + +$CLICKHOUSE_CLIENT --query "SYSTEM RELOAD CONFIG" +$CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" + +cat $config_path \ +| sed "s|15<\/background_download_threads>|2<\/background_download_threads>|" \ +> $config_path_tmp +mv $config_path_tmp $config_path + +$CLICKHOUSE_CLIENT --query "SYSTEM RELOAD CONFIG" +$CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" + +cat $config_path \ +| sed "s|2<\/background_download_threads>|0<\/background_download_threads>|" \ +> $config_path_tmp +mv $config_path_tmp $config_path + +$CLICKHOUSE_CLIENT --query "SYSTEM RELOAD CONFIG" +$CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" + +cat $config_path \ +| sed "s|1000<\/background_download_queue_size_limit>|0<\/background_download_queue_size_limit>|" \ +> $config_path_tmp +mv $config_path_tmp $config_path + +$CLICKHOUSE_CLIENT --query "SYSTEM RELOAD CONFIG" +$CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" diff --git a/tests/queries/0_stateless/02933_group_by_memory_usage.reference b/tests/queries/0_stateless/02933_group_by_memory_usage.reference new file mode 100644 index 00000000000..1481fcaa297 --- /dev/null +++ b/tests/queries/0_stateless/02933_group_by_memory_usage.reference @@ -0,0 +1,3 @@ +Spin up a long running query +1 1 1 1 1 +0 diff --git a/tests/queries/0_stateless/02933_group_by_memory_usage.sh b/tests/queries/0_stateless/02933_group_by_memory_usage.sh new file mode 100755 index 00000000000..bb1bbbf16a2 --- /dev/null +++ b/tests/queries/0_stateless/02933_group_by_memory_usage.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# Tags: long, no-random-settings + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +query_id="group-by-mem-usage-$CLICKHOUSE_DATABASE" + +echo "Spin up a long running query" +${CLICKHOUSE_CLIENT} --query "with q as (select length(groupArray(toString(number))) as x from numbers_mt(2e6) group by number order by x limit 1), q1 as (select * from q), q2 as (select * from q), q3 as (select * from q), q4 as (select * from q) select * from q, q1, q2, q3, q4 settings max_bytes_before_external_group_by='1G', max_memory_usage='2G'" --query_id "$query_id" +${CLICKHOUSE_CLIENT} --query "system flush logs" +${CLICKHOUSE_CLIENT} --query "select ProfileEvents['ExternalAggregationWritePart'] from system.query_log where current_database = currentDatabase() and type = 'QueryFinish' and query_id = '$query_id' and event_date >= today() - 1" diff --git a/tests/queries/0_stateless/02933_paste_join.reference b/tests/queries/0_stateless/02933_paste_join.reference new file mode 100644 index 00000000000..84ae5987926 --- /dev/null +++ b/tests/queries/0_stateless/02933_paste_join.reference @@ -0,0 +1,74 @@ +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +0 9 +1 8 +2 7 +3 6 +4 5 +5 4 +6 3 +7 2 +8 1 +9 0 +1 2 +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 0 +7 1 +8 2 +9 3 +10 4 +0 0 +1 1 +0 0 0 0 +1 1 1 1 +2 2 2 2 +3 3 3 3 +4 4 4 4 +5 5 5 5 +6 6 6 6 +7 7 7 7 +8 8 8 8 +9 9 9 9 +10 10 10 10 +11 11 11 11 +12 12 12 12 +13 13 13 13 +14 14 14 14 +15 15 15 15 +16 16 16 16 +17 17 17 17 +18 18 18 18 +19 19 19 19 +20 20 20 20 +21 21 21 21 +22 22 22 22 +23 23 23 23 +24 24 24 24 +25 25 25 25 +26 26 26 26 +27 27 27 27 +28 28 28 28 +29 29 29 29 +UInt64 +UInt64 +UInt64 +UInt64 +UInt64 +UInt64 +UInt64 +UInt64 +UInt64 +UInt64 diff --git a/tests/queries/0_stateless/02933_paste_join.sql b/tests/queries/0_stateless/02933_paste_join.sql new file mode 100644 index 00000000000..1c346438d77 --- /dev/null +++ b/tests/queries/0_stateless/02933_paste_join.sql @@ -0,0 +1,37 @@ +select * from (SELECT number as a FROM numbers(10)) t1 PASTE JOIN (select number as a from numbers(10)) t2; +select * from (SELECT number as a FROM numbers(10)) t1 PASTE JOIN (select number as a from numbers(10) order by a desc) t2; +create table if not exists test (num UInt64) engine=Memory; +insert into test select number from numbers(6); +insert into test select number from numbers(5); +SELECT * FROM (SELECT 1) t1 PASTE JOIN (SELECT 2) SETTINGS joined_subquery_requires_alias=0; +select * from (SELECT number as a FROM numbers(11)) t1 PASTE JOIN test t2 SETTINGS max_threads=1; +select * from (SELECT number as a FROM numbers(11)) t1 PASTE JOIN (select * from test limit 2) t2 SETTINGs max_threads=1; +CREATE TABLE t1 (a UInt64, b UInt64) ENGINE = Memory; +INSERT INTO t1 SELECT number, number FROM numbers(0, 3); +INSERT INTO t1 SELECT number, number FROM numbers(3, 2); +INSERT INTO t1 SELECT number, number FROM numbers(5, 7); +INSERT INTO t1 SELECT number, number FROM numbers(12, 2); +INSERT INTO t1 SELECT number, number FROM numbers(14, 1); +INSERT INTO t1 SELECT number, number FROM numbers(15, 2); +INSERT INTO t1 SELECT number, number FROM numbers(17, 1); +INSERT INTO t1 SELECT number, number FROM numbers(18, 2); +INSERT INTO t1 SELECT number, number FROM numbers(20, 2); +INSERT INTO t1 SELECT number, number FROM numbers(22, 2); +INSERT INTO t1 SELECT number, number FROM numbers(24, 2); +INSERT INTO t1 SELECT number, number FROM numbers(26, 2); +INSERT INTO t1 SELECT number, number FROM numbers(28, 2); + + +CREATE TABLE t2 (a UInt64, b UInt64) ENGINE = Memory; +INSERT INTO t2 SELECT number, number FROM numbers(0, 2); +INSERT INTO t2 
SELECT number, number FROM numbers(2, 3);
+INSERT INTO t2 SELECT number, number FROM numbers(5, 5);
+INSERT INTO t2 SELECT number, number FROM numbers(10, 5);
+INSERT INTO t2 SELECT number, number FROM numbers(15, 15);
+
+SELECT * FROM ( SELECT * from t1 ) t1 PASTE JOIN ( SELECT * from t2 ) t2 SETTINGS max_threads = 1;
+SELECT toTypeName(a) FROM (SELECT number as a FROM numbers(11)) t1 PASTE JOIN (select number as a from numbers(10)) t2 SETTINGS join_use_nulls = 1;
+SET max_threads = 2;
+select * from (SELECT number as a FROM numbers(10)) t1 ANY PASTE JOIN (select number as a from numbers(10)) t2; -- { clientError SYNTAX_ERROR }
+select * from (SELECT number as a FROM numbers(10)) t1 ALL PASTE JOIN (select number as a from numbers(10)) t2; -- { clientError SYNTAX_ERROR }
+select * from (SELECT number as a FROM numbers_mt(10)) t1 PASTE JOIN (select number as a from numbers(10) ORDER BY a DESC) t2 SETTINGS max_block_size=3; -- { serverError BAD_ARGUMENTS }
diff --git a/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.reference b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.reference
new file mode 100644
index 00000000000..d00491fd7e5
--- /dev/null
+++ b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.reference
@@ -0,0 +1 @@
+1
diff --git a/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh
new file mode 100755
index 00000000000..c295f5be43b
--- /dev/null
+++ b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+# Tags: replica
+
+# CREATE AS SELECT for Replicated database is broken (https://github.com/ClickHouse/ClickHouse/issues/35408).
+# This should be fixed and this test should eventually be deleted.
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+${CLICKHOUSE_CLIENT} --allow_experimental_database_replicated=1 --query "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = Replicated('/clickhouse/databases/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}_db', '{shard}', '{replica}')"
+# Non-replicated engines are allowed
+${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test (id UInt64) ENGINE = MergeTree() ORDER BY id AS SELECT 1"
+# Replicated storages are forbidden
+${CLICKHOUSE_CLIENT} --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test2 (id UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test2', '1') ORDER BY id AS SELECT 1" |& grep -cm1 "SUPPORT_IS_DISABLED"
+${CLICKHOUSE_CLIENT} --query "DROP DATABASE ${CLICKHOUSE_DATABASE}_db"
diff --git a/tests/queries/0_stateless/02935_format_with_arbitrary_types.reference b/tests/queries/0_stateless/02935_format_with_arbitrary_types.reference
new file mode 100644
index 00000000000..3455adc8723
--- /dev/null
+++ b/tests/queries/0_stateless/02935_format_with_arbitrary_types.reference
@@ -0,0 +1,70 @@
+-- Const string + non-const arbitrary type
+The answer to all questions is 42.
+The answer to all questions is 43.
+The answer to all questions is 44.
+The answer to all questions is 45.
+The answer to all questions is 46.
+The answer to all questions is 47.
+The answer to all questions is 48.
+The answer to all questions is 49.
+The answer to all questions is 50.
+The answer to all questions is 51.
+The answer to all questions is 52. +The answer to all questions is 53. +The answer to all questions is 42.42. +The answer to all questions is 43.43. +The answer to all questions is 44. +The answer to all questions is true. +The answer to all questions is false. +The answer to all questions is foo. +The answer to all questions is bar. +The answer to all questions is foo. +The answer to all questions is bar. +The answer to all questions is foo. +The answer to all questions is bar. +The answer to all questions is foo. +The answer to all questions is bar. +The answer to all questions is 42. +The answer to all questions is 42. +The answer to all questions is fae310ca-d52a-4923-9e9b-02bf67f4b009. +The answer to all questions is 2023-11-14. +The answer to all questions is 2123-11-14. +The answer to all questions is 2023-11-14 05:50:12. +The answer to all questions is 2023-11-14 05:50:12.123. +The answer to all questions is hallo. +The answer to all questions is [\'foo\',\'bar\']. +The answer to all questions is {"foo":"bar"}. +The answer to all questions is (42,\'foo\'). +The answer to all questions is {42:\'foo\'}. +The answer to all questions is 122.233.64.201. +The answer to all questions is 2001:1:130f:2:3:9c0:876a:130b. +The answer to all questions is (42,43). +The answer to all questions is [(0,0),(10,0),(10,10),(0,10)]. +The answer to all questions is [[(20,20),(50,20),(50,50),(20,50)],[(30,30),(50,50),(50,30)]]. +The answer to all questions is [[[(0,0),(10,0),(10,10),(0,10)]],[[(20,20),(50,20),(50,50),(20,50)],[(30,30),(50,50),(50,30)]]]. +-- Nested +The [\'foo\',\'bar\'] to all questions is [\'qaz\',\'qux\']. +-- NULL arguments +\N +\N +\N +\N +\N +\N +\N +-- Various arguments tests +The Non-const to all questions is strings +The Two arguments to all questions is test +The Three to all questions is arguments and test +The 3 to all questions is arguments test and with int type +The 42 to all questions is 144 +The 42 to all questions is 144 and 255 +The 42 to all questions is 144 +The 42 to all questions is 144 and 255 +-- Single argument tests +The answer to all questions is 42. +The answer to all questions is 42. +The answer to all questions is foo. +The answer to all questions is foo. 
+\N +\N diff --git a/tests/queries/0_stateless/02935_format_with_arbitrary_types.sql b/tests/queries/0_stateless/02935_format_with_arbitrary_types.sql new file mode 100644 index 00000000000..ad1de2bec6d --- /dev/null +++ b/tests/queries/0_stateless/02935_format_with_arbitrary_types.sql @@ -0,0 +1,85 @@ + +-- Tags: no-fasttest +-- no-fasttest: json type needs rapidjson library, geo types need s2 geometry + +SET allow_experimental_object_type = 1; +SET allow_suspicious_low_cardinality_types=1; + +SELECT '-- Const string + non-const arbitrary type'; +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(42 :: Int8)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(43 :: Int16)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(44 :: Int32)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(45 :: Int64)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(46 :: Int128)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(47 :: Int256)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(48 :: UInt8)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(49 :: UInt16)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(50 :: UInt32)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(51 :: UInt64)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(52 :: UInt128)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(53 :: UInt256)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(42.42 :: Float32)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(43.43 :: Float64)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(44.44 :: Decimal(2))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(true :: Bool)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(false :: Bool)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('foo' :: String)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('bar' :: FixedString(3))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('foo' :: Nullable(String))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('bar' :: Nullable(FixedString(3)))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('foo' :: LowCardinality(String))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('bar' :: LowCardinality(FixedString(3)))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('foo' :: LowCardinality(Nullable(String)))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('bar' :: LowCardinality(Nullable(FixedString(3))))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(42 :: LowCardinality(Nullable(UInt32)))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(42 :: LowCardinality(UInt32))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('fae310ca-d52a-4923-9e9b-02bf67f4b009' :: UUID)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('2023-11-14' :: Date)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('2123-11-14' :: Date32)); +SELECT format('The {0} to all questions is {1}.', 'answer', 
materialize('2023-11-14 05:50:12' :: DateTime('Europe/Amsterdam'))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('2023-11-14 05:50:12.123' :: DateTime64(3, 'Europe/Amsterdam'))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('hallo' :: Enum('hallo' = 1))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(['foo', 'bar'] :: Array(String))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('{"foo": "bar"}' :: JSON)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize((42, 'foo') :: Tuple(Int32, String))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(map(42, 'foo') :: Map(Int32, String))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('122.233.64.201' :: IPv4)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('2001:0001:130F:0002:0003:09C0:876A:130B' :: IPv6)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize((42, 43) :: Point)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize([(0,0),(10,0),(10,10),(0,10)] :: Ring)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize([[(20, 20), (50, 20), (50, 50), (20, 50)], [(30, 30), (50, 50), (50, 30)]] :: Polygon)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]] :: MultiPolygon)); + +SELECT '-- Nested'; +DROP TABLE IF EXISTS format_nested; +CREATE TABLE format_nested(attrs Nested(k String, v String)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO format_nested VALUES (['foo', 'bar'], ['qaz', 'qux']); +SELECT format('The {0} to all questions is {1}.', attrs.k, attrs.v) FROM format_nested; +DROP TABLE format_nested; + +SELECT '-- NULL arguments'; +SELECT format('The {0} to all questions is {1}', NULL, NULL); +SELECT format('The {0} to all questions is {1}', NULL, materialize(NULL :: Nullable(UInt64))); +SELECT format('The {0} to all questions is {1}', materialize(NULL :: Nullable(UInt64)), materialize(NULL :: Nullable(UInt64))); +SELECT format('The {0} to all questions is {1}', 42, materialize(NULL :: Nullable(UInt64))); +SELECT format('The {0} to all questions is {1}', '42', materialize(NULL :: Nullable(UInt64))); +SELECT format('The {0} to all questions is {1}', 42, materialize(NULL :: Nullable(UInt64)), materialize(NULL :: Nullable(UInt64))); +SELECT format('The {0} to all questions is {1}', '42', materialize(NULL :: Nullable(UInt64)), materialize(NULL :: Nullable(UInt64))); + +SELECT '-- Various arguments tests'; +SELECT format('The {0} to all questions is {1}', materialize('Non-const'), materialize(' strings')); +SELECT format('The {0} to all questions is {1}', 'Two arguments ', 'test'); +SELECT format('The {0} to all questions is {1} and {2}', 'Three ', 'arguments', ' test'); +SELECT format('The {0} to all questions is {1} and {2}', materialize(3 :: Int64), ' arguments test', ' with int type'); +SELECT format('The {0} to all questions is {1}', materialize(42 :: Int32), materialize(144 :: UInt64)); +SELECT format('The {0} to all questions is {1} and {2}', materialize(42 :: Int32), materialize(144 :: UInt64), materialize(255 :: UInt32)); +SELECT format('The {0} to all questions is {1}', 42, 144); +SELECT format('The {0} to all questions is {1} and {2}', 42, 144, 255); + +SELECT '-- Single argument tests'; +SELECT format('The answer to all 
questions is {0}.', 42); +SELECT format('The answer to all questions is {0}.', materialize(42)); +SELECT format('The answer to all questions is {0}.', 'foo'); +SELECT format('The answer to all questions is {0}.', materialize('foo')); +SELECT format('The answer to all questions is {0}.', NULL); +SELECT format('The answer to all questions is {0}.', materialize(NULL :: Nullable(UInt64))); diff --git a/tests/queries/0_stateless/02935_ipv6_bit_operations.reference b/tests/queries/0_stateless/02935_ipv6_bit_operations.reference new file mode 100644 index 00000000000..22d5cda0a39 --- /dev/null +++ b/tests/queries/0_stateless/02935_ipv6_bit_operations.reference @@ -0,0 +1 @@ +11111111111111110000000000000000111111111111111100000000000000001111111111111111000000000000000011111111111111110000000000000000 00000000000000001111111111111111000000000000000011111111111111110000000000000000111111111111111100000000000000001111111111111111 10101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010 01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101 10101010101010100000000000000000101010101010101000000000000000001010101010101010000000000000000010101010101010100000000000000000 10101010101010100000000000000000101010101010101000000000000000001010101010101010000000000000000010101010101010100000000000000000 1010101010101010000000000000000010101010101010100000000000000000101010101010101000000000000000001010101010101010 1010101010101010000000000000000010101010101010100000000000000000101010101010101000000000000000001010101010101010 01010101010101010000000000000000010101010101010100000000000000000101010101010101000000000000000001010101010101010000000000000000 01010101010101010000000000000000010101010101010100000000000000000101010101010101000000000000000001010101010101010000000000000000 0101010101010101000000000000000001010101010101010000000000000000010101010101010100000000000000000101010101010101 0101010101010101000000000000000001010101010101010000000000000000010101010101010100000000000000000101010101010101 11111111111111111010101010101010111111111111111110101010101010101111111111111111101010101010101011111111111111111010101010101010 11111111111111111010101010101010111111111111111110101010101010101111111111111111101010101010101011111111111111111010101010101010 10101010101010101111111111111111101010101010101011111111111111111010101010101010111111111111111110101010101010101111111111111111 10101010101010101111111111111111101010101010101011111111111111111010101010101010111111111111111110101010101010101111111111111111 11111111111111110101010101010101111111111111111101010101010101011111111111111111010101010101010111111111111111110101010101010101 11111111111111110101010101010101111111111111111101010101010101011111111111111111010101010101010111111111111111110101010101010101 01010101010101011111111111111111010101010101010111111111111111110101010101010101111111111111111101010101010101011111111111111111 01010101010101011111111111111111010101010101010111111111111111110101010101010101111111111111111101010101010101011111111111111111 diff --git a/tests/queries/0_stateless/02935_ipv6_bit_operations.sql b/tests/queries/0_stateless/02935_ipv6_bit_operations.sql new file mode 100644 index 00000000000..6598c2ac539 --- /dev/null +++ b/tests/queries/0_stateless/02935_ipv6_bit_operations.sql @@ -0,0 +1,7 @@ +WITH toIPv6('FFFF:0000:FFFF:0000:FFFF:0000:FFFF:0000') AS ip1, 
toIPv6('0000:FFFF:0000:FFFF:0000:FFFF:0000:FFFF') AS ip2, + CAST('226854911280625642308916404954512140970', 'UInt128') AS n1, CAST('113427455640312821154458202477256070485', 'UInt128') AS n2 +SELECT bin(ip1), bin(ip2), bin(n1), bin(n2), + bin(bitAnd(ip1, n1)), bin(bitAnd(n1, ip1)), bin(bitAnd(ip2, n1)), bin(bitAnd(n1, ip2)), + bin(bitAnd(ip1, n2)), bin(bitAnd(n2, ip1)), bin(bitAnd(ip2, n2)), bin(bitAnd(n2, ip2)), + bin(bitOr(ip1, n1)), bin(bitOr(n1, ip1)), bin(bitOr(ip2, n1)), bin(bitOr(n1, ip2)), + bin(bitOr(ip1, n2)), bin(bitOr(n2, ip1)), bin(bitOr(ip2, n2)), bin(bitOr(n2, ip2)); diff --git a/tests/queries/0_stateless/02935_parallel_replicas_settings.reference b/tests/queries/0_stateless/02935_parallel_replicas_settings.reference new file mode 100644 index 00000000000..846d77bfa57 --- /dev/null +++ b/tests/queries/0_stateless/02935_parallel_replicas_settings.reference @@ -0,0 +1,4 @@ +10 +1 +10 +1 diff --git a/tests/queries/0_stateless/02935_parallel_replicas_settings.sql b/tests/queries/0_stateless/02935_parallel_replicas_settings.sql new file mode 100644 index 00000000000..be6f1c2958c --- /dev/null +++ b/tests/queries/0_stateless/02935_parallel_replicas_settings.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS test_parallel_replicas_settings; +CREATE TABLE test_parallel_replicas_settings (n UInt64) ENGINE=MergeTree() ORDER BY tuple(); +INSERT INTO test_parallel_replicas_settings SELECT * FROM numbers(10); + +SET allow_experimental_parallel_reading_from_replicas=2, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1; + +SET cluster_for_parallel_replicas=''; +SELECT count() FROM test_parallel_replicas_settings WHERE NOT ignore(*); -- { serverError CLUSTER_DOESNT_EXIST } + +SET cluster_for_parallel_replicas='parallel_replicas'; +SELECT count() FROM test_parallel_replicas_settings WHERE NOT ignore(*) settings log_comment='0_f621c4f2-4da7-4a7c-bb6d-052c442d0f7f'; + +SYSTEM FLUSH LOGS; + +SELECT count() > 0 FROM system.text_log +WHERE yesterday() <= event_date + AND query_id in (select query_id from system.query_log where current_database=currentDatabase() AND log_comment='0_f621c4f2-4da7-4a7c-bb6d-052c442d0f7f') + AND level = 'Information' + AND message ILIKE '%Disabling ''use_hedged_requests'' in favor of ''allow_experimental_parallel_reading_from_replicas''%' +SETTINGS allow_experimental_parallel_reading_from_replicas=0; + +SET use_hedged_requests=1; +SELECT count() FROM test_parallel_replicas_settings WHERE NOT ignore(*) settings log_comment='1_f621c4f2-4da7-4a7c-bb6d-052c442d0f7f'; + +SYSTEM FLUSH LOGS; + +SET allow_experimental_parallel_reading_from_replicas=0; +SELECT count() > 0 FROM system.text_log +WHERE yesterday() <= event_date + AND query_id in (select query_id from system.query_log where current_database = currentDatabase() AND log_comment = '1_f621c4f2-4da7-4a7c-bb6d-052c442d0f7f') + AND level = 'Warning' + AND message ILIKE '%Setting ''use_hedged_requests'' explicitly with enabled ''allow_experimental_parallel_reading_from_replicas'' has no effect%' +SETTINGS allow_experimental_parallel_reading_from_replicas=0; + +DROP TABLE test_parallel_replicas_settings; diff --git a/tests/queries/0_stateless/02940_json_array_of_unnamed_tuples_inference.reference b/tests/queries/0_stateless/02940_json_array_of_unnamed_tuples_inference.reference new file mode 100644 index 00000000000..aac3e471264 --- /dev/null +++ b/tests/queries/0_stateless/02940_json_array_of_unnamed_tuples_inference.reference @@ -0,0 +1 @@ +data Array(Tuple(Nullable(Int64), Tuple(a Nullable(Int64), b 
Nullable(Int64)), Nullable(Int64), Nullable(String))) diff --git a/tests/queries/0_stateless/02940_json_array_of_unnamed_tuples_inference.sql b/tests/queries/0_stateless/02940_json_array_of_unnamed_tuples_inference.sql new file mode 100644 index 00000000000..a8a7af1f96c --- /dev/null +++ b/tests/queries/0_stateless/02940_json_array_of_unnamed_tuples_inference.sql @@ -0,0 +1,2 @@ +desc format(JSONEachRow, '{"data" : [[1, null, 3, null], [null, {"a" : 12, "b" : 12}, null, "string"], [null, null, 4, "string"]]}'); + diff --git a/tests/queries/0_stateless/02940_system_stacktrace_optimizations.reference b/tests/queries/0_stateless/02940_system_stacktrace_optimizations.reference new file mode 100644 index 00000000000..f08b8ee767b --- /dev/null +++ b/tests/queries/0_stateless/02940_system_stacktrace_optimizations.reference @@ -0,0 +1,5 @@ +thread = 0 +thread != 0 +Send signal to +thread_name = 'foo' +Send signal to 0 threads (total) diff --git a/tests/queries/0_stateless/02940_system_stacktrace_optimizations.sh b/tests/queries/0_stateless/02940_system_stacktrace_optimizations.sh new file mode 100755 index 00000000000..0e23bb6c42b --- /dev/null +++ b/tests/queries/0_stateless/02940_system_stacktrace_optimizations.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# Tags: no-parallel + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# NOTE: due to grep "Cannot obtain a stack trace for thread {}' will be ignored automatically, which is the intention. + +# no message at all +echo "thread = 0" +$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -nm -q "select * from system.stack_trace where thread_id = 0" |& grep -F -o 'Send signal to' + +# send messages to some threads +echo "thread != 0" +$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -nm -q "select * from system.stack_trace where thread_id != 0 format Null" |& grep -F -o 'Send signal to' | grep -v 'Send signal to 0 threads (total)' + +# there is no thread with comm="foo", so no signals will be sent +echo "thread_name = 'foo'" +$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -nm -q "select * from system.stack_trace where thread_name = 'foo' format Null" |& grep -F -o 'Send signal to 0 threads (total)' diff --git a/tests/queries/0_stateless/02941_projections_external_aggregation.reference b/tests/queries/0_stateless/02941_projections_external_aggregation.reference new file mode 100644 index 00000000000..4b1a62520cd --- /dev/null +++ b/tests/queries/0_stateless/02941_projections_external_aggregation.reference @@ -0,0 +1,41 @@ +*** correct aggregation *** +1 0 0 1249950000 +1 0 2 1250000000 +1 1 1 1249975000 +1 1 3 1250025000 +*** correct aggregation with projection *** +1 0 0 1249950000 +1 0 2 1250000000 +1 1 1 1249975000 +1 1 3 1250025000 +*** optimize_aggregation_in_order = 0, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1 *** +1 0 0 1249950000 +1 0 2 1250000000 +1 1 1 1249975000 +1 1 3 1250025000 +*** optimize_aggregation_in_order = 1, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1 *** +1 0 0 1249950000 +1 0 2 1250000000 +1 1 1 1249975000 +1 1 3 1250025000 +*** after materialization *** +*** correct aggregation *** +1 0 0 1249950000 +1 0 2 1250000000 +1 1 1 1249975000 +1 1 3 1250025000 +*** correct aggregation with projection *** +1 0 0 1249950000 +1 0 2 1250000000 +1 1 1 1249975000 +1 1 3 1250025000 +*** optimize_aggregation_in_order = 0, 
max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1 *** +1 0 0 1249950000 +1 0 2 1250000000 +1 1 1 1249975000 +1 1 3 1250025000 +*** optimize_aggregation_in_order = 1, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1 *** +1 0 0 1249950000 +1 0 2 1250000000 +1 1 1 1249975000 +1 1 3 1250025000 diff --git a/tests/queries/0_stateless/02941_projections_external_aggregation.sql b/tests/queries/0_stateless/02941_projections_external_aggregation.sql new file mode 100644 index 00000000000..5053773f142 --- /dev/null +++ b/tests/queries/0_stateless/02941_projections_external_aggregation.sql @@ -0,0 +1,66 @@ +DROP TABLE IF EXISTS t_proj_external; + +CREATE TABLE t_proj_external +( + k1 UInt32, + k2 UInt32, + k3 UInt32, + value UInt32 +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO t_proj_external SELECT 1, number%2, number%4, number FROM numbers(50000); + +SYSTEM STOP MERGES t_proj_external; + +ALTER TABLE t_proj_external ADD PROJECTION aaaa ( + SELECT + k1, + k2, + k3, + sum(value) + GROUP BY k1, k2, k3 +); + +INSERT INTO t_proj_external SELECT 1, number%2, number%4, number FROM numbers(100000) LIMIT 50000, 100000; + +SELECT '*** correct aggregation ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3 SETTINGS optimize_use_projections = 0; + +SELECT '*** correct aggregation with projection ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3; + +SELECT '*** optimize_aggregation_in_order = 0, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1 ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3 SETTINGS optimize_aggregation_in_order = 0, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1; + +SELECT '*** optimize_aggregation_in_order = 1, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1 ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3 SETTINGS optimize_aggregation_in_order = 1, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1; + +SYSTEM START MERGES t_proj_external; + +ALTER TABLE t_proj_external MATERIALIZE PROJECTION aaaa SETTINGS mutations_sync = 2; + +SELECT '*** after materialization ***'; + +SELECT '*** correct aggregation ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3 SETTINGS optimize_use_projections = 0; + +SELECT '*** correct aggregation with projection ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3; + +SELECT '*** optimize_aggregation_in_order = 0, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1 ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3 SETTINGS optimize_aggregation_in_order = 0, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1; + +SELECT '*** optimize_aggregation_in_order = 1, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1 ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3 SETTINGS optimize_aggregation_in_order = 1, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1; + +DROP TABLE IF EXISTS t_proj_external; diff --git a/tests/queries/0_stateless/02943_create_query_interpreter_sample_block_fix.reference 
b/tests/queries/0_stateless/02943_create_query_interpreter_sample_block_fix.reference new file mode 100644 index 00000000000..a0226ef9dd7 --- /dev/null +++ b/tests/queries/0_stateless/02943_create_query_interpreter_sample_block_fix.reference @@ -0,0 +1,9 @@ +['0a'] +-- +['0a'] +['1a'] +-- +['0a'] +-- +['0a'] +['1a'] diff --git a/tests/queries/0_stateless/02943_create_query_interpreter_sample_block_fix.sql b/tests/queries/0_stateless/02943_create_query_interpreter_sample_block_fix.sql new file mode 100644 index 00000000000..0262393fd48 --- /dev/null +++ b/tests/queries/0_stateless/02943_create_query_interpreter_sample_block_fix.sql @@ -0,0 +1,52 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + number UInt64 +) +ENGINE=MergeTree ORDER BY number; + +DROP VIEW IF EXISTS test_mv; +CREATE MATERIALIZED VIEW test_mv ENGINE=MergeTree ORDER BY arr +AS +WITH (SELECT '\d[a-z]') AS constant_value +SELECT extractAll(concat(toString(number), 'a'), assumeNotNull(constant_value)) AS arr +FROM test_table; + +INSERT INTO test_table VALUES (0); +SELECT * FROM test_mv ORDER BY arr; + +SELECT '--'; + +INSERT INTO test_table VALUES (1); +SELECT * FROM test_mv ORDER BY arr; + +SELECT '--'; + +TRUNCATE test_table; + +DROP TABLE IF EXISTS regex_test_table; +CREATE TABLE regex_test_table +( + regex String +) +ENGINE = MergeTree ORDER BY regex; + +INSERT INTO regex_test_table VALUES ('\d[a-z]'); + +DROP VIEW test_mv; +CREATE MATERIALIZED VIEW test_mv ENGINE=MergeTree ORDER BY arr +AS +WITH (SELECT regex FROM regex_test_table) AS constant_value +SELECT extractAll(concat(toString(number), 'a'), assumeNotNull(constant_value)) AS arr +FROM test_table; + +INSERT INTO test_table VALUES (0); +SELECT * FROM test_mv ORDER BY arr; + +SELECT '--'; + +INSERT INTO test_table VALUES (1); +SELECT * FROM test_mv ORDER BY arr; + +DROP VIEW test_mv; +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02943_order_by_all.reference b/tests/queries/0_stateless/02943_order_by_all.reference new file mode 100644 index 00000000000..48d828b6924 --- /dev/null +++ b/tests/queries/0_stateless/02943_order_by_all.reference @@ -0,0 +1,84 @@ +-- no modifiers +A 2 +B 3 +C \N +D 1 +1 D +2 A +3 B +\N C +A 2 +B 3 +C \N +D 1 +1 D +2 A +3 B +\N C +-- with ASC/DESC modifiers +A 2 +B 3 +C \N +D 1 +D 1 +C \N +B 3 +A 2 +A 2 +B 3 +C \N +D 1 +D 1 +C \N +B 3 +A 2 +-- with NULLS FIRST/LAST modifiers +\N C +1 D +2 A +3 B +1 D +2 A +3 B +\N C +\N C +1 D +2 A +3 B +1 D +2 A +3 B +\N C +-- what happens if some column "all" already exists? 
+B 3 10 +D 1 20 +A 2 30 +C \N 40 +B 3 10 +D 1 20 +A 2 30 +C \N 40 +D 1 +A 2 +B 3 +C \N +D 1 +A 2 +B 3 +C \N +A 2 +B 3 +D 1 +\N +A 2 +B 3 +D 1 +\N +B 3 10 +D 1 20 +A 2 30 +C \N 40 +B 3 10 +D 1 20 +A 2 30 +C \N 40 diff --git a/tests/queries/0_stateless/02943_order_by_all.sql b/tests/queries/0_stateless/02943_order_by_all.sql new file mode 100644 index 00000000000..0756563946c --- /dev/null +++ b/tests/queries/0_stateless/02943_order_by_all.sql @@ -0,0 +1,89 @@ +-- Tests that sort expression ORDER BY ALL + +DROP TABLE IF EXISTS order_by_all; + +CREATE TABLE order_by_all +( + a String, + b Nullable(Int32), + all UInt64, +) +ENGINE = Memory; + +INSERT INTO order_by_all VALUES ('B', 3, 10), ('C', NULL, 40), ('D', 1, 20), ('A', 2, 30); + +SELECT '-- no modifiers'; + +SET allow_experimental_analyzer = 0; +SELECT a, b FROM order_by_all ORDER BY ALL; +SELECT b, a FROM order_by_all ORDER BY ALL; + +SET allow_experimental_analyzer = 1; +SELECT a, b FROM order_by_all ORDER BY ALL; +SELECT b, a FROM order_by_all ORDER BY ALL; + +SELECT '-- with ASC/DESC modifiers'; + +SET allow_experimental_analyzer = 0; +SELECT a, b FROM order_by_all ORDER BY ALL ASC; +SELECT a, b FROM order_by_all ORDER BY ALL DESC; + +SET allow_experimental_analyzer = 1; +SELECT a, b FROM order_by_all ORDER BY ALL ASC; +SELECT a, b FROM order_by_all ORDER BY ALL DESC; + +SELECT '-- with NULLS FIRST/LAST modifiers'; + +SET allow_experimental_analyzer = 0; +SELECT b, a FROM order_by_all ORDER BY ALL NULLS FIRST; +SELECT b, a FROM order_by_all ORDER BY ALL NULLS LAST; + +SET allow_experimental_analyzer = 1; +SELECT b, a FROM order_by_all ORDER BY ALL NULLS FIRST; +SELECT b, a FROM order_by_all ORDER BY ALL NULLS LAST; + +SELECT '-- what happens if some column "all" already exists?'; + +-- columns + +SET allow_experimental_analyzer = 0; +SELECT a, b, all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } +SELECT a, b, all FROM order_by_all ORDER BY ALL; -- { serverError UNEXPECTED_EXPRESSION } +SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; + +SET allow_experimental_analyzer = 1; +SELECT a, b, all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } +SELECT a, b, all FROM order_by_all ORDER BY ALL; -- { serverError UNEXPECTED_EXPRESSION } +SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; + +-- column aliases + +SET allow_experimental_analyzer = 0; +SELECT a, b AS all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } +SELECT a, b AS all FROM order_by_all ORDER BY ALL; -- { serverError UNEXPECTED_EXPRESSION } +SELECT a, b AS all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; + +SET allow_experimental_analyzer = 1; +SELECT a, b AS all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } +SELECT a, b AS all FROM order_by_all ORDER BY ALL; -- { serverError UNEXPECTED_EXPRESSION } +SELECT a, b AS all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; + +-- expressions + +SET allow_experimental_analyzer = 0; +SELECT format('{} {}', a, b) AS all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } +SELECT format('{} {}', a, b) AS all FROM order_by_all ORDER BY ALL; -- { serverError UNEXPECTED_EXPRESSION } +SELECT format('{} {}', a, b) AS all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; + +SET allow_experimental_analyzer = 1; +SELECT format('{} {}', a, b) AS all FROM order_by_all ORDER BY all; -- { 
serverError UNEXPECTED_EXPRESSION } +SELECT format('{} {}', a, b) AS all FROM order_by_all ORDER BY ALL; -- { serverError UNEXPECTED_EXPRESSION } +SELECT format('{} {}', a, b) AS all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; + +SET allow_experimental_analyzer = 0; +SELECT a, b, all FROM order_by_all ORDER BY all, a; + +SET allow_experimental_analyzer = 1; +SELECT a, b, all FROM order_by_all ORDER BY all, a; + +DROP TABLE order_by_all; diff --git a/tests/queries/0_stateless/02943_positional_arguments_bugs.reference b/tests/queries/0_stateless/02943_positional_arguments_bugs.reference new file mode 100644 index 00000000000..702e1261186 --- /dev/null +++ b/tests/queries/0_stateless/02943_positional_arguments_bugs.reference @@ -0,0 +1,2 @@ +45 1 +processed 99 0 diff --git a/tests/queries/0_stateless/02943_positional_arguments_bugs.sql b/tests/queries/0_stateless/02943_positional_arguments_bugs.sql new file mode 100644 index 00000000000..b8cf73da42d --- /dev/null +++ b/tests/queries/0_stateless/02943_positional_arguments_bugs.sql @@ -0,0 +1,23 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/46628 +DROP TABLE IF EXISTS t; +CREATE TABLE t +( + `n` int +) + ENGINE = MergeTree + ORDER BY n AS +SELECT * +FROM numbers(10); + +SELECT + sum(n), + 1 AS x +FROM t +GROUP BY x; + +SELECT + 'processed' AS type, + max(number) AS max_date, + min(number) AS min_date +FROM numbers(100) +GROUP BY type; diff --git a/tests/queries/0_stateless/02943_use_full_text_skip_index_with_has_any.reference b/tests/queries/0_stateless/02943_use_full_text_skip_index_with_has_any.reference new file mode 100644 index 00000000000..a561f33b324 --- /dev/null +++ b/tests/queries/0_stateless/02943_use_full_text_skip_index_with_has_any.reference @@ -0,0 +1,17 @@ +1 ['this is a test','example.com'] +-- +1 ['this is a test','example.com'] +-- +2 ['another test','another example'] +-- +1 ['this is a test','example.com'] +2 ['another test','another example'] +-- +1 ['this is a test','example.com'] +-- +1 ['this is a test','example.com'] +-- +2 ['another test','another example'] +-- +1 ['this is a test','example.com'] +2 ['another test','another example'] diff --git a/tests/queries/0_stateless/02943_use_full_text_skip_index_with_has_any.sql b/tests/queries/0_stateless/02943_use_full_text_skip_index_with_has_any.sql new file mode 100644 index 00000000000..f1a3833f640 --- /dev/null +++ b/tests/queries/0_stateless/02943_use_full_text_skip_index_with_has_any.sql @@ -0,0 +1,39 @@ +DROP TABLE IF EXISTS tokenbf_v1_hasany_test; +DROP TABLE IF EXISTS ngrambf_v1_hasany_test; + +CREATE TABLE tokenbf_v1_hasany_test +( + id UInt32, + array Array(String), + INDEX idx_array_tokenbf_v1 array TYPE tokenbf_v1(512,3,0) GRANULARITY 1, +) Engine=MergeTree() ORDER BY id SETTINGS index_granularity = 1; + +CREATE TABLE ngrambf_v1_hasany_test +( + id UInt32, + array Array(String), + INDEX idx_array_ngrambf_v1 array TYPE ngrambf_v1(3,512,3,0) GRANULARITY 1, +) Engine=MergeTree() ORDER BY id SETTINGS index_granularity = 1; + +INSERT INTO tokenbf_v1_hasany_test VALUES (1, ['this is a test', 'example.com']), (2, ['another test', 'another example']); +INSERT INTO ngrambf_v1_hasany_test VALUES (1, ['this is a test', 'example.com']), (2, ['another test', 'another example']); + +SELECT * FROM tokenbf_v1_hasany_test WHERE hasAny(array, ['this is a test']) SETTINGS force_data_skipping_indices='idx_array_tokenbf_v1'; +SELECT '--'; +SELECT * FROM tokenbf_v1_hasany_test WHERE hasAny(array, ['example.com']) SETTINGS 
force_data_skipping_indices='idx_array_tokenbf_v1'; +SELECT '--'; +SELECT * FROM tokenbf_v1_hasany_test WHERE hasAny(array, ['another test']) SETTINGS force_data_skipping_indices='idx_array_tokenbf_v1'; +SELECT '--'; +SELECT * FROM tokenbf_v1_hasany_test WHERE hasAny(array, ['another example', 'example.com']) ORDER BY id ASC SETTINGS force_data_skipping_indices='idx_array_tokenbf_v1'; +SELECT '--'; + +SELECT * FROM ngrambf_v1_hasany_test WHERE hasAny(array, ['this is a test']) SETTINGS force_data_skipping_indices='idx_array_ngrambf_v1'; +SELECT '--'; +SELECT * FROM ngrambf_v1_hasany_test WHERE hasAny(array, ['example.com']) SETTINGS force_data_skipping_indices='idx_array_ngrambf_v1'; +SELECT '--'; +SELECT * FROM ngrambf_v1_hasany_test WHERE hasAny(array, ['another test']) SETTINGS force_data_skipping_indices='idx_array_ngrambf_v1'; +SELECT '--'; +SELECT * FROM ngrambf_v1_hasany_test WHERE hasAny(array, ['another example', 'example.com']) ORDER BY id ASC SETTINGS force_data_skipping_indices='idx_array_ngrambf_v1'; + +DROP TABLE tokenbf_v1_hasany_test; +DROP TABLE ngrambf_v1_hasany_test; diff --git a/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.reference b/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.reference new file mode 100644 index 00000000000..8620171cb99 --- /dev/null +++ b/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.reference @@ -0,0 +1,20 @@ +100 10 10 10 0 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 5 5000 0 1 +0 +10 +98 +set max_size from 100 to 10 +10 10 10 10 0 0 8 1 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 5 5000 0 1 +1 +8 +set max_size from 10 to 100 +100 10 10 10 0 0 8 1 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 5 5000 0 1 +10 +98 +set max_elements from 10 to 2 +100 2 10 10 0 0 18 2 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 5 5000 0 1 +2 +18 +set max_elements from 2 to 10 +100 10 10 10 0 0 18 2 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 5 5000 0 1 +10 +98 diff --git a/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.sh b/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.sh new file mode 100755 index 00000000000..2e344a6b6e5 --- /dev/null +++ b/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-parallel, no-s3-storage, no-random-settings + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +disk_name="s3_cache_02944" + +$CLICKHOUSE_CLIENT --query "SYSTEM DROP FILESYSTEM CACHE" +$CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" + +$CLICKHOUSE_CLIENT -nm --query " +DROP TABLE IF EXISTS test; +CREATE TABLE test (a String) engine=MergeTree() ORDER BY tuple() SETTINGS disk = '$disk_name'; +INSERT INTO test SELECT randomString(100); +SYSTEM DROP FILESYSTEM CACHE; +" + +$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache WHERE state = 'DOWNLOADED'" + +$CLICKHOUSE_CLIENT --query "SELECT * FROM test FORMAT Null" + +$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache WHERE state = 'DOWNLOADED'" +$CLICKHOUSE_CLIENT --query "SELECT sum(size) FROM system.filesystem_cache WHERE state = 'DOWNLOADED'" + +config_path=/etc/clickhouse-server/config.d/storage_conf_02944.xml +config_path_tmp=$config_path.tmp + +echo 'set max_size from 100 to 10' +cat $config_path \ +| sed "s|100<\/max_size>|10<\/max_size>|" \ +> $config_path_tmp +mv $config_path_tmp $config_path + +$CLICKHOUSE_CLIENT -nm --query " +set send_logs_level='fatal'; +SYSTEM RELOAD CONFIG" +$CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" + +$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache WHERE state = 'DOWNLOADED'" +$CLICKHOUSE_CLIENT --query "SELECT sum(size) FROM system.filesystem_cache WHERE state = 'DOWNLOADED'" + +echo 'set max_size from 10 to 100' +cat $config_path \ +| sed "s|10<\/max_size>|100<\/max_size>|" \ +> $config_path_tmp +mv $config_path_tmp $config_path + +$CLICKHOUSE_CLIENT -nm --query " +set send_logs_level='fatal'; +SYSTEM RELOAD CONFIG" +$CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" + +$CLICKHOUSE_CLIENT --query "SELECT * FROM test FORMAT Null" + +$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache WHERE state = 'DOWNLOADED'" +$CLICKHOUSE_CLIENT --query "SELECT sum(size) FROM system.filesystem_cache WHERE state = 'DOWNLOADED'" + +echo 'set max_elements from 10 to 2' +cat $config_path \ +| sed "s|10<\/max_elements>|2<\/max_elements>|" \ +> $config_path_tmp +mv $config_path_tmp $config_path + +$CLICKHOUSE_CLIENT -nm --query " +set send_logs_level='fatal'; +SYSTEM RELOAD CONFIG" +$CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" + +$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache WHERE state = 'DOWNLOADED'" +$CLICKHOUSE_CLIENT --query "SELECT sum(size) FROM system.filesystem_cache WHERE state = 'DOWNLOADED'" + +echo 'set max_elements from 2 to 10' +cat $config_path \ +| sed "s|2<\/max_elements>|10<\/max_elements>|" \ +> $config_path_tmp +mv $config_path_tmp $config_path + +$CLICKHOUSE_CLIENT -nm --query " +set send_logs_level='fatal'; +SYSTEM RELOAD CONFIG" +$CLICKHOUSE_CLIENT --query "DESCRIBE FILESYSTEM CACHE '${disk_name}'" + +$CLICKHOUSE_CLIENT --query "SELECT * FROM test FORMAT Null" + +$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache WHERE state = 'DOWNLOADED'" +$CLICKHOUSE_CLIENT --query "SELECT sum(size) FROM system.filesystem_cache WHERE state = 'DOWNLOADED'" diff --git a/tests/queries/0_stateless/02945_blake3_msan.reference b/tests/queries/0_stateless/02945_blake3_msan.reference new file mode 100644 index 00000000000..5c446c39f9b --- /dev/null +++ b/tests/queries/0_stateless/02945_blake3_msan.reference @@ -0,0 +1 @@ +95066D9DCEB0F4D60F229EF14F6FD26E692C21E480A582808975E55E39BEE1A6 diff --git a/tests/queries/0_stateless/02945_blake3_msan.sql 
b/tests/queries/0_stateless/02945_blake3_msan.sql new file mode 100644 index 00000000000..cad9b6292dd --- /dev/null +++ b/tests/queries/0_stateless/02945_blake3_msan.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest +-- https://github.com/ClickHouse/ClickHouse/issues/57810 +SELECT hex(BLAKE3(BLAKE3('a'))); diff --git a/tests/queries/0_stateless/02946_literal_alias_misclassification.reference b/tests/queries/0_stateless/02946_literal_alias_misclassification.reference new file mode 100644 index 00000000000..d8e5a437352 --- /dev/null +++ b/tests/queries/0_stateless/02946_literal_alias_misclassification.reference @@ -0,0 +1,2 @@ +const 1 +const 2 diff --git a/tests/queries/0_stateless/02946_literal_alias_misclassification.sql b/tests/queries/0_stateless/02946_literal_alias_misclassification.sql new file mode 100644 index 00000000000..0d001bf1e4c --- /dev/null +++ b/tests/queries/0_stateless/02946_literal_alias_misclassification.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS literal_alias_misclassification; + +CREATE TABLE literal_alias_misclassification +( + `id` Int64, + `a` Nullable(String), + `b` Nullable(Int64) +) +ENGINE = MergeTree +ORDER BY id; + + +INSERT INTO literal_alias_misclassification values(1, 'a', 1); +INSERT INTO literal_alias_misclassification values(2, 'b', 2); + +SELECT 'const' AS r, b +FROM + ( SELECT a AS r, b FROM literal_alias_misclassification ) AS t1 + LEFT JOIN + ( SELECT a AS r FROM literal_alias_misclassification ) AS t2 + ON t1.r = t2.r +ORDER BY b; + +DROP TABLE IF EXISTS literal_alias_misclassification; diff --git a/tests/queries/0_stateless/02946_merge_tree_final_split_ranges_by_primary_key.reference b/tests/queries/0_stateless/02946_merge_tree_final_split_ranges_by_primary_key.reference new file mode 100644 index 00000000000..59acae1c7ef --- /dev/null +++ b/tests/queries/0_stateless/02946_merge_tree_final_split_ranges_by_primary_key.reference @@ -0,0 +1,85 @@ +1 +-- +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +-- +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +-- +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +-- +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +16 16 +17 17 +18 18 +19 19 +20 20 +21 21 +22 22 +23 23 +24 24 +25 25 +26 26 +27 27 +28 28 +29 29 +30 30 +31 31 diff --git a/tests/queries/0_stateless/02946_merge_tree_final_split_ranges_by_primary_key.sql b/tests/queries/0_stateless/02946_merge_tree_final_split_ranges_by_primary_key.sql new file mode 100644 index 00000000000..780ed5b7984 --- /dev/null +++ b/tests/queries/0_stateless/02946_merge_tree_final_split_ranges_by_primary_key.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=ReplacingMergeTree ORDER BY id SETTINGS index_granularity = 2; + +INSERT INTO test_table SELECT 0, '0'; +INSERT INTO test_table SELECT number + 1, number + 1 FROM numbers(15); +OPTIMIZE TABLE test_table; + +SELECT COUNT() FROM system.parts WHERE database = currentDatabase() AND table = 'test_table' AND active = 1; +SYSTEM STOP MERGES test_table; + +SELECT '--'; + +SELECT id, value FROM test_table FINAL ORDER BY id; + +SELECT '--'; + +INSERT INTO test_table SELECT 5, '5'; +SELECT id, value FROM test_table FINAL ORDER BY id; + +SELECT '--'; + +INSERT INTO test_table SELECT number + 8, number + 8 FROM numbers(8); +SELECT id, value FROM test_table FINAL ORDER BY id; + +SELECT '--'; + +INSERT 
INTO test_table SELECT number, number FROM numbers(32); +SELECT id, value FROM test_table FINAL ORDER BY id; + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02946_parallel_replicas_distributed.reference b/tests/queries/0_stateless/02946_parallel_replicas_distributed.reference new file mode 100644 index 00000000000..ea4483ec305 --- /dev/null +++ b/tests/queries/0_stateless/02946_parallel_replicas_distributed.reference @@ -0,0 +1 @@ +100 4950 diff --git a/tests/queries/0_stateless/02946_parallel_replicas_distributed.sql b/tests/queries/0_stateless/02946_parallel_replicas_distributed.sql new file mode 100644 index 00000000000..1afd4ff0192 --- /dev/null +++ b/tests/queries/0_stateless/02946_parallel_replicas_distributed.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS test; +DROP TABLE IF EXISTS test_d; + +CREATE TABLE test (id UInt64, date Date) +ENGINE = MergeTree +ORDER BY id +AS select *, '2023-12-25' from numbers(100); + +CREATE TABLE IF NOT EXISTS test_d as test +ENGINE = Distributed(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test); + +SELECT count(), sum(id) +FROM test_d +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1; + +DROP TABLE test_d; +DROP TABLE test; diff --git a/tests/queries/0_stateless/02946_parallel_replicas_force_primary_key.reference b/tests/queries/0_stateless/02946_parallel_replicas_force_primary_key.reference new file mode 100644 index 00000000000..64dfee7b7a1 --- /dev/null +++ b/tests/queries/0_stateless/02946_parallel_replicas_force_primary_key.reference @@ -0,0 +1,6 @@ +1 750 +2 750 +3 750 +1 750 +2 750 +3 750 diff --git a/tests/queries/0_stateless/02946_parallel_replicas_force_primary_key.sql b/tests/queries/0_stateless/02946_parallel_replicas_force_primary_key.sql new file mode 100644 index 00000000000..d33c8cdbc93 --- /dev/null +++ b/tests/queries/0_stateless/02946_parallel_replicas_force_primary_key.sql @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS t1 SYNC; +DROP TABLE IF EXISTS t2 SYNC; +DROP TABLE IF EXISTS t3 SYNC; + +CREATE TABLE t1(k UInt32, v String) ENGINE ReplicatedMergeTree('/02946_parallel_replicas/{database}/test_tbl', 'r1') ORDER BY k; +CREATE TABLE t2(k UInt32, v String) ENGINE ReplicatedMergeTree('/02946_parallel_replicas/{database}/test_tbl', 'r2') ORDER BY k; +CREATE TABLE t3(k UInt32, v String) ENGINE ReplicatedMergeTree('/02946_parallel_replicas/{database}/test_tbl', 'r3') ORDER BY k; + +insert into t1 select number % 4, toString(number) from numbers(1000, 1000); +insert into t2 select number % 4, toString(number) from numbers(2000, 1000); +insert into t3 select number % 4, toString(number) from numbers(3000, 1000); + +system sync replica t1; +system sync replica t2; +system sync replica t3; + +-- w/o parallel replicas +SELECT + k, + count() +FROM t1 +WHERE k > 0 +GROUP BY k +ORDER BY k +SETTINGS force_primary_key = 1, allow_experimental_parallel_reading_from_replicas = 0; + +-- parallel replicas, primary key is used +SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; +SELECT + k, + count() +FROM t1 +WHERE k > 0 +GROUP BY k +ORDER BY k +SETTINGS force_primary_key = 1; + +-- parallel replicas, primary key is NOT used +SELECT + k, + count() +FROM t1 +GROUP BY k +ORDER BY k +SETTINGS force_primary_key = 1; -- { serverError INDEX_NOT_USED } + +DROP TABLE t1 SYNC; +DROP TABLE t2 SYNC; +DROP 
TABLE t3 SYNC; diff --git a/tests/queries/0_stateless/02947_non_post_request_should_be_readonly.reference b/tests/queries/0_stateless/02947_non_post_request_should_be_readonly.reference new file mode 100644 index 00000000000..9cdea62b413 --- /dev/null +++ b/tests/queries/0_stateless/02947_non_post_request_should_be_readonly.reference @@ -0,0 +1,2 @@ +Cannot execute query in readonly mode +Internal Server Error diff --git a/tests/queries/0_stateless/02947_non_post_request_should_be_readonly.sh b/tests/queries/0_stateless/02947_non_post_request_should_be_readonly.sh new file mode 100755 index 00000000000..4250799b522 --- /dev/null +++ b/tests/queries/0_stateless/02947_non_post_request_should_be_readonly.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# Tags: no-parallel + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# This should fail +${CLICKHOUSE_CURL} -X GET -sS "${CLICKHOUSE_URL}&session_id=${SESSION_ID}&query=CREATE+DATABASE+non_post_request_test" | grep -o "Cannot execute query in readonly mode" + +# This should fail +${CLICKHOUSE_CURL} --head -sS "${CLICKHOUSE_URL}&session_id=${SESSION_ID}&query=CREATE+DATABASE+non_post_request_test" | grep -o "Internal Server Error" + +# This should pass - but will throw error "non_post_request_test already exists" if the database was created by any of the above requests. +${CLICKHOUSE_CURL} -X POST -sS "${CLICKHOUSE_URL}&session_id=${SESSION_ID}" -d 'CREATE DATABASE non_post_request_test' +${CLICKHOUSE_CURL} -X POST -sS "${CLICKHOUSE_URL}&session_id=${SESSION_ID}" -d 'DROP DATABASE non_post_request_test' diff --git a/tests/queries/0_stateless/02947_parallel_replicas_remote.reference b/tests/queries/0_stateless/02947_parallel_replicas_remote.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02947_parallel_replicas_remote.sql b/tests/queries/0_stateless/02947_parallel_replicas_remote.sql new file mode 100644 index 00000000000..345d9f9cb03 --- /dev/null +++ b/tests/queries/0_stateless/02947_parallel_replicas_remote.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (id UInt64, date Date) +ENGINE = MergeTree +ORDER BY id +AS select *, '2023-12-25' from numbers(100); + +SELECT count(), sum(id) +FROM remote('127.0.0.1|127.0.0.2|127.0.0.3|127.0.0.4', currentDatabase(), test) +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 4, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree = 1; -- { serverError CLUSTER_DOESNT_EXIST } + +DROP TABLE test; diff --git a/tests/queries/0_stateless/02949_parallel_replicas_in_subquery.reference b/tests/queries/0_stateless/02949_parallel_replicas_in_subquery.reference new file mode 100644 index 00000000000..4d33751c699 --- /dev/null +++ b/tests/queries/0_stateless/02949_parallel_replicas_in_subquery.reference @@ -0,0 +1,8 @@ +--- +2 test2 8 +3 test3 8 +4 test4 1985 +--- +1 test1 42 +--- +3 test3 diff --git a/tests/queries/0_stateless/02949_parallel_replicas_in_subquery.sql b/tests/queries/0_stateless/02949_parallel_replicas_in_subquery.sql new file mode 100644 index 00000000000..53b8a761cda --- /dev/null +++ b/tests/queries/0_stateless/02949_parallel_replicas_in_subquery.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS merge_tree_in_subqueries; +CREATE TABLE merge_tree_in_subqueries (id UInt64, name String, num UInt64) ENGINE = MergeTree ORDER BY (id, name); +INSERT INTO merge_tree_in_subqueries VALUES(1, 'test1', 42); 
+INSERT INTO merge_tree_in_subqueries VALUES(2, 'test2', 8); +INSERT INTO merge_tree_in_subqueries VALUES(3, 'test3', 8); +INSERT INTO merge_tree_in_subqueries VALUES(4, 'test4', 1985); +INSERT INTO merge_tree_in_subqueries VALUES(5, 'test5', 0); + +SET max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree=1; + +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT * FROM system.numbers LIMIT 0) SETTINGS allow_experimental_parallel_reading_from_replicas=2; -- { serverError SUPPORT_IS_DISABLED } +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT * FROM system.numbers LIMIT 0) SETTINGS allow_experimental_parallel_reading_from_replicas=1; + +SELECT '---'; +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT * FROM system.numbers LIMIT 2, 3) ORDER BY id SETTINGS allow_experimental_parallel_reading_from_replicas=2; -- { serverError SUPPORT_IS_DISABLED }; +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT * FROM system.numbers LIMIT 2, 3) ORDER BY id SETTINGS allow_experimental_parallel_reading_from_replicas=1; + +SELECT '---'; +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT 1) ORDER BY id SETTINGS allow_experimental_parallel_reading_from_replicas=2; -- { serverError SUPPORT_IS_DISABLED }; +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT 1) ORDER BY id SETTINGS allow_experimental_parallel_reading_from_replicas=1; + +-- IN with tuples is allowed +SELECT '---'; +SELECT id, name FROM merge_tree_in_subqueries WHERE (id, name) IN (3, 'test3') SETTINGS allow_experimental_parallel_reading_from_replicas=2; + +DROP TABLE IF EXISTS merge_tree_in_subqueries; diff --git a/tests/queries/0_stateless/02949_parallel_replicas_scalar_subquery_big_integer.reference b/tests/queries/0_stateless/02949_parallel_replicas_scalar_subquery_big_integer.reference new file mode 100644 index 00000000000..97bd2c20556 --- /dev/null +++ b/tests/queries/0_stateless/02949_parallel_replicas_scalar_subquery_big_integer.reference @@ -0,0 +1 @@ +6 111111111111111111111111111111111111111 diff --git a/tests/queries/0_stateless/02949_parallel_replicas_scalar_subquery_big_integer.sql b/tests/queries/0_stateless/02949_parallel_replicas_scalar_subquery_big_integer.sql new file mode 100644 index 00000000000..26f87180ab2 --- /dev/null +++ b/tests/queries/0_stateless/02949_parallel_replicas_scalar_subquery_big_integer.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (x UInt8) ENGINE = MergeTree ORDER BY x; +INSERT INTO test VALUES (1), (2), (3); + +SET allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree = 1; + +WITH (SELECT '111111111111111111111111111111111111111'::UInt128) AS v SELECT sum(x), max(v) FROM test; + +DROP TABLE test; diff --git a/tests/queries/0_stateless/02949_ttl_group_by_bug.reference b/tests/queries/0_stateless/02949_ttl_group_by_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02949_ttl_group_by_bug.sql b/tests/queries/0_stateless/02949_ttl_group_by_bug.sql new file mode 100644 index 00000000000..2888f6e7d66 --- /dev/null +++ b/tests/queries/0_stateless/02949_ttl_group_by_bug.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS ttl_group_by_bug; + +CREATE TABLE ttl_group_by_bug +(key UInt32, ts DateTime, value UInt32, 
min_value UInt32 default value, max_value UInt32 default value) +ENGINE = MergeTree() PARTITION BY toYYYYMM(ts) +ORDER BY (key, toStartOfInterval(ts, toIntervalMinute(3)), ts) +TTL ts + INTERVAL 5 MINUTE GROUP BY key, toStartOfInterval(ts, toIntervalMinute(3)) +SET value = sum(value), min_value = min(min_value), max_value = max(max_value), ts=min(toStartOfInterval(ts, toIntervalMinute(3))); + +INSERT INTO ttl_group_by_bug(key, ts, value) SELECT number%5 as key, now() - interval 10 minute + number, 0 FROM numbers(1000); + +OPTIMIZE TABLE ttl_group_by_bug FINAL; + +SELECT * +FROM +( + SELECT + _part, + rowNumberInAllBlocks(), + (key, toStartOfInterval(ts, toIntervalMinute(3)), ts) AS cur, + lagInFrame((key, toStartOfInterval(ts, toIntervalMinute(3)), ts), 1) OVER () AS prev, + 1 + FROM ttl_group_by_bug +) +WHERE cur < prev +LIMIT 2 +SETTINGS max_threads = 1; + +DROP TABLE IF EXISTS ttl_group_by_bug; diff --git a/tests/queries/0_stateless/02950_obfuscator_keywords_more.reference b/tests/queries/0_stateless/02950_obfuscator_keywords_more.reference new file mode 100644 index 00000000000..7c3fcea85ea --- /dev/null +++ b/tests/queries/0_stateless/02950_obfuscator_keywords_more.reference @@ -0,0 +1 @@ +CREATE TABLE test (pill DateTime('UTC'), tart DateTime('Europe/Amsterdam')) ENGINE = ReplicatedVersionedCollapsingMergeTree ORDER BY pill SETTINGS index_granularity = 15414; diff --git a/tests/queries/0_stateless/02950_obfuscator_keywords_more.sh b/tests/queries/0_stateless/02950_obfuscator_keywords_more.sh new file mode 100755 index 00000000000..fb0e7c178e2 --- /dev/null +++ b/tests/queries/0_stateless/02950_obfuscator_keywords_more.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +obf="$CLICKHOUSE_FORMAT --obfuscate" + +echo "CREATE TABLE test (secret1 DateTime('UTC'), secret2 DateTime('Europe/Amsterdam')) ENGINE = ReplicatedVersionedCollapsingMergeTree ORDER BY secret1 SETTINGS index_granularity = 8192;" | $obf diff --git a/tests/queries/0_stateless/02950_parallel_replicas_used_count.reference b/tests/queries/0_stateless/02950_parallel_replicas_used_count.reference new file mode 100644 index 00000000000..21b7b527b7a --- /dev/null +++ b/tests/queries/0_stateless/02950_parallel_replicas_used_count.reference @@ -0,0 +1,8 @@ +100 4950 +1 +89 +90 +91 +92 +93 +1 diff --git a/tests/queries/0_stateless/02950_parallel_replicas_used_count.sql b/tests/queries/0_stateless/02950_parallel_replicas_used_count.sql new file mode 100644 index 00000000000..22f55acd365 --- /dev/null +++ b/tests/queries/0_stateless/02950_parallel_replicas_used_count.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (k UInt64, v String) +ENGINE = MergeTree +ORDER BY k; + +INSERT INTO test SELECT number, toString(number) FROM numbers(100); + +SET allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; + +-- default coordinator +SELECT count(), sum(k) +FROM test +SETTINGS log_comment = '02950_parallel_replicas_used_replicas_count'; + +SYSTEM FLUSH LOGS; +SELECT ProfileEvents['ParallelReplicasUsedCount'] FROM system.query_log WHERE type = 'QueryFinish' AND query_id IN (SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND log_comment = '02950_parallel_replicas_used_replicas_count' AND type = 'QueryFinish' AND initial_query_id = query_id) SETTINGS allow_experimental_parallel_reading_from_replicas=0; + +-- In order coordinator +SELECT k FROM test order by k limit 5 offset 89 SETTINGS optimize_read_in_order=1, log_comment='02950_parallel_replicas_used_replicas_count_2'; + +SYSTEM FLUSH LOGS; +SELECT ProfileEvents['ParallelReplicasUsedCount'] FROM system.query_log WHERE type = 'QueryFinish' AND query_id IN (SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND log_comment = '02950_parallel_replicas_used_replicas_count_2' AND type = 'QueryFinish' AND initial_query_id = query_id) SETTINGS allow_experimental_parallel_reading_from_replicas=0; + +DROP TABLE test; diff --git a/tests/queries/0_stateless/02950_part_log_bytes_uncompressed.reference b/tests/queries/0_stateless/02950_part_log_bytes_uncompressed.reference new file mode 100644 index 00000000000..abdcc960be3 --- /dev/null +++ b/tests/queries/0_stateless/02950_part_log_bytes_uncompressed.reference @@ -0,0 +1,7 @@ +NewPart part_log_bytes_uncompressed all_1_1_0 1 1 +MergeParts part_log_bytes_uncompressed all_1_2_1 1 1 +MutatePart part_log_bytes_uncompressed all_1_2_1_3 1 1 +NewPart part_log_bytes_uncompressed all_2_2_0 1 1 +NewPart part_log_bytes_uncompressed all_4_4_0 1 1 +RemovePart part_log_bytes_uncompressed all_4_4_0 1 1 +NewPart part_log_bytes_uncompressed all_4_4_1 0 0 diff --git a/tests/queries/0_stateless/02950_part_log_bytes_uncompressed.sql b/tests/queries/0_stateless/02950_part_log_bytes_uncompressed.sql new file mode 100644 index 00000000000..0c2cef6e004 --- /dev/null +++ b/tests/queries/0_stateless/02950_part_log_bytes_uncompressed.sql @@ -0,0 +1,24 @@ +CREATE TABLE part_log_bytes_uncompressed ( + key UInt8, + value UInt8 +) +Engine=MergeTree() 
+ORDER BY key; + +INSERT INTO part_log_bytes_uncompressed SELECT 1, 1 FROM numbers(1000); +INSERT INTO part_log_bytes_uncompressed SELECT 2, 1 FROM numbers(1000); + +OPTIMIZE TABLE part_log_bytes_uncompressed FINAL; + +ALTER TABLE part_log_bytes_uncompressed UPDATE value = 3 WHERE 1 = 1 SETTINGS mutations_sync=2; + +INSERT INTO part_log_bytes_uncompressed SELECT 3, 1 FROM numbers(1000); +ALTER TABLE part_log_bytes_uncompressed DROP PART 'all_4_4_0' SETTINGS mutations_sync=2; + +SYSTEM FLUSH LOGS; + +SELECT event_type, table, part_name, bytes_uncompressed > 0, size_in_bytes < bytes_uncompressed FROM system.part_log +WHERE event_date >= yesterday() AND database = currentDatabase() AND table = 'part_log_bytes_uncompressed' +ORDER BY part_name, event_type; + +DROP TABLE part_log_bytes_uncompressed; diff --git a/tests/queries/0_stateless/02950_part_offset_as_primary_key.reference b/tests/queries/0_stateless/02950_part_offset_as_primary_key.reference new file mode 100644 index 00000000000..368f8dd9871 --- /dev/null +++ b/tests/queries/0_stateless/02950_part_offset_as_primary_key.reference @@ -0,0 +1,14 @@ +-4 +-3 +-2 +-1 +0 +-3 +0 +-4 +-2 +-1 +0 +10 +40 +400 diff --git a/tests/queries/0_stateless/02950_part_offset_as_primary_key.sql b/tests/queries/0_stateless/02950_part_offset_as_primary_key.sql new file mode 100644 index 00000000000..736d54023ce --- /dev/null +++ b/tests/queries/0_stateless/02950_part_offset_as_primary_key.sql @@ -0,0 +1,40 @@ +drop table if exists a; + +create table a (i int) engine MergeTree order by i settings index_granularity = 2; +insert into a select -number from numbers(5); + +-- nothing to read +select i from a where _part_offset >= 5 order by i settings max_bytes_to_read = 1; + +-- one granule +select i from a where _part_offset = 0 order by i settings max_rows_to_read = 2; +select i from a where _part_offset = 1 order by i settings max_rows_to_read = 2; +select i from a where _part_offset = 2 order by i settings max_rows_to_read = 2; +select i from a where _part_offset = 3 order by i settings max_rows_to_read = 2; +select i from a where _part_offset = 4 order by i settings max_rows_to_read = 1; + +-- other predicates +select i from a where _part_offset in (1, 4) order by i settings max_rows_to_read = 3; +select i from a where _part_offset not in (1, 4) order by i settings max_rows_to_read = 4; + +-- the force_primary_key check still works +select i from a where _part_offset = 4 order by i settings force_primary_key = 1; -- { serverError INDEX_NOT_USED } + +-- combining with other primary keys doesn't work (makes no sense) +select i from a where i = -3 or _part_offset = 4 order by i settings force_primary_key = 1; -- { serverError INDEX_NOT_USED } + +drop table a; + +drop table if exists b; + +create table b (i int) engine MergeTree order by tuple() settings index_granularity = 2; + +-- all_1_1_0 +insert into b select number * 10 from numbers(5); +-- all_2_2_0 +insert into b select number * 100 from numbers(5); + +-- multiple parts with _part predicate +select i from b where (_part = 'all_1_1_0' and _part_offset in (1, 4)) or (_part = 'all_2_2_0' and _part_offset in (0, 4)) order by i settings max_rows_to_read = 6; + +drop table b; diff --git a/tests/queries/0_stateless/02950_reading_array_tuple_subcolumns.reference b/tests/queries/0_stateless/02950_reading_array_tuple_subcolumns.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02950_reading_array_tuple_subcolumns.sql 
b/tests/queries/0_stateless/02950_reading_array_tuple_subcolumns.sql new file mode 100644 index 00000000000..85bf16a885b --- /dev/null +++ b/tests/queries/0_stateless/02950_reading_array_tuple_subcolumns.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + `id` UInt64, + `t` Tuple(a UInt64, b Array(Tuple(c UInt64, d UInt64))) +) +ENGINE = MergeTree +ORDER BY id +SETTINGS min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1, index_granularity = 8192; +INSERT INTO test SELECT number, tuple(number, arrayMap(x -> tuple(number + 1, number + 2), range(number % 10))) FROM numbers(100000); +INSERT INTO test SELECT number, tuple(number, arrayMap(x -> tuple(number + 1, number + 2), range(number % 10))) FROM numbers(100000); +INSERT INTO test SELECT number, tuple(number, arrayMap(x -> tuple(number + 1, number + 2), range(number % 10))) FROM numbers(100000); +SELECT t.b, t.b.c FROM test ORDER BY id FORMAT Null; +DROP TABLE test; + diff --git a/tests/queries/0_stateless/02951_data.jsonl.zst b/tests/queries/0_stateless/02951_data.jsonl.zst new file mode 100644 index 00000000000..9701cdd5f6e Binary files /dev/null and b/tests/queries/0_stateless/02951_data.jsonl.zst differ diff --git a/tests/queries/0_stateless/02951_parallel_parsing_json_compact_each_row.reference b/tests/queries/0_stateless/02951_parallel_parsing_json_compact_each_row.reference new file mode 100644 index 00000000000..0953b633db6 --- /dev/null +++ b/tests/queries/0_stateless/02951_parallel_parsing_json_compact_each_row.reference @@ -0,0 +1 @@ +15021837090950060251 diff --git a/tests/queries/0_stateless/02951_parallel_parsing_json_compact_each_row.sh b/tests/queries/0_stateless/02951_parallel_parsing_json_compact_each_row.sh new file mode 100755 index 00000000000..bdaac0e0c50 --- /dev/null +++ b/tests/queries/0_stateless/02951_parallel_parsing_json_compact_each_row.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +# Tags: no-parallel + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_LOCAL} --input-format-parallel-parsing 1 --query " + SELECT sum(cityHash64(*)) FROM file('$CUR_DIR/02951_data.jsonl.zst', JSONCompactEachRow, ' + time_offset Decimal64(3), + lat Float64, + lon Float64, + altitude String, + ground_speed Float32, + track_degrees Float32, + flags UInt32, + vertical_rate Int32, + aircraft Tuple( + alert Int64, + alt_geom Int64, + gva Int64, + nac_p Int64, + nac_v Int64, + nic Int64, + nic_baro Int64, + rc Int64, + sda Int64, + sil Int64, + sil_type String, + spi Int64, + track Float64, + type String, + version Int64, + category String, + emergency String, + flight String, + squawk String, + baro_rate Int64, + nav_altitude_fms Int64, + nav_altitude_mcp Int64, + nav_modes Array(String), + nav_qnh Float64, + geom_rate Int64, + ias Int64, + mach Float64, + mag_heading Float64, + oat Int64, + roll Float64, + tas Int64, + tat Int64, + true_heading Float64, + wd Int64, + ws Int64, + track_rate Float64, + nav_heading Float64 + ), + source LowCardinality(String), + geometric_altitude Int32, + geometric_vertical_rate Int32, + indicated_airspeed Int32, + roll_angle Float32, + hex String + ')" diff --git a/tests/queries/0_stateless/02952_archive_parsing.reference b/tests/queries/0_stateless/02952_archive_parsing.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02952_archive_parsing.sql b/tests/queries/0_stateless/02952_archive_parsing.sql new file mode 100644 index 00000000000..49b0223e6ec --- /dev/null +++ b/tests/queries/0_stateless/02952_archive_parsing.sql @@ -0,0 +1 @@ +SELECT * FROM file('::a'); -- { serverError BAD_ARGUMENTS } diff --git a/tests/queries/0_stateless/02952_binary.reference b/tests/queries/0_stateless/02952_binary.reference new file mode 100644 index 00000000000..8205460df96 --- /dev/null +++ b/tests/queries/0_stateless/02952_binary.reference @@ -0,0 +1 @@ +addressToSymbol diff --git a/tests/queries/0_stateless/02952_binary.sh b/tests/queries/0_stateless/02952_binary.sh new file mode 100755 index 00000000000..c55df1a80b1 --- /dev/null +++ b/tests/queries/0_stateless/02952_binary.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CURL} -s "${CLICKHOUSE_PORT_HTTP_PROTO}://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}/binary" 2>/dev/null | grep -oF --max-count 1 'addressToSymbol' diff --git a/tests/queries/0_stateless/02952_clickhouse_local_query_parameters_cli.reference b/tests/queries/0_stateless/02952_clickhouse_local_query_parameters_cli.reference new file mode 100644 index 00000000000..9972842f982 --- /dev/null +++ b/tests/queries/0_stateless/02952_clickhouse_local_query_parameters_cli.reference @@ -0,0 +1 @@ +1 1 diff --git a/tests/queries/0_stateless/02952_clickhouse_local_query_parameters_cli.sh b/tests/queries/0_stateless/02952_clickhouse_local_query_parameters_cli.sh new file mode 100755 index 00000000000..5e9efbbf3ad --- /dev/null +++ b/tests/queries/0_stateless/02952_clickhouse_local_query_parameters_cli.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL --param_x 1 -q "SELECT {x:UInt64}, {x:String};" diff --git a/tests/queries/0_stateless/02952_conjunction_optimization.reference b/tests/queries/0_stateless/02952_conjunction_optimization.reference new file mode 100644 index 00000000000..64663cea662 --- /dev/null +++ b/tests/queries/0_stateless/02952_conjunction_optimization.reference @@ -0,0 +1,117 @@ +3 another +3 +QUERY id: 0 + PROJECTION COLUMNS + a Int32 + b String + PROJECTION + LIST id: 1, nodes: 2 + COLUMN id: 2, column_name: a, result_type: Int32, source_id: 3 + COLUMN id: 4, column_name: b, result_type: String, source_id: 3 + JOIN TREE + TABLE id: 3, table_name: default.02952_disjunction_optimization + WHERE + FUNCTION id: 5, function_name: notIn, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 6, nodes: 2 + COLUMN id: 7, column_name: a, result_type: Int32, source_id: 3 + CONSTANT id: 8, constant_value: Tuple_(UInt64_1, UInt64_2, UInt64_4), constant_value_type: Tuple(UInt8, UInt8, UInt8) +3 another +3 +QUERY id: 0 + PROJECTION COLUMNS + a Int32 + b String + PROJECTION + LIST id: 1, nodes: 2 + COLUMN id: 2, column_name: a, result_type: Int32, source_id: 3 + COLUMN id: 4, column_name: b, result_type: String, source_id: 3 + JOIN TREE + TABLE id: 3, table_name: default.02952_disjunction_optimization + WHERE + FUNCTION id: 5, function_name: and, function_type: ordinary, result_type: Bool + ARGUMENTS + LIST id: 6, nodes: 2 + CONSTANT id: 7, constant_value: UInt64_1, constant_value_type: Bool + FUNCTION id: 8, function_name: notIn, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 9, nodes: 2 + COLUMN id: 10, column_name: a, result_type: Int32, source_id: 3 + CONSTANT id: 11, constant_value: Tuple_(UInt64_1, UInt64_2, UInt64_4), constant_value_type: Tuple(UInt8, UInt8, UInt8) +3 another +QUERY id: 0 + PROJECTION COLUMNS + a Int32 + b String + PROJECTION + LIST id: 1, nodes: 2 + COLUMN id: 2, column_name: a, result_type: Int32, source_id: 3 + COLUMN id: 4, column_name: b, result_type: String, source_id: 3 + JOIN TREE + TABLE id: 3, table_name: default.02952_disjunction_optimization + WHERE + FUNCTION id: 5, function_name: and, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 6, nodes: 2 + FUNCTION id: 7, function_name: notEquals, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 8, nodes: 2 + COLUMN id: 9, column_name: b, result_type: String, source_id: 3 + CONSTANT id: 10, constant_value: \'\', constant_value_type: String + FUNCTION id: 11, function_name: notIn, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 12, nodes: 2 + COLUMN id: 13, column_name: a, result_type: Int32, source_id: 3 + CONSTANT id: 14, constant_value: Tuple_(UInt64_1, UInt64_2, UInt64_4), constant_value_type: Tuple(UInt8, UInt8, UInt8) +3 +QUERY id: 0 + PROJECTION COLUMNS + a Int32 + b String + PROJECTION + LIST id: 1, nodes: 2 + COLUMN id: 2, column_name: a, result_type: Int32, source_id: 3 + COLUMN id: 4, column_name: b, result_type: String, source_id: 3 + JOIN TREE + TABLE id: 3, table_name: default.02952_disjunction_optimization + WHERE + FUNCTION id: 5, function_name: and, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 6, nodes: 2 + FUNCTION id: 7, function_name: equals, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 8, nodes: 2 + COLUMN id: 9, column_name: b, result_type: String, source_id: 3 + CONSTANT id: 10, constant_value: \'\', constant_value_type: String + FUNCTION id: 11, 
function_name: notIn, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 12, nodes: 2 + COLUMN id: 13, column_name: a, result_type: Int32, source_id: 3 + CONSTANT id: 14, constant_value: Tuple_(UInt64_1, UInt64_2, UInt64_4), constant_value_type: Tuple(UInt8, UInt8, UInt8) +3 another +3 +4 +QUERY id: 0 + PROJECTION COLUMNS + a Int32 + b String + PROJECTION + LIST id: 1, nodes: 2 + COLUMN id: 2, column_name: a, result_type: Int32, source_id: 3 + COLUMN id: 4, column_name: b, result_type: String, source_id: 3 + JOIN TREE + TABLE id: 3, table_name: default.02952_disjunction_optimization + WHERE + FUNCTION id: 5, function_name: or, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 6, nodes: 2 + FUNCTION id: 7, function_name: notIn, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 8, nodes: 2 + COLUMN id: 9, column_name: a, result_type: Int32, source_id: 3 + CONSTANT id: 10, constant_value: Tuple_(UInt64_1, UInt64_2, UInt64_4), constant_value_type: Tuple(UInt8, UInt8, UInt8) + FUNCTION id: 11, function_name: equals, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 12, nodes: 2 + COLUMN id: 13, column_name: b, result_type: String, source_id: 3 + CONSTANT id: 14, constant_value: \'\', constant_value_type: String diff --git a/tests/queries/0_stateless/02952_conjunction_optimization.sql b/tests/queries/0_stateless/02952_conjunction_optimization.sql new file mode 100644 index 00000000000..94bc352e4c5 --- /dev/null +++ b/tests/queries/0_stateless/02952_conjunction_optimization.sql @@ -0,0 +1,26 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS 02952_disjunction_optimization; + +CREATE TABLE 02952_disjunction_optimization +(a Int32, b String) +ENGINE=Memory; + +INSERT INTO 02952_disjunction_optimization VALUES (1, 'test'), (2, 'test2'), (3, 'another'), (3, ''), (4, ''); + +SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND a <> 4; +EXPLAIN QUERY TREE SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND a <> 4; + +SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND a <> 4 AND true; +EXPLAIN QUERY TREE SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND a <> 4 AND true; + +SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND a <> 4 AND b <> ''; +EXPLAIN QUERY TREE SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND a <> 4 AND b <> ''; + +SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND b = '' AND a <> 4; +EXPLAIN QUERY TREE SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND b = '' AND a <> 4; + +SELECT * FROM 02952_disjunction_optimization WHERE (a <> 1 AND a <> 2 AND a <> 4) OR b = ''; +EXPLAIN QUERY TREE SELECT * FROM 02952_disjunction_optimization WHERE (a <> 1 AND a <> 2 AND a <> 4) OR b = ''; + +DROP TABLE 02952_disjunction_optimization; diff --git a/tests/queries/0_stateless/02953_slow_create_view.reference b/tests/queries/0_stateless/02953_slow_create_view.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02953_slow_create_view.sql b/tests/queries/0_stateless/02953_slow_create_view.sql new file mode 100644 index 00000000000..7824bd97b92 --- /dev/null +++ b/tests/queries/0_stateless/02953_slow_create_view.sql @@ -0,0 +1,44 @@ +drop view if exists slow_view1; + +create view slow_view1 as +with c1 as (select 1 as a), + c2 as (select a from c1), + c3 as (select a from c2), + c4 as (select a from c3), 
+ c5 as (select a from c4), + c6 as (select a from c5), + c7 as (select a from c6), + c8 as (select a from c7), + c9 as (select a from c8), + c10 as (select a from c9), + c11 as (select a from c10), + c12 as (select a from c11), + c13 as (select a from c12), + c14 as (select a from c13), + c15 as (select a from c14), + c16 as (select a from c15), + c17 as (select a from c16), + c18 as (select a from c17), + c19 as (select a from c18), + c20 as (select a from c19), + c21 as (select a from c20), + c22 as (select a from c21), + c23 as (select a from c22), + c24 as (select a from c23), + c25 as (select a from c24), + c26 as (select a from c25), + c27 as (select a from c26), + c28 as (select a from c27), + c29 as (select a from c28), + c30 as (select a from c29), + c31 as (select a from c30), + c32 as (select a from c31), + c33 as (select a from c32), + c34 as (select a from c33), + c35 as (select a from c34), + c36 as (select a from c35), + c37 as (select a from c36), + c38 as (select a from c37), + c39 as (select a from c38), + c40 as (select a from c39) +select a from c21; diff --git a/tests/queries/0_stateless/02954_analyzer_fuzz_i57086.reference b/tests/queries/0_stateless/02954_analyzer_fuzz_i57086.reference new file mode 100644 index 00000000000..f2386499865 --- /dev/null +++ b/tests/queries/0_stateless/02954_analyzer_fuzz_i57086.reference @@ -0,0 +1,2 @@ +limit w/ GROUP BY 0 0 +limit w/ GROUP BY 0 0 diff --git a/tests/queries/0_stateless/02954_analyzer_fuzz_i57086.sql b/tests/queries/0_stateless/02954_analyzer_fuzz_i57086.sql new file mode 100644 index 00000000000..a8029fdd3d6 --- /dev/null +++ b/tests/queries/0_stateless/02954_analyzer_fuzz_i57086.sql @@ -0,0 +1,15 @@ +--https://github.com/ClickHouse/ClickHouse/issues/57086 +SELECT + 'limit w/ GROUP BY', + count(NULL), + number +FROM remote('127.{1,2}', view( + SELECT intDiv(number, 2147483647) AS number + FROM numbers(10) + )) +GROUP BY number +WITH ROLLUP +ORDER BY + count() ASC, + number DESC NULLS LAST + SETTINGS limit = 2, allow_experimental_analyzer = 1; diff --git a/tests/queries/0_stateless/02955_sparkBar_alias_sparkbar.reference b/tests/queries/0_stateless/02955_sparkBar_alias_sparkbar.reference new file mode 100644 index 00000000000..4600566772a --- /dev/null +++ b/tests/queries/0_stateless/02955_sparkBar_alias_sparkbar.reference @@ -0,0 +1,2 @@ +▂▅▂▃▆█ ▂ +▂▅▂▃▆█ ▂ diff --git a/tests/queries/0_stateless/02955_sparkBar_alias_sparkbar.sql b/tests/queries/0_stateless/02955_sparkBar_alias_sparkbar.sql new file mode 100644 index 00000000000..98259fc8029 --- /dev/null +++ b/tests/queries/0_stateless/02955_sparkBar_alias_sparkbar.sql @@ -0,0 +1,12 @@ +SET allow_experimental_analyzer = 1; +DROP TABLE IF EXISTS spark_bar_test; + +CREATE TABLE spark_bar_test (`value` Int64, `event_date` Date) ENGINE = MergeTree ORDER BY event_date; + +INSERT INTO spark_bar_test VALUES (1,'2020-01-01'), (3,'2020-01-02'), (4,'2020-01-02'), (-3,'2020-01-02'), (5,'2020-01-03'), (2,'2020-01-04'), (3,'2020-01-05'), (7,'2020-01-06'), (6,'2020-01-07'), (8,'2020-01-08'), (2,'2020-01-11'); + +SELECT sparkbar(9)(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_test GROUP BY event_date); +SELECT sparkBar(9)(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_test GROUP BY event_date); + +DROP TABLE IF EXISTS spark_bar_test; + diff --git a/tests/queries/0_stateless/02956_clickhouse_local_system_parts.reference b/tests/queries/0_stateless/02956_clickhouse_local_system_parts.reference new file mode 100644 index 
00000000000..30365d83930 --- /dev/null +++ b/tests/queries/0_stateless/02956_clickhouse_local_system_parts.reference @@ -0,0 +1 @@ +test all_1_1_0 1 diff --git a/tests/queries/0_stateless/02956_clickhouse_local_system_parts.sh b/tests/queries/0_stateless/02956_clickhouse_local_system_parts.sh new file mode 100755 index 00000000000..e9d8eb081fb --- /dev/null +++ b/tests/queries/0_stateless/02956_clickhouse_local_system_parts.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL --multiquery "CREATE TABLE test (x UInt8) ENGINE = MergeTree ORDER BY (); INSERT INTO test SELECT 1; SELECT table, name, rows FROM system.parts WHERE database = currentDatabase();" diff --git a/tests/queries/0_stateless/02956_format_constexpr.reference b/tests/queries/0_stateless/02956_format_constexpr.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/02956_format_constexpr.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/02956_format_constexpr.sql b/tests/queries/0_stateless/02956_format_constexpr.sql new file mode 100644 index 00000000000..32c61436306 --- /dev/null +++ b/tests/queries/0_stateless/02956_format_constexpr.sql @@ -0,0 +1 @@ +SELECT isConstant(format('{}, world', 'Hello')); diff --git a/tests/queries/0_stateless/02958_transform_enum.reference b/tests/queries/0_stateless/02958_transform_enum.reference new file mode 100644 index 00000000000..4c1476a8922 --- /dev/null +++ b/tests/queries/0_stateless/02958_transform_enum.reference @@ -0,0 +1,4 @@ +Hello 123 +world 456 +Hello test +world best diff --git a/tests/queries/0_stateless/02958_transform_enum.sql b/tests/queries/0_stateless/02958_transform_enum.sql new file mode 100644 index 00000000000..3b0fd40a282 --- /dev/null +++ b/tests/queries/0_stateless/02958_transform_enum.sql @@ -0,0 +1,3 @@ +WITH arrayJoin(['Hello', 'world'])::Enum('Hello', 'world') AS x SELECT x, transform(x, ['Hello', 'world'], [123, 456], 0); +WITH arrayJoin(['Hello', 'world'])::Enum('Hello', 'world') AS x SELECT x, transform(x, ['Hello', 'world', 'goodbye'], [123, 456], 0); -- { serverError UNKNOWN_ELEMENT_OF_ENUM } +WITH arrayJoin(['Hello', 'world'])::Enum('Hello', 'world') AS x SELECT x, transform(x, ['Hello', 'world'], ['test', 'best']::Array(Enum('test' = 123, 'best' = 456, '' = 0)), ''::Enum('test' = 123, 'best' = 456, '' = 0)) AS y; diff --git a/tests/queries/0_stateless/02959_system_database_engines.reference b/tests/queries/0_stateless/02959_system_database_engines.reference new file mode 100644 index 00000000000..c3cc6fe7c9d --- /dev/null +++ b/tests/queries/0_stateless/02959_system_database_engines.reference @@ -0,0 +1,3 @@ +Atomic +Lazy +Ordinary diff --git a/tests/queries/0_stateless/02959_system_database_engines.sql b/tests/queries/0_stateless/02959_system_database_engines.sql new file mode 100644 index 00000000000..67cb20f0400 --- /dev/null +++ b/tests/queries/0_stateless/02959_system_database_engines.sql @@ -0,0 +1 @@ +SELECT * FROM system.database_engines WHERE name IN ('Atomic', 'Lazy', 'Ordinary') ORDER BY name; diff --git a/tests/queries/0_stateless/02960_partition_by_udf.reference b/tests/queries/0_stateless/02960_partition_by_udf.reference new file mode 100644 index 00000000000..f599e28b8ab --- /dev/null +++ b/tests/queries/0_stateless/02960_partition_by_udf.reference @@ -0,0 +1 @@ +10 diff --git 
a/tests/queries/0_stateless/02960_partition_by_udf.sql b/tests/queries/0_stateless/02960_partition_by_udf.sql new file mode 100644 index 00000000000..3a5b7491694 --- /dev/null +++ b/tests/queries/0_stateless/02960_partition_by_udf.sql @@ -0,0 +1,19 @@ +-- Tags: no-parallel + +DROP FUNCTION IF EXISTS f1; +CREATE FUNCTION f1 AS (x) -> x; + +CREATE TABLE hit +( + `UserID` UInt32, + `URL` String, + `EventTime` DateTime +) +ENGINE = MergeTree +partition by f1(URL) +ORDER BY (EventTime); + +INSERT INTO hit SELECT * FROM generateRandom() LIMIT 10; +SELECT count() FROM hit; + +DROP TABLE hit; diff --git a/tests/queries/0_stateless/02960_validate_database_engines.reference b/tests/queries/0_stateless/02960_validate_database_engines.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02960_validate_database_engines.sql b/tests/queries/0_stateless/02960_validate_database_engines.sql new file mode 100644 index 00000000000..5d39a76867c --- /dev/null +++ b/tests/queries/0_stateless/02960_validate_database_engines.sql @@ -0,0 +1,14 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS test2960_valid_database_engine; + +-- create database with valid engine. Should succeed. +CREATE DATABASE test2960_valid_database_engine ENGINE = Atomic; + +-- create database with valid engine but arguments are not allowed. Should fail. +CREATE DATABASE test2960_database_engine_args_not_allowed ENGINE = Atomic('foo', 'bar'); -- { serverError BAD_ARGUMENTS } + +-- create database with an invalid engine. Should fail. +CREATE DATABASE test2960_invalid_database_engine ENGINE = Foo; -- { serverError UNKNOWN_DATABASE_ENGINE } + +DROP DATABASE IF EXISTS test2960_valid_database_engine; diff --git a/tests/queries/0_stateless/replication.lib b/tests/queries/0_stateless/replication.lib index 56ec6c18a1f..143332d9974 100755 --- a/tests/queries/0_stateless/replication.lib +++ b/tests/queries/0_stateless/replication.lib @@ -60,21 +60,11 @@ function check_replication_consistency() echo "==================== STACK TRACES ====================" $CLICKHOUSE_CLIENT -q "SELECT query_id, thread_name, thread_id, arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') FROM system.stack_trace where query_id IN (SELECT query_id FROM system.processes WHERE current_database=currentDatabase() AND query LIKE '%$table_name_prefix%') SETTINGS allow_introspection_functions=1 FORMAT Vertical" echo "==================== MUTATIONS ====================" - $CLICKHOUSE_CLIENT -q "SELECT * FROM system.mutations WHERE current_database=currentDatabase() FORMAT Vertical" + $CLICKHOUSE_CLIENT -q "SELECT * FROM system.mutations WHERE database=currentDatabase() FORMAT Vertical" break fi done - # Touch all data to check that it's readable (and trigger PartCheckThread if needed) - while ! $CLICKHOUSE_CLIENT -q "SELECT * FROM merge(currentDatabase(), '$table_name_prefix') FORMAT Null" 2>/dev/null; do - sleep 1; - num_tries=$((num_tries+1)) - if [ $num_tries -eq 250 ]; then - break - fi - done - time_left=$((300 - num_tries)) - # Do not check anything if all replicas are readonly, # because is this case all replicas are probably lost (it may happen and it's not a bug) res=$($CLICKHOUSE_CLIENT -q "SELECT count() - sum(is_readonly) FROM system.replicas WHERE database=currentDatabase() AND table LIKE '$table_name_prefix%'") @@ -85,6 +75,16 @@ function check_replication_consistency() return 0 fi + # Touch all data to check that it's readable (and trigger PartCheckThread if needed) + while ! 
$CLICKHOUSE_CLIENT -q "SELECT * FROM merge(currentDatabase(), '$table_name_prefix') FORMAT Null" 2>/dev/null; do + sleep 1; + num_tries=$((num_tries+1)) + if [ $num_tries -eq 250 ]; then + break + fi + done + time_left=$((300 - num_tries)) + # Trigger pullLogsToQueue(...) and updateMutations(...) on some replica to make it pull all mutations, so it will be possible to kill them some_table=$($CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database=currentDatabase() AND name like '$table_name_prefix%' ORDER BY rand() LIMIT 1") $CLICKHOUSE_CLIENT -q "SYSTEM SYNC REPLICA $some_table PULL" 1>/dev/null 2>/dev/null ||: diff --git a/tests/queries/1_stateful/00165_jit_aggregate_functions.sql b/tests/queries/1_stateful/00165_jit_aggregate_functions.sql index c826a129b2a..157d5892ad8 100644 --- a/tests/queries/1_stateful/00165_jit_aggregate_functions.sql +++ b/tests/queries/1_stateful/00165_jit_aggregate_functions.sql @@ -1,5 +1,6 @@ SET compile_aggregate_expressions = 1; SET min_count_to_compile_aggregate_expression = 0; +SET max_bytes_before_external_group_by='200M'; -- might be randomized to 1 leading to timeout SELECT 'Aggregation using JIT compilation'; diff --git a/tests/queries/1_stateful/00172_hits_joins.sql.j2 b/tests/queries/1_stateful/00172_hits_joins.sql.j2 index 4617fe5aef8..e891f1ba3c3 100644 --- a/tests/queries/1_stateful/00172_hits_joins.sql.j2 +++ b/tests/queries/1_stateful/00172_hits_joins.sql.j2 @@ -4,6 +4,9 @@ SET max_rows_in_join = '{% if join_algorithm == 'grace_hash' %}10K{% else %}0{% endif %}'; SET grace_hash_join_initial_buckets = 4; +-- Test is slow with external sort / group by +SET max_bytes_before_external_sort = 0, max_bytes_before_external_group_by = 0; + SELECT '--- {{ join_algorithm }} ---'; SET join_algorithm = '{{ join_algorithm }}'; diff --git a/tests/queries/1_stateful/00177_memory_bound_merging.sh b/tests/queries/1_stateful/00177_memory_bound_merging.sh index ce889b338d6..d5cd1a05cd8 100755 --- a/tests/queries/1_stateful/00177_memory_bound_merging.sh +++ b/tests/queries/1_stateful/00177_memory_bound_merging.sh @@ -31,7 +31,7 @@ test1() { GROUP BY CounterID, URL, EventDate ORDER BY URL, EventDate LIMIT 5 OFFSET 10 - SETTINGS optimize_aggregation_in_order = 1, enable_memory_bound_merging_of_aggregation_results = 1, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3, use_hedged_requests = 0" + SETTINGS optimize_aggregation_in_order = 1, enable_memory_bound_merging_of_aggregation_results = 1, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3" check_replicas_read_in_order $query_id } @@ -48,7 +48,7 @@ test2() { GROUP BY URL, EventDate ORDER BY URL, EventDate LIMIT 5 OFFSET 10 - SETTINGS optimize_aggregation_in_order = 1, enable_memory_bound_merging_of_aggregation_results = 1, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3, use_hedged_requests = 0, query_plan_aggregation_in_order = 1" + SETTINGS optimize_aggregation_in_order = 1, enable_memory_bound_merging_of_aggregation_results = 1, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3, query_plan_aggregation_in_order = 1" check_replicas_read_in_order $query_id } @@ -64,7 +64,7 @@ test3() { FROM test.hits WHERE CounterID = 1704509 AND UserID = 4322253409885123546 GROUP BY URL, 
EventDate - SETTINGS optimize_aggregation_in_order = 1, enable_memory_bound_merging_of_aggregation_results = 1, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3, use_hedged_requests = 0 + SETTINGS optimize_aggregation_in_order = 1, enable_memory_bound_merging_of_aggregation_results = 1, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3 ) WHERE explain LIKE '%Aggr%Transform%' OR explain LIKE '%InOrder%'" } diff --git a/tests/queries/shell_config.sh b/tests/queries/shell_config.sh index 4f28956b91c..c687a63623f 100644 --- a/tests/queries/shell_config.sh +++ b/tests/queries/shell_config.sh @@ -39,10 +39,15 @@ export CLICKHOUSE_LOCAL=${CLICKHOUSE_LOCAL:="${CLICKHOUSE_BINARY}-local"} [ -x "${CLICKHOUSE_BINARY}-server" ] && CLICKHOUSE_SERVER_BINARY=${CLICKHOUSE_SERVER_BINARY:="${CLICKHOUSE_BINARY}-server"} [ -x "${CLICKHOUSE_BINARY}" ] && CLICKHOUSE_SERVER_BINARY=${CLICKHOUSE_SERVER_BINARY:="${CLICKHOUSE_BINARY} server"} export CLICKHOUSE_SERVER_BINARY=${CLICKHOUSE_SERVER_BINARY:="${CLICKHOUSE_BINARY}-server"} +# benchmark +[ -x "${CLICKHOUSE_BINARY}-benchmark" ] && CLICKHOUSE_BENCHMARK_BINARY=${CLICKHOUSE_BENCHMARK_BINARY:="${CLICKHOUSE_BINARY}-benchmark"} +[ -x "${CLICKHOUSE_BINARY}" ] && CLICKHOUSE_BENCHMARK_BINARY=${CLICKHOUSE_BENCHMARK_BINARY:="${CLICKHOUSE_BINARY} benchmark"} +export CLICKHOUSE_BENCHMARK_BINARY="${CLICKHOUSE_BENCHMARK_BINARY:=${CLICKHOUSE_BINARY}-benchmark}" +export CLICKHOUSE_BENCHMARK_OPT="${CLICKHOUSE_BENCHMARK_OPT0:-} ${CLICKHOUSE_BENCHMARK_OPT:-}" +export CLICKHOUSE_BENCHMARK=${CLICKHOUSE_BENCHMARK:="$CLICKHOUSE_BENCHMARK_BINARY ${CLICKHOUSE_BENCHMARK_OPT:-}"} # others export CLICKHOUSE_OBFUSCATOR=${CLICKHOUSE_OBFUSCATOR:="${CLICKHOUSE_BINARY}-obfuscator"} export CLICKHOUSE_COMPRESSOR=${CLICKHOUSE_COMPRESSOR:="${CLICKHOUSE_BINARY}-compressor"} -export CLICKHOUSE_BENCHMARK=${CLICKHOUSE_BENCHMARK:="${CLICKHOUSE_BINARY}-benchmark ${CLICKHOUSE_BENCHMARK_OPT0:-}"} export CLICKHOUSE_GIT_IMPORT=${CLICKHOUSE_GIT_IMPORT="${CLICKHOUSE_BINARY}-git-import"} export CLICKHOUSE_CONFIG=${CLICKHOUSE_CONFIG:="/etc/clickhouse-server/config.xml"} @@ -81,7 +86,7 @@ export CLICKHOUSE_PORT_KEEPER=${CLICKHOUSE_PORT_KEEPER:="9181"} # keeper-client [ -x "${CLICKHOUSE_BINARY}-keeper-client" ] && CLICKHOUSE_KEEPER_CLIENT=${CLICKHOUSE_KEEPER_CLIENT:="${CLICKHOUSE_BINARY}-keeper-client"} -[ -x "${CLICKHOUSE_BINARY}" ] && CLICKHOUSE_KEEPER_CLIENT=${CLICKHOUSE_KEEPER_CLIENT:="${CLICKHOUSE_BINARY} keeper-client"} +[ -x "${CLICKHOUSE_BINARY}" ] && CLICKHOUSE_KEEPER_CLIENT=${CLICKHOUSE_KEEPER_CLIENT:="${CLICKHOUSE_BINARY} keeper-client --port $CLICKHOUSE_PORT_KEEPER"} export CLICKHOUSE_KEEPER_CLIENT=${CLICKHOUSE_KEEPER_CLIENT:="${CLICKHOUSE_BINARY}-keeper-client --port $CLICKHOUSE_PORT_KEEPER"} export CLICKHOUSE_CLIENT_SECURE=${CLICKHOUSE_CLIENT_SECURE:=$(echo "${CLICKHOUSE_CLIENT}" | sed 's/--secure //' | sed 's/'"--port=${CLICKHOUSE_PORT_TCP}"'//g; s/$/'"--secure --accept-invalid-certificate --port=${CLICKHOUSE_PORT_TCP_SECURE}"'/g')} diff --git a/utils/async_loader_graph b/utils/async_loader_graph index 5bd4aba41ee..9c055dfa86e 100755 --- a/utils/async_loader_graph +++ b/utils/async_loader_graph @@ -3,7 +3,7 @@ set -e if [ "$1" == "--help" ] || [ -z "$1" ]; then cat <&2 -SELECT data from 'system.async_loader' table and render .svg graph of load jobs using 'dot' graphviz tool. 
+SELECT data from 'system.asynchronous_loader' table and render .svg graph of load jobs using 'dot' graphviz tool.
 USAGE: async_loader_graph CLICKHOUSE_CLIENT CLIENT_OPTIONS
 EXAMPLES:
   $ async_loader_graph clickhouse-client > async_loader.svg
@@ -18,7 +18,7 @@ CLICKHOUSE_CLIENT="$@"
 
 echo 'digraph {'
 echo 'rankdir=LR;'
-$CLICKHOUSE_CLIENT --query='select job, job_id, status, is_blocked, is_ready, is_executing, pool, dependencies from system.async_loader FORMAT JSON' \
+$CLICKHOUSE_CLIENT --query='select job, job_id, status, is_blocked, is_ready, is_executing, pool, dependencies from system.asynchronous_loader FORMAT JSON' \
 | jq -r '
     .data[]
     | { "FgLoad": "box", "BgLoad": "hexagon", "BgStartup": "ellipse" } as $shape
diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt
index a07c2648f92..d68330771e5 100644
--- a/utils/check-style/aspell-ignore/en/aspell-dict.txt
+++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt
@@ -1,4 +1,4 @@
-personal_ws-1.1 en 2646
+personal_ws-1.1 en 2657
 AArch
 ACLs
 ALTERs
@@ -30,6 +30,7 @@ AppleClang
 Approximative
 ArrayJoin
 ArrowStream
+AsyncInsertCacheSize
 AsynchronousHeavyMetricsCalculationTimeSpent
 AsynchronousHeavyMetricsUpdateInterval
 AsynchronousInsert
@@ -38,11 +39,6 @@ AsynchronousInsertThreadsActive
 AsynchronousMetricsCalculationTimeSpent
 AsynchronousMetricsUpdateInterval
 AsynchronousReadWait
-AsyncInsertCacheSize
-TablesLoaderBackgroundThreads
-TablesLoaderBackgroundThreadsActive
-TablesLoaderForegroundThreads
-TablesLoaderForegroundThreadsActive
 Authenticator
 Authenticators
 AutoFDO
@@ -506,6 +502,7 @@ Memcheck
 MemoryCode
 MemoryDataAndStack
 MemoryResident
+MemoryResidentMax
 MemorySanitizer
 MemoryShared
 MemoryTracking
@@ -704,6 +701,8 @@ PrettySpaceMonoBlock
 PrettySpaceNoEscapes
 PrettySpaceNoEscapesMonoBlock
 Prewhere
+TotalPrimaryKeyBytesInMemory
+TotalPrimaryKeyBytesInMemoryAllocated
 PrivateKeyPassphraseHandler
 ProfileEvents
 Profiler
@@ -754,6 +753,7 @@ Redash
 Reddit
 Refactorings
 ReferenceKeyed
+Refreshable
 RegexpTree
 RemoteRead
 ReplacingMergeTree
@@ -876,6 +876,7 @@ TLSv
 TPCH
 TSDB
 TSVRaw
+TSVWithNames
 TSVs
 TSan
 TThe
@@ -887,6 +888,10 @@ TabSeparatedRawWithNamesAndTypes
 TabSeparatedWithNames
 TabSeparatedWithNamesAndTypes
 Tabix
+TablesLoaderBackgroundThreads
+TablesLoaderBackgroundThreadsActive
+TablesLoaderForegroundThreads
+TablesLoaderForegroundThreadsActive
 TablesToDropQueueSize
 TargetSpecific
 Telegraf
@@ -1233,6 +1238,7 @@ changelogs
 charset
 charsets
 chconn
+cheatsheet
 checkouting
 checksummed
 checksumming
@@ -1848,6 +1854,8 @@ mininum
 miniselect
 minmap
 minmax
+minSampleSizeContinuous
+minSampleSizeConversion
 mins
 misconfiguration
 mispredictions
@@ -2145,6 +2153,7 @@ reddit
 redis
 redisstreams
 refcounter
+refreshable
 regexpExtract
 regexpQuoteMeta
 regionHierarchy
@@ -2189,8 +2198,6 @@ retentions
 rethrow
 retransmit
 retriable
-retuned
-reverseDNSQuery
 reverseUTF
 rightPad
 rightPadUTF
@@ -2231,6 +2238,7 @@ seektable
 sequenceCount
 sequenceMatch
 sequenceNextNode
+seriesPeriodDetectFFT
 serverTimeZone
 serverTimezone
 serverUUID
diff --git a/utils/check-style/check-style b/utils/check-style/check-style
index 39d371e25d5..88b43afff26 100755
--- a/utils/check-style/check-style
+++ b/utils/check-style/check-style
@@ -429,3 +429,6 @@ join -v1 <(find $ROOT_PATH/{src,programs,utils} -name '*.h' -printf '%f\n' | sor
 
 # Don't allow dynamic compiler check with CMake, because we are using hermetic, reproducible, cross-compiled, static (TLDR, good) builds.
 ls -1d $ROOT_PATH/contrib/*-cmake | xargs -I@ find @ -name 'CMakeLists.txt' -or -name '*.cmake' | xargs grep --with-filename -i -P 'check_c_compiler_flag|check_cxx_compiler_flag|check_c_source_compiles|check_cxx_source_compiles|check_include_file|check_symbol_exists|cmake_push_check_state|cmake_pop_check_state|find_package|CMAKE_REQUIRED_FLAGS|CheckIncludeFile|CheckCCompilerFlag|CheckCXXCompilerFlag|CheckCSourceCompiles|CheckCXXSourceCompiles|CheckCSymbolExists|CheckCXXSymbolExists' | grep -v Rust && echo "^ It's not allowed to have dynamic compiler checks with CMake."
+
+# DOS/Windows newlines
+find $ROOT_PATH/{base,src,programs,utils,docs} -name '*.md' -or -name '*.h' -or -name '*.cpp' -or -name '*.js' -or -name '*.py' -or -name '*.html' | xargs grep -l -P '\r$' && echo "^ Files contain DOS/Windows newlines (\r\n instead of \n)."
diff --git a/utils/check-style/check-typos b/utils/check-style/check-typos
index 9194a9464a7..0486efb37b1 100755
--- a/utils/check-style/check-typos
+++ b/utils/check-style/check-typos
@@ -4,6 +4,7 @@
 
 ROOT_PATH=$(git rev-parse --show-toplevel)
 
+#FIXME: check all (or almost all) repo
 codespell \
     --skip "*generated*,*gperf*,*.bin,*.mrk*,*.idx,checksums.txt,*.dat,*.pyc,*.kate-swp,*obfuscateQueries.cpp,d3-*.js,*.min.js,*.sum,${ROOT_PATH}/utils/check-style/aspell-ignore" \
     --ignore-words "${ROOT_PATH}/utils/check-style/codespell-ignore-words.list" \
diff --git a/utils/keeper-bench/Runner.cpp b/utils/keeper-bench/Runner.cpp
index 611ca948c53..a4b579f1f7b 100644
--- a/utils/keeper-bench/Runner.cpp
+++ b/utils/keeper-bench/Runner.cpp
@@ -174,7 +174,7 @@ void Runner::thread(std::vector<std::shared_ptr<Coordination::ZooKeeper>> zookee
         || sigaddset(&sig_set, SIGINT)
         || pthread_sigmask(SIG_BLOCK, &sig_set, nullptr))
     {
-        DB::throwFromErrno("Cannot block signal.", DB::ErrorCodes::CANNOT_BLOCK_SIGNAL);
+        throw DB::ErrnoException(DB::ErrorCodes::CANNOT_BLOCK_SIGNAL, "Cannot block signal");
     }
 
     while (true)
diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv
index cb4102b3072..53ad807c44b 100644
--- a/utils/list-versions/version_date.tsv
+++ b/utils/list-versions/version_date.tsv
@@ -1,3 +1,6 @@
+v23.12.1.1368-stable 2023-12-28
+v23.11.3.23-stable 2023-12-21
+v23.11.2.11-stable 2023-12-13
 v23.11.1.2711-stable 2023-12-06
 v23.10.5.20-stable 2023-11-25
 v23.10.4.25-stable 2023-11-17
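Note on the shell_config.sh hunk above: tests are expected to reach clickhouse-benchmark only through the new $CLICKHOUSE_BENCHMARK wrapper, which resolves either the standalone clickhouse-benchmark binary or the combined "clickhouse benchmark" invocation and folds in CLICKHOUSE_BENCHMARK_OPT0/CLICKHOUSE_BENCHMARK_OPT. The sketch below is a hypothetical shell test, not part of this diff; the query, iteration count, and relative path to shell_config.sh are illustrative assumptions.

    #!/usr/bin/env bash
    # Hypothetical example: relies only on variables exported by tests/queries/shell_config.sh.
    CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
    # shellcheck source=../shell_config.sh
    . "$CUR_DIR"/../shell_config.sh

    # $CLICKHOUSE_BENCHMARK already carries the resolved binary plus any extra options,
    # so the test does not need to know whether a standalone or combined binary is installed.
    # clickhouse-benchmark reads queries from stdin; we only check that it exits successfully.
    echo "SELECT 1" | $CLICKHOUSE_BENCHMARK --iterations 10 >/dev/null 2>&1 && echo "OK"

This mirrors how CLICKHOUSE_SERVER_BINARY and CLICKHOUSE_KEEPER_CLIENT are already resolved in the same file, keeping packaging differences out of individual tests.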