diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 06e893fabb3..5088bf90ede 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -13,3 +13,6 @@ # dbms/ → src/ # (though it is unlikely that you will see it in blame) 06446b4f08a142d6f1bc30664c47ded88ab51782 + +# Applied Black formatter for Python code +e6f5a3f98b21ba99cf274a9833797889e020a2b3 diff --git a/.github/actionlint.yml b/.github/actionlint.yml index 0f88f30d42c..4357bd3eb6b 100644 --- a/.github/actionlint.yml +++ b/.github/actionlint.yml @@ -7,3 +7,4 @@ self-hosted-runner: - stress-tester - style-checker - style-checker-aarch64 + - release-maker diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 3988df3b2b1..972aff90195 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -6,8 +6,8 @@ concurrency: 'on': workflow_dispatch: inputs: - sha: - description: 'The SHA hash of the commit from which to create the release' + ref: + description: 'Git reference (branch or commit sha) from which to create the release' required: true type: string type: @@ -15,15 +15,152 @@ concurrency: required: true type: choice options: - - new - patch + - new + dry-run: + description: 'Dry run' + required: false + default: true + type: boolean jobs: - Release: - runs-on: [self-hosted, style-checker-aarch64] + CreateRelease: + env: + GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} + runs-on: [self-hosted, release-maker] steps: + - name: DebugInfo + uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6 + - name: Set envs + # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings + run: | + cat >> "$GITHUB_ENV" << 'EOF' + ROBOT_CLICKHOUSE_SSH_KEY<> "$GITHUB_ENV" + echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV" + - name: Download All Release Artifacts + if: ${{ inputs.type == 'patch' }} + run: | + python3 ./tests/ci/create_release.py --infile "$RELEASE_INFO_FILE" --download-packages ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Push Git Tag for the Release + run: | + python3 ./tests/ci/create_release.py --push-release-tag --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Push New Release Branch + if: ${{ inputs.type == 'new' }} + run: | + python3 ./tests/ci/create_release.py --push-new-release-branch --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Bump CH Version and Update Contributors' List + run: | + python3 ./tests/ci/create_release.py --create-bump-version-pr --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Checkout master + run: | + git checkout master + - name: Bump Docker versions, Changelog, Security + if: ${{ inputs.type == 'patch' }} + run: | + [ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1 + echo "List versions" + ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv + echo "Update docker version" + ./utils/list-versions/update-docker-version.sh + echo "Generate ChangeLog" + export CI=1 + docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \ + --volume=".:/ClickHouse" clickhouse/style-test \ + /ClickHouse/tests/ci/changelog.py -v --debug-helpers \ + --gh-user-or-token="$GH_TOKEN" --jobs=5 \ + --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }} + git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md + echo "Generate Security" + python3 
./utils/security-generator/generate_security.py > SECURITY.md + git diff HEAD + - name: Generate ChangeLog + if: ${{ inputs.type == 'patch' && ! inputs.dry-run }} + uses: peter-evans/create-pull-request@v6 + with: + author: "robot-clickhouse " + token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} + committer: "robot-clickhouse " + commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }} + branch: auto/${{ env.RELEASE_TAG }} + assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher + delete-branch: true + title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }} + labels: do not test + body: | + Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }} + ### Changelog category (leave one): + - Not for changelog (changelog entry is not required) + - name: Reset changes if Dry-run + if: ${{ inputs.dry-run }} + run: | + git reset --hard HEAD + - name: Checkout back to GITHUB_REF + run: | + git checkout "$GITHUB_REF_NAME" + - name: Create GH Release + if: ${{ inputs.type == 'patch' }} + run: | + python3 ./tests/ci/create_release.py --create-gh-release \ + --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }} + + - name: Export TGZ Packages + if: ${{ inputs.type == 'patch' }} + run: | + python3 ./tests/ci/artifactory.py --export-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Test TGZ Packages + if: ${{ inputs.type == 'patch' }} + run: | + python3 ./tests/ci/artifactory.py --test-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Export RPM Packages + if: ${{ inputs.type == 'patch' }} + run: | + python3 ./tests/ci/artifactory.py --export-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Test RPM Packages + if: ${{ inputs.type == 'patch' }} + run: | + python3 ./tests/ci/artifactory.py --test-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Export Debian Packages + if: ${{ inputs.type == 'patch' }} + run: | + python3 ./tests/ci/artifactory.py --export-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Test Debian Packages + if: ${{ inputs.type == 'patch' }} + run: | + python3 ./tests/ci/artifactory.py --test-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Docker clickhouse/clickhouse-server building + if: ${{ inputs.type == 'patch' }} + run: | + cd "./tests/ci" + export CHECK_NAME="Docker server image" + python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }} + - name: Docker clickhouse/clickhouse-keeper building + if: ${{ inputs.type == 'patch' }} + run: | + cd "./tests/ci" + export CHECK_NAME="Docker keeper image" + python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! 
inputs.dry-run && '--push' || '' }} + - name: Post Slack Message + if: always() + run: | + echo Slack Message diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 259e6d41110..c9f4f858825 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -172,7 +172,7 @@ jobs: ################################# Stage Final ################################# # FinishCheck: - if: ${{ !failure() }} + if: ${{ !failure() && !cancelled() }} needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3] runs-on: [self-hosted, style-checker-aarch64] steps: diff --git a/.github/workflows/reusable_test.yml b/.github/workflows/reusable_test.yml index c01dd8ca9d4..ca6df5df14e 100644 --- a/.github/workflows/reusable_test.yml +++ b/.github/workflows/reusable_test.yml @@ -102,6 +102,8 @@ jobs: --job-name '${{inputs.test_name}}' \ --run \ --run-command '''${{inputs.run_command}}''' + # shellcheck disable=SC2319 + echo "JOB_EXIT_CODE=$?" >> "$GITHUB_ENV" - name: Post run if: ${{ !cancelled() }} run: | diff --git a/cmake/limit_jobs.cmake b/cmake/limit_jobs.cmake index 8e48fc9b9d8..17d8dd42a2c 100644 --- a/cmake/limit_jobs.cmake +++ b/cmake/limit_jobs.cmake @@ -42,9 +42,19 @@ endif () # But use 2 parallel jobs, since: # - this is what llvm does # - and I've verfied that lld-11 does not use all available CPU time (in peak) while linking one binary -if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLEL_LINK_JOBS GREATER 2) - message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.") - set (PARALLEL_LINK_JOBS 2) +if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO) + if (ARCH_AARCH64) + # aarch64 builds start to often fail with OOMs (reason not yet clear), for now let's limit the concurrency + message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 1.") + set (PARALLEL_LINK_JOBS 1) + if (LINKER_NAME MATCHES "lld") + math(EXPR LTO_JOBS ${NUMBER_OF_LOGICAL_CORES}/4) + set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -Wl,--thinlto-jobs=${LTO_JOBS}") + endif() + elseif (PARALLEL_LINK_JOBS GREATER 2) + message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.") + set (PARALLEL_LINK_JOBS 2) + endif () endif() message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB RAM, 'OFF' means the native core count).") diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt index 1fbfd29a3bd..191886981c3 100644 --- a/contrib/jemalloc-cmake/CMakeLists.txt +++ b/contrib/jemalloc-cmake/CMakeLists.txt @@ -34,11 +34,7 @@ if (OS_LINUX) # avoid spurious latencies and additional work associated with # MADV_DONTNEED. See # https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation. 
- if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") - set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000") - else() - set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true") - endif() + set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true") else() set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000") endif() diff --git a/contrib/jemalloc-cmake/README b/contrib/jemalloc-cmake/README index 91b58448c1f..89d7c818445 100644 --- a/contrib/jemalloc-cmake/README +++ b/contrib/jemalloc-cmake/README @@ -4,3 +4,14 @@ It allows to integrate JEMalloc into CMake project. - Added JEMALLOC_CONFIG_MALLOC_CONF substitution - Add musl support (USE_MUSL) - Also note, that darwin build requires JEMALLOC_PREFIX, while others do not +- JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE should be disabled + + CLOCK_MONOTONIC_COARSE can go backwards after clock_adjtime(ADJ_FREQUENCY) + Let's disable it for now, and this means that CLOCK_MONOTONIC will be used, + and this should not be a problem, since: + - jemalloc does not call clock_gettime() that frequently + - the difference is CLOCK_MONOTONIC 20ns and CLOCK_MONOTONIC_COARSE 4ns + + This can be done with the following command: + + gg JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE | cut -d: -f1 | xargs sed -i 's@#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE@/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */@' diff --git a/contrib/jemalloc-cmake/include_freebsd_ppc64le/jemalloc/internal/jemalloc_internal_defs.h.in b/contrib/jemalloc-cmake/include_freebsd_ppc64le/jemalloc/internal/jemalloc_internal_defs.h.in index e027ee97d1a..b58d5c1eb8e 100644 --- a/contrib/jemalloc-cmake/include_freebsd_ppc64le/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/contrib/jemalloc-cmake/include_freebsd_ppc64le/jemalloc/internal/jemalloc_internal_defs.h.in @@ -96,7 +96,7 @@ /* * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. */ -#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE +/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */ /* * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. diff --git a/contrib/jemalloc-cmake/include_linux_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in b/contrib/jemalloc-cmake/include_linux_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in index 597f2d59933..8bfc4b60f3d 100644 --- a/contrib/jemalloc-cmake/include_linux_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/contrib/jemalloc-cmake/include_linux_aarch64/jemalloc/internal/jemalloc_internal_defs.h.in @@ -98,7 +98,7 @@ /* * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. */ -#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE +/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */ /* * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. diff --git a/contrib/jemalloc-cmake/include_linux_ppc64le/jemalloc/internal/jemalloc_internal_defs.h.in b/contrib/jemalloc-cmake/include_linux_ppc64le/jemalloc/internal/jemalloc_internal_defs.h.in index 0a72e71496b..d5197c6127f 100644 --- a/contrib/jemalloc-cmake/include_linux_ppc64le/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/contrib/jemalloc-cmake/include_linux_ppc64le/jemalloc/internal/jemalloc_internal_defs.h.in @@ -98,7 +98,7 @@ /* * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/ -#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE +/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */ /* * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. diff --git a/contrib/jemalloc-cmake/include_linux_riscv64/jemalloc/internal/jemalloc_internal_defs.h.in b/contrib/jemalloc-cmake/include_linux_riscv64/jemalloc/internal/jemalloc_internal_defs.h.in index 597f2d59933..8bfc4b60f3d 100644 --- a/contrib/jemalloc-cmake/include_linux_riscv64/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/contrib/jemalloc-cmake/include_linux_riscv64/jemalloc/internal/jemalloc_internal_defs.h.in @@ -98,7 +98,7 @@ /* * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. */ -#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE +/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */ /* * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. diff --git a/contrib/jemalloc-cmake/include_linux_s390x/jemalloc/internal/jemalloc_internal_defs.h.in b/contrib/jemalloc-cmake/include_linux_s390x/jemalloc/internal/jemalloc_internal_defs.h.in index 531f2bca0c2..b3f3a489a83 100644 --- a/contrib/jemalloc-cmake/include_linux_s390x/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/contrib/jemalloc-cmake/include_linux_s390x/jemalloc/internal/jemalloc_internal_defs.h.in @@ -96,7 +96,7 @@ /* * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. */ -#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE +/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */ /* * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_internal_defs.h.in b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_internal_defs.h.in index d21098a4dcc..cf9acdb0b80 100644 --- a/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_internal_defs.h.in @@ -98,7 +98,7 @@ /* * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. */ -#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE +/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */ /* * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. diff --git a/contrib/jemalloc-cmake/include_linux_x86_64_musl/jemalloc/internal/jemalloc_internal_defs.h.in b/contrib/jemalloc-cmake/include_linux_x86_64_musl/jemalloc/internal/jemalloc_internal_defs.h.in index e08a2bed2ec..729bd82c2fa 100644 --- a/contrib/jemalloc-cmake/include_linux_x86_64_musl/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/contrib/jemalloc-cmake/include_linux_x86_64_musl/jemalloc/internal/jemalloc_internal_defs.h.in @@ -99,7 +99,7 @@ /* * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. */ -#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE +/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */ /* * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. 
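
The jemalloc hunks above disable CLOCK_MONOTONIC_COARSE in every platform config, so jemalloc falls back to CLOCK_MONOTONIC, which does not go backwards after clock_adjtime(ADJ_FREQUENCY). A minimal, self-contained sketch (not part of this diff) of the cost trade-off the README cites — roughly 20 ns vs 4 ns per call; absolute numbers depend on kernel, vDSO, and hardware:

```cpp
// Illustrative micro-benchmark: per-call cost of CLOCK_MONOTONIC vs
// CLOCK_MONOTONIC_COARSE (Linux-only clock IDs). Not ClickHouse code.
#include <time.h>
#include <chrono>
#include <cstdio>

static double nsPerCall(clockid_t id)
{
    constexpr int iterations = 1000000;
    timespec ts{};
    const auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < iterations; ++i)
        clock_gettime(id, &ts); // the call whose cost the README compares
    const auto end = std::chrono::steady_clock::now();
    return std::chrono::duration<double, std::nano>(end - start).count() / iterations;
}

int main()
{
    std::printf("CLOCK_MONOTONIC:        %.1f ns/call\n", nsPerCall(CLOCK_MONOTONIC));
    std::printf("CLOCK_MONOTONIC_COARSE: %.1f ns/call\n", nsPerCall(CLOCK_MONOTONIC_COARSE));
    return 0;
}
```

Since jemalloc reads the clock only occasionally, the few extra nanoseconds per call are negligible next to the correctness win.
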
diff --git a/contrib/openssl b/contrib/openssl index ee2bb8513b2..66deddc1e53 160000 --- a/contrib/openssl +++ b/contrib/openssl @@ -1 +1 @@ -Subproject commit ee2bb8513b28bf86b35404dd17a0e29305ca9e08 +Subproject commit 66deddc1e53cda8706604a019777259372d1bd62 diff --git a/docker/test/libfuzzer/run_libfuzzer.py b/docker/test/libfuzzer/run_libfuzzer.py index 5ed019490d5..fa67805dfa5 100755 --- a/docker/test/libfuzzer/run_libfuzzer.py +++ b/docker/test/libfuzzer/run_libfuzzer.py @@ -27,19 +27,19 @@ def run_fuzzer(fuzzer: str): parser.read(path) if parser.has_section("asan"): - os.environ[ - "ASAN_OPTIONS" - ] = f"{os.environ['ASAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['asan'].items())}" + os.environ["ASAN_OPTIONS"] = ( + f"{os.environ['ASAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['asan'].items())}" + ) if parser.has_section("msan"): - os.environ[ - "MSAN_OPTIONS" - ] = f"{os.environ['MSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['msan'].items())}" + os.environ["MSAN_OPTIONS"] = ( + f"{os.environ['MSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['msan'].items())}" + ) if parser.has_section("ubsan"): - os.environ[ - "UBSAN_OPTIONS" - ] = f"{os.environ['UBSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['ubsan'].items())}" + os.environ["UBSAN_OPTIONS"] = ( + f"{os.environ['UBSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['ubsan'].items())}" + ) if parser.has_section("libfuzzer"): custom_libfuzzer_options = " ".join( diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index 80e5e81a4b1..35ffeee5438 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -23,7 +23,10 @@ source /utils.lib /usr/share/clickhouse-test/config/install.sh azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --silent --inMemoryPersistence & + ./setup_minio.sh stateful +./mc admin trace clickminio > /test_output/minio.log & +MC_ADMIN_PID=$! config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml @@ -254,6 +257,8 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] sudo clickhouse stop --pid-path /var/run/clickhouse-server2 ||: fi +# Kill minio admin client to stop collecting logs +kill $MC_ADMIN_PID rg -Fa "" /var/log/clickhouse-server/clickhouse-server.log ||: zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst ||: diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 2d32d188561..c0bfc12bc75 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -54,6 +54,9 @@ source /utils.lib /usr/share/clickhouse-test/config/install.sh ./setup_minio.sh stateless +./mc admin trace clickminio > /test_output/minio.log & +MC_ADMIN_PID=$! 
+ ./setup_hdfs_minicluster.sh config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml @@ -383,6 +386,9 @@ if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||: fi +# Kill minio admin client to stop collecting logs +kill $MC_ADMIN_PID + rg -Fa "" /var/log/clickhouse-server/clickhouse-server.log ||: rg -A50 -Fa "============" /var/log/clickhouse-server/stderr.log ||: zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst & diff --git a/docker/test/stateless/setup_hdfs_minicluster.sh b/docker/test/stateless/setup_hdfs_minicluster.sh index 6671e73562a..15a54f59096 100755 --- a/docker/test/stateless/setup_hdfs_minicluster.sh +++ b/docker/test/stateless/setup_hdfs_minicluster.sh @@ -10,7 +10,7 @@ cd hadoop-3.3.1 export JAVA_HOME=/usr mkdir -p target/test/data chown clickhouse ./target/test/data -sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/garbage.log 2>&1 & +sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/hdfs_minicluster.log 2>&1 & while ! nc -z localhost 12222; do sleep 1 diff --git a/docker/test/style/requirements.txt b/docker/test/style/requirements.txt index bb0cd55dd1a..ed73d0d3636 100644 --- a/docker/test/style/requirements.txt +++ b/docker/test/style/requirements.txt @@ -3,7 +3,7 @@ aiosignal==1.3.1 astroid==3.1.0 async-timeout==4.0.3 attrs==23.2.0 -black==23.12.0 +black==24.4.2 boto3==1.34.131 botocore==1.34.131 certifi==2024.6.2 diff --git a/docs/en/development/developer-instruction.md b/docs/en/development/developer-instruction.md index 0a1fe58b16f..4b11fb71230 100644 --- a/docs/en/development/developer-instruction.md +++ b/docs/en/development/developer-instruction.md @@ -226,15 +226,59 @@ Other IDEs you can use are [Sublime Text](https://www.sublimetext.com/), [Visual ## Writing Code {#writing-code} -The description of ClickHouse architecture can be found here: https://clickhouse.com/docs/en/development/architecture/ +Below you can find some quick links which may be useful when writing code for ClickHouse: -The Code Style Guide: https://clickhouse.com/docs/en/development/style/ +- [ClickHouse architecture description](https://clickhouse.com/docs/en/development/architecture/). +- [The code style guide](https://clickhouse.com/docs/en/development/style/). +- [Adding third-party libraries](https://clickhouse.com/docs/en/development/contrib/#adding-third-party-libraries) +- [Writing tests](https://clickhouse.com/docs/en/development/tests/) +- [List of open issues](https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest) -Adding third-party libraries: https://clickhouse.com/docs/en/development/contrib/#adding-third-party-libraries +## Writing Documentation {#writing-documentation} -Writing tests: https://clickhouse.com/docs/en/development/tests/ +As part of every pull request which adds a new feature, it is necessary to write documentation for it. If you'd like to preview your documentation changes, the instructions for how to build the documentation page locally are available in the README.md file [here](https://github.com/ClickHouse/clickhouse-docs). When adding a new function to ClickHouse, you can use the template below as a guide: -List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest +```markdown +# newFunctionName + +A short description of the function goes here.
It should describe briefly what it does and a typical usage case. + +**Syntax** + +\```sql +newFunctionName(arg1, arg2[, arg3]) +\``` + +**Arguments** + +- `arg1` — Description of the argument. [DataType](../data-types/float.md) +- `arg2` — Description of the argument. [DataType](../data-types/float.md) +- `arg3` — Description of optional argument (optional). [DataType](../data-types/float.md) + +**Implementation Details** + +A description of implementation details if relevant. + +**Returned value** + +- Returns {insert what the function returns here}. [DataType](../data-types/float.md) + +**Example** + +Query: + +\```sql +SELECT 'write your example query here'; +\``` + +Response: + +\```response +┌───────────────────────────────────┐ +│ the result of the query │ +└───────────────────────────────────┘ +\``` +``` ## Test Data {#test-data} diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md index 98e73dec451..6209ef3c8ee 100644 --- a/docs/en/getting-started/install.md +++ b/docs/en/getting-started/install.md @@ -15,7 +15,7 @@ You have four options for getting up and running with ClickHouse: - **[ClickHouse Cloud](https://clickhouse.com/cloud/):** The official ClickHouse as a service, - built by, maintained and supported by the creators of ClickHouse - **[Quick Install](#quick-install):** an easy-to-download binary for testing and developing with ClickHouse -- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, ARM, or PowerPC64LE CPU architecture +- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, modern ARM (ARMv8.2-A and up), or PowerPC64LE CPU architecture - **[Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/):** use the official Docker image in Docker Hub ## ClickHouse Cloud diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md index 22c8c704ba2..7278b91f90d 100644 --- a/docs/en/operations/settings/merge-tree-settings.md +++ b/docs/en/operations/settings/merge-tree-settings.md @@ -1030,7 +1030,7 @@ A table with no primary key represents the extreme case of a single equivalence The fewer and the larger the equivalence classes are, the higher the degree of freedom when re-shuffling rows. -The heuristics applied to find the best row order within each equivalence class is suggested by D. Lemir, O. Kaser in [Reordering columns for smaller indexes](https://doi.org/10.1016/j.ins.2011.02.002) and based on sorting the rows within each equivalence class by ascending cardinality of the non-primary key columns. +The heuristic applied to find the best row order within each equivalence class was suggested by D. Lemire and O. Kaser in [Reordering columns for smaller indexes](https://doi.org/10.1016/j.ins.2011.02.002) and is based on sorting the rows within each equivalence class by ascending cardinality of the non-primary key columns. It performs three steps: 1. Find all equivalence classes based on the row values in primary key columns. 2. For each equivalence class, calculate (usually estimate) the cardinalities of the non-primary-key columns.
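
The merge-tree settings hunk above describes the Lemire–Kaser row-reordering heuristic only in prose. A minimal, self-contained C++ sketch of those steps follows — an illustration, not ClickHouse's implementation: it models rows as vectors of strings and computes exact rather than estimated cardinalities.

```cpp
#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

using Row = std::vector<std::string>; // one row; the leading columns are the primary key

// Steps 2 and 3 for one equivalence class: order the non-key columns by
// ascending (here: exact) cardinality, then sort the rows lexicographically
// in that column order.
static void reorderClass(std::vector<Row> & class_rows, size_t num_key_columns)
{
    const size_t num_columns = class_rows.front().size();

    std::vector<std::pair<size_t, size_t>> column_order; // (cardinality, column index)
    for (size_t col = num_key_columns; col < num_columns; ++col)
    {
        std::set<std::string> distinct;
        for (const auto & row : class_rows)
            distinct.insert(row[col]);
        column_order.emplace_back(distinct.size(), col);
    }
    std::sort(column_order.begin(), column_order.end()); // ascending cardinality

    std::sort(class_rows.begin(), class_rows.end(), [&](const Row & a, const Row & b)
    {
        for (const auto & [cardinality, col] : column_order)
            if (a[col] != b[col])
                return a[col] < b[col];
        return false;
    });
}

// Step 1: group rows into equivalence classes by their primary-key prefix,
// then reorder each class independently and concatenate the results.
std::vector<Row> reorderRows(std::vector<Row> rows, size_t num_key_columns)
{
    std::map<Row, std::vector<Row>> classes;
    for (auto & row : rows)
    {
        Row key(row.begin(), row.begin() + num_key_columns);
        classes[key].push_back(std::move(row));
    }

    std::vector<Row> result;
    for (auto & [key, class_rows] : classes)
    {
        reorderClass(class_rows, num_key_columns);
        for (auto & row : class_rows)
            result.push_back(std::move(row));
    }
    return result;
}
```

Sorting the lowest-cardinality non-key columns first yields the longest runs of equal values within each class, which is what makes the reordered rows compress better.
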
diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 46b1167fa33..4f5e5a5d716 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -2698,6 +2698,204 @@ Like function `YYYYMMDDhhmmssToDate()` but produces a [DateTime64](../data-types Accepts an additional, optional `precision` parameter after the `timezone` parameter. +## changeYear + +Changes the year component of a date or date time. + +**Syntax** + +``` sql +changeYear(date_or_datetime, value) +``` + +**Arguments** + +- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md) +- `value` - a new value of the year. [Integer](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Returns a value of same type as `date_or_datetime`. + +**Example** + +``` sql +SELECT changeYear(toDate('1999-01-01'), 2000), changeYear(toDateTime64('1999-01-01 00:00:00.000', 3), 2000); +``` + +Result: + +``` +┌─changeYear(toDate('1999-01-01'), 2000)─┬─changeYear(toDateTime64('1999-01-01 00:00:00.000', 3), 2000)─┐ +│ 2000-01-01 │ 2000-01-01 00:00:00.000 │ +└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘ +``` + +## changeMonth + +Changes the month component of a date or date time. + +**Syntax** + +``` sql +changeMonth(date_or_datetime, value) +``` + +**Arguments** + +- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md) +- `value` - a new value of the month. [Integer](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Returns a value of same type as `date_or_datetime`. + +**Example** + +``` sql +SELECT changeMonth(toDate('1999-01-01'), 2), changeMonth(toDateTime64('1999-01-01 00:00:00.000', 3), 2); +``` + +Result: + +``` +┌─changeMonth(toDate('1999-01-01'), 2)─┬─changeMonth(toDateTime64('1999-01-01 00:00:00.000', 3), 2)─┐ +│ 1999-02-01 │ 1999-02-01 00:00:00.000 │ +└──────────────────────────────────────┴────────────────────────────────────────────────────────────┘ +``` + +## changeDay + +Changes the day component of a date or date time. + +**Syntax** + +``` sql +changeDay(date_or_datetime, value) +``` + +**Arguments** + +- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md) +- `value` - a new value of the day. [Integer](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Returns a value of same type as `date_or_datetime`. + +**Example** + +``` sql +SELECT changeDay(toDate('1999-01-01'), 5), changeDay(toDateTime64('1999-01-01 00:00:00.000', 3), 5); +``` + +Result: + +``` +┌─changeDay(toDate('1999-01-01'), 5)─┬─changeDay(toDateTime64('1999-01-01 00:00:00.000', 3), 5)─┐ +│ 1999-01-05 │ 1999-01-05 00:00:00.000 │ +└────────────────────────────────────┴──────────────────────────────────────────────────────────┘ +``` + +## changeHour + +Changes the hour component of a date or date time.
+ +**Syntax** + +``` sql +changeHour(date_or_datetime, value) +``` + +**Arguments** + +- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md) +- `value` - a new value of the hour. [Integer](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Returns a value of same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), return [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), return [DateTime64](../data-types/datetime64.md). + +**Example** + +``` sql +SELECT changeHour(toDate('1999-01-01'), 14), changeHour(toDateTime64('1999-01-01 00:00:00.000', 3), 14); +``` + +Result: + +``` +┌─changeHour(toDate('1999-01-01'), 14)─┬─changeHour(toDateTime64('1999-01-01 00:00:00.000', 3), 14)─┐ +│ 1999-01-01 14:00:00 │ 1999-01-01 14:00:00.000 │ +└──────────────────────────────────────┴────────────────────────────────────────────────────────────┘ +``` + +## changeMinute + +Changes the minute component of a date or date time. + +**Syntax** + +``` sql +changeMinute(date_or_datetime, value) +``` + +**Arguments** + +- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md) +- `value` - a new value of the minute. [Integer](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Returns a value of same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), return [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), return [DateTime64](../data-types/datetime64.md). + +**Example** + +``` sql +SELECT changeMinute(toDate('1999-01-01'), 15), changeMinute(toDateTime64('1999-01-01 00:00:00.000', 3), 15); +``` + +Result: + +``` +┌─changeMinute(toDate('1999-01-01'), 15)─┬─changeMinute(toDateTime64('1999-01-01 00:00:00.000', 3), 15)─┐ +│ 1999-01-01 00:15:00 │ 1999-01-01 00:15:00.000 │ +└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘ +``` + +## changeSecond + +Changes the second component of a date or date time. + +**Syntax** + +``` sql +changeSecond(date_or_datetime, value) +``` + +**Arguments** + +- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md) +- `value` - a new value of the second. [Integer](../../sql-reference/data-types/int-uint.md). + +**Returned value** + +- Returns a value of same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), return [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), return [DateTime64](../data-types/datetime64.md). + +**Example** + +``` sql +SELECT changeSecond(toDate('1999-01-01'), 15), changeSecond(toDateTime64('1999-01-01 00:00:00.000', 3), 15); +``` + +Result: + +``` +┌─changeSecond(toDate('1999-01-01'), 15)─┬─changeSecond(toDateTime64('1999-01-01 00:00:00.000', 3), 15)─┐ +│ 1999-01-01 00:00:15 │ 1999-01-01 00:00:15.000 │ +└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘ +``` + ## addYears Adds a specified number of years to a date, a date with time or a string-encoded date / date with time. @@ -2714,6 +2912,7 @@ addYears(date, num) - `num`: Number of years to add.
[(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` plus `num` years. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -2751,6 +2950,7 @@ addQuarters(date, num) - `num`: Number of quarters to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` plus `num` quarters. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -2788,6 +2988,7 @@ addMonths(date, num) - `num`: Number of months to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` plus `num` months. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -2825,6 +3026,7 @@ addWeeks(date, num) - `num`: Number of weeks to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` plus `num` weeks. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -2862,6 +3064,7 @@ addDays(date, num) - `num`: Number of days to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` plus `num` days. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -2899,6 +3102,7 @@ addHours(date, num) - `num`: Number of hours to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` plus `num` hours. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -2936,6 +3140,7 @@ addMinutes(date, num) - `num`: Number of minutes to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` plus `num` minutes. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -2973,6 +3178,7 @@ addSeconds(date, num) - `num`: Number of seconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` plus `num` seconds. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -3010,6 +3216,7 @@ addMilliseconds(date_time, num) - `num`: Number of milliseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date_time` plus `num` milliseconds. [DateTime64](../data-types/datetime64.md). **Example** @@ -3045,6 +3252,7 @@ addMicroseconds(date_time, num) - `num`: Number of microseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date_time` plus `num` microseconds. [DateTime64](../data-types/datetime64.md). **Example** @@ -3080,6 +3288,7 @@ addNanoseconds(date_time, num) - `num`: Number of nanoseconds to add.
[(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date_time` plus `num` nanoseconds. [DateTime64](../data-types/datetime64.md). **Example** @@ -3115,6 +3324,7 @@ addInterval(interval_1, interval_2) - `interval_2`: Second interval to be added. [interval](../data-types/special-data-types/interval.md). **Returned value** + - Returns a tuple of intervals. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)). :::note @@ -3161,6 +3371,7 @@ addTupleOfIntervals(interval_1, interval_2) - `intervals`: Tuple of intervals to add to `date`. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)). **Returned value** + - Returns `date` with added `intervals`. [date](../data-types/date.md)/[date32](../data-types/date32.md)/[datetime](../data-types/datetime.md)/[datetime64](../data-types/datetime64.md). **Example** @@ -3195,6 +3406,7 @@ subtractYears(date, num) - `num`: Number of years to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` minus `num` years. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -3232,6 +3444,7 @@ subtractQuarters(date, num) - `num`: Number of quarters to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` minus `num` quarters. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -3269,6 +3482,7 @@ subtractMonths(date, num) - `num`: Number of months to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` minus `num` months. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -3306,6 +3520,7 @@ subtractWeeks(date, num) - `num`: Number of weeks to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` minus `num` weeks. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -3343,6 +3558,7 @@ subtractDays(date, num) - `num`: Number of days to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` minus `num` days. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -3380,6 +3596,7 @@ subtractHours(date, num) - `num`: Number of hours to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` minus `num` hours. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[Datetime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -3417,6 +3634,7 @@ subtractMinutes(date, num) - `num`: Number of minutes to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` minus `num` minutes. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). 
**Example** @@ -3454,6 +3672,7 @@ subtractSeconds(date, num) - `num`: Number of seconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date` minus `num` seconds. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** @@ -3491,6 +3710,7 @@ subtractMilliseconds(date_time, num) - `num`: Number of milliseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date_time` minus `num` milliseconds. [DateTime64](../data-types/datetime64.md). **Example** @@ -3526,6 +3746,7 @@ subtractMicroseconds(date_time, num) - `num`: Number of microseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date_time` minus `num` microseconds. [DateTime64](../data-types/datetime64.md). **Example** @@ -3561,6 +3782,7 @@ subtractNanoseconds(date_time, num) - `num`: Number of nanoseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md). **Returned value** + - Returns `date_time` minus `num` nanoseconds. [DateTime64](../data-types/datetime64.md). **Example** @@ -3596,6 +3818,7 @@ subtractInterval(interval_1, interval_2) - `interval_2`: Second interval to be negated. [interval](../data-types/special-data-types/interval.md). **Returned value** + - Returns a tuple of intervals. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)). :::note @@ -3642,6 +3865,7 @@ subtractTupleOfIntervals(interval_1, interval_2) - `intervals`: Tuple of intervals to subtract from `date`. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)). **Returned value** + - Returns `date` with subtracted `intervals`. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). **Example** diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 8f9fd9abb0d..260457b3be1 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -2984,6 +2984,66 @@ Result: └─────────┘ ``` +## partitionID + +Computes the [partition ID](../../engines/table-engines/mergetree-family/custom-partitioning-key.md). + +:::note +This function is slow and should not be called for a large number of rows. +::: + +**Syntax** + +```sql +partitionID(x[, y, ...]); +``` + +**Arguments** + +- `x` — Column for which to return the partition ID. +- `y, ...` — Remaining N columns for which to return the partition ID (optional). + +**Returned Value** + +- Partition ID that the row would belong to. [String](../data-types/string.md).
+ +**Example** + +Query: + +```sql +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + i int, + j int +) +ENGINE = MergeTree +PARTITION BY i +ORDER BY tuple(); + +INSERT INTO tab VALUES (1, 1), (1, 2), (1, 3), (2, 4), (2, 5), (2, 6); + +SELECT i, j, partitionID(i), _partition_id FROM tab ORDER BY i, j; +``` + +Result: + +```response +┌─i─┬─j─┬─partitionID(i)─┬─_partition_id─┐ +│ 1 │ 1 │ 1 │ 1 │ +│ 1 │ 2 │ 1 │ 1 │ +│ 1 │ 3 │ 1 │ 1 │ +└───┴───┴────────────────┴───────────────┘ +┌─i─┬─j─┬─partitionID(i)─┬─_partition_id─┐ +│ 2 │ 4 │ 2 │ 2 │ +│ 2 │ 5 │ 2 │ 2 │ +│ 2 │ 6 │ 2 │ 2 │ +└───┴───┴────────────────┴───────────────┘ +``` + + ## shardNum Returns the index of a shard which processes a part of data in a distributed query. Indices are started from `1`. diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 0126613f797..e4b40d98819 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -24,9 +24,8 @@ #include #include #include - +#include #include -#include #include #include @@ -49,6 +48,8 @@ #include #include +#include + namespace fs = std::filesystem; using namespace std::literals; diff --git a/programs/library-bridge/LibraryBridge.cpp b/programs/library-bridge/LibraryBridge.cpp index 8a07ca57104..d35e7a9b832 100644 --- a/programs/library-bridge/LibraryBridge.cpp +++ b/programs/library-bridge/LibraryBridge.cpp @@ -1,5 +1,7 @@ #include "LibraryBridge.h" +#include + int mainEntryClickHouseLibraryBridge(int argc, char ** argv) { DB::LibraryBridge app; diff --git a/programs/library-bridge/LibraryBridgeHandlers.cpp b/programs/library-bridge/LibraryBridgeHandlers.cpp index 8d116e537aa..53f4b31ca06 100644 --- a/programs/library-bridge/LibraryBridgeHandlers.cpp +++ b/programs/library-bridge/LibraryBridgeHandlers.cpp @@ -6,6 +6,7 @@ #include "ExternalDictionaryLibraryHandlerFactory.h" #include +#include #include #include #include diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index b33e1595056..46b543e49e9 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -376,6 +376,7 @@ void LocalServer::setupUsers() " " " default" " default" + " 1" " " " " " " diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h index da2466650a7..0715f358313 100644 --- a/programs/local/LocalServer.h +++ b/programs/local/LocalServer.h @@ -3,11 +3,12 @@ #include #include -#include -#include -#include +#include #include #include +#include +#include +#include #include #include diff --git a/programs/odbc-bridge/ColumnInfoHandler.cpp b/programs/odbc-bridge/ColumnInfoHandler.cpp index 5ff985b3d12..a738d5e8c12 100644 --- a/programs/odbc-bridge/ColumnInfoHandler.cpp +++ b/programs/odbc-bridge/ColumnInfoHandler.cpp @@ -2,6 +2,7 @@ #if USE_ODBC +#include #include #include #include diff --git a/programs/odbc-bridge/IdentifierQuoteHandler.cpp b/programs/odbc-bridge/IdentifierQuoteHandler.cpp index cf5acdc4534..2f00e1171bb 100644 --- a/programs/odbc-bridge/IdentifierQuoteHandler.cpp +++ b/programs/odbc-bridge/IdentifierQuoteHandler.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include "getIdentifierQuote.h" #include "validateODBCConnectionString.h" #include "ODBCPooledConnectionFactory.h" diff --git a/programs/odbc-bridge/MainHandler.cpp b/programs/odbc-bridge/MainHandler.cpp index 2cf1576ccd7..404f66eab3b 100644 --- a/programs/odbc-bridge/MainHandler.cpp +++ b/programs/odbc-bridge/MainHandler.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git 
a/programs/odbc-bridge/ODBCBridge.cpp b/programs/odbc-bridge/ODBCBridge.cpp index e91cc3158df..155e26c8a27 100644 --- a/programs/odbc-bridge/ODBCBridge.cpp +++ b/programs/odbc-bridge/ODBCBridge.cpp @@ -1,5 +1,7 @@ #include "ODBCBridge.h" +#include + int mainEntryClickHouseODBCBridge(int argc, char ** argv) { DB::ODBCBridge app; diff --git a/programs/odbc-bridge/SchemaAllowedHandler.cpp b/programs/odbc-bridge/SchemaAllowedHandler.cpp index c7025ca4311..dfb4333240b 100644 --- a/programs/odbc-bridge/SchemaAllowedHandler.cpp +++ b/programs/odbc-bridge/SchemaAllowedHandler.cpp @@ -2,8 +2,10 @@ #if USE_ODBC +#include #include #include +#include #include #include #include diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 3b88bb36954..053ddaf8d8b 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -68,6 +68,7 @@ #include #include #include +#include #include #include #include diff --git a/programs/server/play.html b/programs/server/play.html index 507a96382a7..9590a65524c 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -516,6 +516,9 @@ /// Save query in history only if it is different. let previous_query = ''; + /// Start of the last query + let last_query_start = 0; + const current_url = new URL(window.location); const opened_locally = location.protocol == 'file:'; @@ -567,6 +570,8 @@ '&password=' + encodeURIComponent(password) } + last_query_start = performance.now(); + const xhr = new XMLHttpRequest; xhr.open('POST', url, true); @@ -579,7 +584,8 @@ if (posted_request_num != request_num) { return; } else if (this.readyState === XMLHttpRequest.DONE) { - renderResponse(this.status, this.response); + const elapsed_msec = performance.now() - last_query_start; + renderResponse(this.status, this.response, elapsed_msec); /// The query is saved in browser history (in state JSON object) /// as well as in URL fragment identifier. @@ -587,7 +593,8 @@ const state = { query: query, status: this.status, - response: this.response.length > 100000 ? null : this.response /// Lower than the browser's limit. + response: this.response.length > 100000 ? null : this.response, /// Lower than the browser's limit. 
+ elapsed_msec: elapsed_msec, }; const title = "ClickHouse Query: " + query; @@ -617,7 +624,7 @@ xhr.send(query); } - function renderResponse(status, response) { + function renderResponse(status, response, elapsed_msec) { document.getElementById('hourglass').style.display = 'none'; if (status === 200) { @@ -632,6 +639,7 @@ renderChart(json); } else { renderUnparsedResult(response); + stats.innerText = `Elapsed (client-side): ${(elapsed_msec / 1000).toFixed(3)} sec.`; } document.getElementById('check-mark').style.display = 'inline'; } else { @@ -651,7 +659,7 @@ clear(); return; } - renderResponse(event.state.status, event.state.response); + renderResponse(event.state.status, event.state.response, event.state.elapsed_msec); }; if (window.location.hash) { diff --git a/pyproject.toml b/pyproject.toml index 90f089afa41..c89d46c0929 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,6 +17,8 @@ src_paths = ["src", "tests/ci", "tests/sqllogic"] [tool.pylint.'MESSAGES CONTROL'] # pytest.mark.parametrize is not callable (not-callable) disable = ''' + pointless-string-statement, + line-too-long, missing-docstring, too-few-public-methods, invalid-name, @@ -36,6 +38,7 @@ disable = ''' bare-except, no-else-return, global-statement, + f-string-without-interpolation, ''' [tool.pylint.SIMILARITIES] diff --git a/src/Access/AccessControl.cpp b/src/Access/AccessControl.cpp index 353358fac65..95a467bbbe5 100644 --- a/src/Access/AccessControl.cpp +++ b/src/Access/AccessControl.cpp @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include diff --git a/src/AggregateFunctions/AggregateFunctionFactory.cpp b/src/AggregateFunctions/AggregateFunctionFactory.cpp index 6555ae63128..b5c6440a69c 100644 --- a/src/AggregateFunctions/AggregateFunctionFactory.cpp +++ b/src/AggregateFunctions/AggregateFunctionFactory.cpp @@ -6,6 +6,7 @@ #include #include #include +#include static constexpr size_t MAX_AGGREGATE_FUNCTION_NAME_LENGTH = 1000; diff --git a/src/Analyzer/InDepthQueryTreeVisitor.h b/src/Analyzer/InDepthQueryTreeVisitor.h index 62ddc06659c..67e3b75d8b5 100644 --- a/src/Analyzer/InDepthQueryTreeVisitor.h +++ b/src/Analyzer/InDepthQueryTreeVisitor.h @@ -3,7 +3,6 @@ #include #include -#include #include #include diff --git a/src/Analyzer/Passes/AggregateFunctionOfGroupByKeysPass.cpp b/src/Analyzer/Passes/AggregateFunctionOfGroupByKeysPass.cpp index a419bbcc133..b68635ab953 100644 --- a/src/Analyzer/Passes/AggregateFunctionOfGroupByKeysPass.cpp +++ b/src/Analyzer/Passes/AggregateFunctionOfGroupByKeysPass.cpp @@ -10,6 +10,8 @@ #include #include +#include + namespace DB { diff --git a/src/Analyzer/Passes/AggregateFunctionsArithmericOperationsPass.cpp b/src/Analyzer/Passes/AggregateFunctionsArithmericOperationsPass.cpp index 098e2858abc..6578a6c5bd7 100644 --- a/src/Analyzer/Passes/AggregateFunctionsArithmericOperationsPass.cpp +++ b/src/Analyzer/Passes/AggregateFunctionsArithmericOperationsPass.cpp @@ -11,6 +11,8 @@ #include #include +#include + namespace DB { diff --git a/src/Analyzer/Passes/ArrayExistsToHasPass.cpp b/src/Analyzer/Passes/ArrayExistsToHasPass.cpp index 62db502e1dc..bf4c1232d79 100644 --- a/src/Analyzer/Passes/ArrayExistsToHasPass.cpp +++ b/src/Analyzer/Passes/ArrayExistsToHasPass.cpp @@ -11,6 +11,8 @@ #include #include +#include + namespace DB { diff --git a/src/Analyzer/Passes/AutoFinalOnQueryPass.cpp b/src/Analyzer/Passes/AutoFinalOnQueryPass.cpp index 70aa1a41548..f7dd542dbf4 100644 --- a/src/Analyzer/Passes/AutoFinalOnQueryPass.cpp +++ 
b/src/Analyzer/Passes/AutoFinalOnQueryPass.cpp @@ -8,6 +8,8 @@ #include #include +#include + namespace DB { diff --git a/src/Analyzer/Passes/ConvertOrLikeChainPass.cpp b/src/Analyzer/Passes/ConvertOrLikeChainPass.cpp index eb897ef8746..2b2ac95d7b9 100644 --- a/src/Analyzer/Passes/ConvertOrLikeChainPass.cpp +++ b/src/Analyzer/Passes/ConvertOrLikeChainPass.cpp @@ -22,6 +22,8 @@ #include #include +#include + namespace DB { diff --git a/src/Analyzer/Passes/ConvertQueryToCNFPass.cpp b/src/Analyzer/Passes/ConvertQueryToCNFPass.cpp index 5951e8fc5ea..f66d092429f 100644 --- a/src/Analyzer/Passes/ConvertQueryToCNFPass.cpp +++ b/src/Analyzer/Passes/ConvertQueryToCNFPass.cpp @@ -10,6 +10,8 @@ #include #include +#include + #include #include diff --git a/src/Analyzer/Passes/CountDistinctPass.cpp b/src/Analyzer/Passes/CountDistinctPass.cpp index 23fde89a348..3f9185a07ca 100644 --- a/src/Analyzer/Passes/CountDistinctPass.cpp +++ b/src/Analyzer/Passes/CountDistinctPass.cpp @@ -11,6 +11,8 @@ #include #include +#include + namespace DB { diff --git a/src/Analyzer/Passes/CrossToInnerJoinPass.cpp b/src/Analyzer/Passes/CrossToInnerJoinPass.cpp index 3e2a2055fdb..eb615879893 100644 --- a/src/Analyzer/Passes/CrossToInnerJoinPass.cpp +++ b/src/Analyzer/Passes/CrossToInnerJoinPass.cpp @@ -9,13 +9,14 @@ #include #include #include +#include #include #include #include #include -#include +#include namespace DB diff --git a/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp b/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp index 90051779a26..b968f43c6a6 100644 --- a/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp +++ b/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp @@ -21,6 +21,8 @@ #include #include +#include + namespace DB { diff --git a/src/Analyzer/Passes/FuseFunctionsPass.cpp b/src/Analyzer/Passes/FuseFunctionsPass.cpp index ac4678fb362..0175e304a2b 100644 --- a/src/Analyzer/Passes/FuseFunctionsPass.cpp +++ b/src/Analyzer/Passes/FuseFunctionsPass.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include diff --git a/src/Analyzer/Passes/GroupingFunctionsResolvePass.cpp b/src/Analyzer/Passes/GroupingFunctionsResolvePass.cpp index e259ffa64a6..91f06183523 100644 --- a/src/Analyzer/Passes/GroupingFunctionsResolvePass.cpp +++ b/src/Analyzer/Passes/GroupingFunctionsResolvePass.cpp @@ -1,6 +1,7 @@ #include #include +#include #include diff --git a/src/Analyzer/Passes/IfChainToMultiIfPass.cpp b/src/Analyzer/Passes/IfChainToMultiIfPass.cpp index beb2247607e..cff98adb6af 100644 --- a/src/Analyzer/Passes/IfChainToMultiIfPass.cpp +++ b/src/Analyzer/Passes/IfChainToMultiIfPass.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include diff --git a/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp b/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp index 29b392a0951..d966f129d08 100644 --- a/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp +++ b/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include diff --git a/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp b/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp index 698602ca5bc..854697bca9f 100644 --- a/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp +++ b/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/src/Analyzer/Passes/MultiIfToIfPass.cpp b/src/Analyzer/Passes/MultiIfToIfPass.cpp index c42ea61b34a..0b441032b02 100644 --- a/src/Analyzer/Passes/MultiIfToIfPass.cpp 
+++ b/src/Analyzer/Passes/MultiIfToIfPass.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include diff --git a/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp b/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp index f9066b0363c..3a8b6e75d40 100644 --- a/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp +++ b/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include diff --git a/src/Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.cpp b/src/Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.cpp index a8364f1ab7a..feb8bcc792d 100644 --- a/src/Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.cpp +++ b/src/Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.cpp @@ -9,6 +9,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Analyzer/Passes/OptimizeGroupByFunctionKeysPass.cpp b/src/Analyzer/Passes/OptimizeGroupByFunctionKeysPass.cpp index fd8c3e6ee6c..bfb6aa1dd07 100644 --- a/src/Analyzer/Passes/OptimizeGroupByFunctionKeysPass.cpp +++ b/src/Analyzer/Passes/OptimizeGroupByFunctionKeysPass.cpp @@ -8,6 +8,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.cpp b/src/Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.cpp index a30ad2a1590..fc989003c02 100644 --- a/src/Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.cpp +++ b/src/Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include diff --git a/src/Analyzer/Passes/OptimizeRedundantFunctionsInOrderByPass.cpp b/src/Analyzer/Passes/OptimizeRedundantFunctionsInOrderByPass.cpp index b13af7f00ae..4f0271b4914 100644 --- a/src/Analyzer/Passes/OptimizeRedundantFunctionsInOrderByPass.cpp +++ b/src/Analyzer/Passes/OptimizeRedundantFunctionsInOrderByPass.cpp @@ -8,6 +8,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp b/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp index 05377f8514e..c1adf05ac76 100644 --- a/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp +++ b/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp @@ -6,6 +6,8 @@ #include #include +#include + #include #include diff --git a/src/Analyzer/Passes/RewriteSumFunctionWithSumAndCountPass.cpp b/src/Analyzer/Passes/RewriteSumFunctionWithSumAndCountPass.cpp index 524ee807721..a42b4750f47 100644 --- a/src/Analyzer/Passes/RewriteSumFunctionWithSumAndCountPass.cpp +++ b/src/Analyzer/Passes/RewriteSumFunctionWithSumAndCountPass.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include namespace DB diff --git a/src/Analyzer/Passes/SumIfToCountIfPass.cpp b/src/Analyzer/Passes/SumIfToCountIfPass.cpp index e5ee8a0d0b2..1524629dc81 100644 --- a/src/Analyzer/Passes/SumIfToCountIfPass.cpp +++ b/src/Analyzer/Passes/SumIfToCountIfPass.cpp @@ -1,19 +1,16 @@ #include -#include -#include - #include #include -#include - -#include - -#include - #include #include #include +#include +#include +#include +#include +#include +#include namespace DB { diff --git a/src/Analyzer/Passes/UniqInjectiveFunctionsEliminationPass.cpp b/src/Analyzer/Passes/UniqInjectiveFunctionsEliminationPass.cpp index 947952ac3a8..cb8ef35fc13 100644 --- a/src/Analyzer/Passes/UniqInjectiveFunctionsEliminationPass.cpp +++ b/src/Analyzer/Passes/UniqInjectiveFunctionsEliminationPass.cpp @@ -9,6 +9,8 @@ #include #include +#include + 
namespace DB { diff --git a/src/Analyzer/Passes/UniqToCountPass.cpp b/src/Analyzer/Passes/UniqToCountPass.cpp index 83705f54b38..3f73322d1fa 100644 --- a/src/Analyzer/Passes/UniqToCountPass.cpp +++ b/src/Analyzer/Passes/UniqToCountPass.cpp @@ -9,6 +9,8 @@ #include #include +#include + namespace DB { diff --git a/src/Analyzer/QueryTreeBuilder.cpp b/src/Analyzer/QueryTreeBuilder.cpp index dd083dd5df6..a62b6e56ac5 100644 --- a/src/Analyzer/QueryTreeBuilder.cpp +++ b/src/Analyzer/QueryTreeBuilder.cpp @@ -40,6 +40,8 @@ #include #include +#include + #include #include diff --git a/src/Analyzer/Resolve/IdentifierResolveScope.cpp b/src/Analyzer/Resolve/IdentifierResolveScope.cpp index ae363b57047..eb3e2179440 100644 --- a/src/Analyzer/Resolve/IdentifierResolveScope.cpp +++ b/src/Analyzer/Resolve/IdentifierResolveScope.cpp @@ -1,8 +1,9 @@ #include -#include #include #include +#include +#include namespace DB { diff --git a/src/Analyzer/Resolve/IdentifierResolver.cpp b/src/Analyzer/Resolve/IdentifierResolver.cpp index 692a31b66ba..9dd8aa1a05f 100644 --- a/src/Analyzer/Resolve/IdentifierResolver.cpp +++ b/src/Analyzer/Resolve/IdentifierResolver.cpp @@ -25,6 +25,8 @@ #include #include +#include + namespace DB { namespace ErrorCodes diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 576c4943ccb..5f7b06231d9 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -64,6 +64,8 @@ #include #include +#include + namespace ProfileEvents { extern const Event ScalarSubqueriesGlobalCacheHit; @@ -1490,6 +1492,10 @@ void QueryAnalyzer::qualifyColumnNodesWithProjectionNames(const QueryTreeNodes & additional_column_qualification_parts = {table_expression_node->getAlias()}; else if (auto * table_node = table_expression_node->as()) additional_column_qualification_parts = {table_node->getStorageID().getDatabaseName(), table_node->getStorageID().getTableName()}; + else if (auto * query_node = table_expression_node->as(); query_node && query_node->isCTE()) + additional_column_qualification_parts = {query_node->getCTEName()}; + else if (auto * union_node = table_expression_node->as(); union_node && union_node->isCTE()) + additional_column_qualification_parts = {union_node->getCTEName()}; size_t additional_column_qualification_parts_size = additional_column_qualification_parts.size(); const auto & table_expression_data = scope.getTableExpressionDataOrThrow(table_expression_node); @@ -4455,9 +4461,8 @@ void QueryAnalyzer::initializeTableExpressionData(const QueryTreeNodePtr & table { auto left_table_expression = extractLeftTableExpression(scope_query_node->getJoinTree()); if (table_expression_node.get() == left_table_expression.get() && - scope.joins_count == 1 && - scope.context->getSettingsRef().single_join_prefer_left_table) - table_expression_data.should_qualify_columns = false; + scope.joins_count == 1 && scope.context->getSettingsRef().single_join_prefer_left_table) + table_expression_data.should_qualify_columns = false; } scope.table_expression_node_to_data.emplace(table_expression_node, std::move(table_expression_data)); diff --git a/src/Analyzer/TableNode.cpp b/src/Analyzer/TableNode.cpp index 1c4d022542d..d5a4b06f652 100644 --- a/src/Analyzer/TableNode.cpp +++ b/src/Analyzer/TableNode.cpp @@ -10,6 +10,8 @@ #include +#include + namespace DB { diff --git a/src/Analyzer/Utils.cpp b/src/Analyzer/Utils.cpp index cf4a3f77e34..d10bbd9bd23 100644 --- a/src/Analyzer/Utils.cpp +++ b/src/Analyzer/Utils.cpp @@ -1,5 +1,7 @@ #include 
+#include + #include #include #include diff --git a/src/Backups/BackupEntriesCollector.cpp b/src/Backups/BackupEntriesCollector.cpp index 64ab2112326..3ee7a05600e 100644 --- a/src/Backups/BackupEntriesCollector.cpp +++ b/src/Backups/BackupEntriesCollector.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/src/Backups/BackupIO_S3.cpp b/src/Backups/BackupIO_S3.cpp index 56544312c26..aef37021227 100644 --- a/src/Backups/BackupIO_S3.cpp +++ b/src/Backups/BackupIO_S3.cpp @@ -1,6 +1,7 @@ #include #if USE_AWS_S3 +#include #include #include #include diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index 69d9c52ebd9..15a7d7c1eca 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include diff --git a/src/Backups/DDLAdjustingForBackupVisitor.cpp b/src/Backups/DDLAdjustingForBackupVisitor.cpp index 7e5ce91629b..910831195a3 100644 --- a/src/Backups/DDLAdjustingForBackupVisitor.cpp +++ b/src/Backups/DDLAdjustingForBackupVisitor.cpp @@ -1,11 +1,11 @@ #include +#include +#include #include #include #include -#include -#include - #include +#include namespace DB diff --git a/src/Backups/RestorerFromBackup.cpp b/src/Backups/RestorerFromBackup.cpp index 454a0468e9f..3056f9fe421 100644 --- a/src/Backups/RestorerFromBackup.cpp +++ b/src/Backups/RestorerFromBackup.cpp @@ -23,8 +23,9 @@ #include #include #include -#include +#include +#include #include #include diff --git a/src/Backups/WithRetries.cpp b/src/Backups/WithRetries.cpp index 66851fa42ce..181e6331ac9 100644 --- a/src/Backups/WithRetries.cpp +++ b/src/Backups/WithRetries.cpp @@ -1,5 +1,7 @@ -#include #include +#include + +#include namespace DB { diff --git a/src/Bridge/IBridge.cpp b/src/Bridge/IBridge.cpp index c25d7bd2fed..de48a4f2b84 100644 --- a/src/Bridge/IBridge.cpp +++ b/src/Bridge/IBridge.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/src/BridgeHelper/LibraryBridgeHelper.cpp b/src/BridgeHelper/LibraryBridgeHelper.cpp index 84bfe096e79..12ca5564e80 100644 --- a/src/BridgeHelper/LibraryBridgeHelper.cpp +++ b/src/BridgeHelper/LibraryBridgeHelper.cpp @@ -1,5 +1,7 @@ #include "LibraryBridgeHelper.h" +#include +#include #include namespace DB diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index a260fd5761e..d2b10d7ed2e 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Client/ConnectionEstablisher.cpp b/src/Client/ConnectionEstablisher.cpp index 05839b44452..4bab643ed31 100644 --- a/src/Client/ConnectionEstablisher.cpp +++ b/src/Client/ConnectionEstablisher.cpp @@ -1,6 +1,7 @@ #include #include #include +#include namespace ProfileEvents { diff --git a/src/Client/ConnectionPool.cpp b/src/Client/ConnectionPool.cpp index 5cabb1465d1..ed2e7c3c725 100644 --- a/src/Client/ConnectionPool.cpp +++ b/src/Client/ConnectionPool.cpp @@ -1,4 +1,5 @@ #include +#include #include @@ -85,4 +86,15 @@ ConnectionPoolFactory & ConnectionPoolFactory::instance() return ret; } +IConnectionPool::Entry ConnectionPool::get(const DB::ConnectionTimeouts& timeouts, const DB::Settings& settings, + bool force_connected) +{ + Entry entry = Base::get(settings.connection_pool_max_wait_ms.totalMilliseconds()); + + if (force_connected) + entry->forceConnected(timeouts); + + return entry; +} + } diff --git a/src/Client/ConnectionPool.h b/src/Client/ConnectionPool.h 
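The ConnectionPool hunks just above move the body of get() into ConnectionPool.cpp, and the header-side hunk that follows swaps the heavy Settings include for a forward declaration. A minimal sketch of this dependency-breaking pattern, with all names illustrative rather than taken from ClickHouse:

    // pool.h -- include-light header: a forward declaration suffices, because the
    // header mentions Settings only by const reference.
    #pragma once

    struct Settings;  // forward declaration instead of a heavy #include

    class Pool
    {
    public:
        int get(const Settings & settings);  // body deliberately defined out of line
    };

    // pool.cpp -- the single translation unit that needs the complete type.
    #include "pool.h"

    struct Settings { int max_wait_ms = 100; };  // stand-in for the real, very large struct

    int Pool::get(const Settings & settings)
    {
        return settings.max_wait_ms;  // member access requires the full definition
    }

The same motive drives the context->getSettings() to context->getSettingsRef() replacements later in this patch: assuming the two accessors differ only in returning the struct by value versus by const reference, a hypothetical pair would look like

    struct Context
    {
        Settings settings;
        Settings getSettings() const { return settings; }              // copies every field
        const Settings & getSettingsRef() const { return settings; }   // returns a reference, no copy
    };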
index 725a5e91ac0..d83ecdd75f9 100644 --- a/src/Client/ConnectionPool.h +++ b/src/Client/ConnectionPool.h @@ -4,12 +4,13 @@ #include #include #include -#include #include namespace DB { +struct Settings; + /** Interface for connection pools. * * Usage (using the usual `ConnectionPool` example) @@ -102,15 +103,7 @@ public: Entry get(const ConnectionTimeouts & timeouts, /// NOLINT const Settings & settings, - bool force_connected = true) override - { - Entry entry = Base::get(settings.connection_pool_max_wait_ms.totalMilliseconds()); - - if (force_connected) - entry->forceConnected(timeouts); - - return entry; - } + bool force_connected = true) override; std::string getDescription() const { diff --git a/src/Client/HedgedConnections.cpp b/src/Client/HedgedConnections.cpp index 4effc3adafa..dd8348ea04f 100644 --- a/src/Client/HedgedConnections.cpp +++ b/src/Client/HedgedConnections.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include diff --git a/src/Client/HedgedConnectionsFactory.h b/src/Client/HedgedConnectionsFactory.h index 51da85ea178..63e39819f58 100644 --- a/src/Client/HedgedConnectionsFactory.h +++ b/src/Client/HedgedConnectionsFactory.h @@ -8,13 +8,14 @@ #include #include #include -#include #include #include namespace DB { +struct Settings; + /** Class for establishing hedged connections with replicas. * The process of establishing connection is divided on stages, on each stage if * replica doesn't respond for a long time, we start establishing connection with diff --git a/src/Client/MultiplexedConnections.cpp b/src/Client/MultiplexedConnections.cpp index bcef286ecbc..207bf6c9e07 100644 --- a/src/Client/MultiplexedConnections.cpp +++ b/src/Client/MultiplexedConnections.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include #include diff --git a/src/Common/DateLUT.cpp b/src/Common/DateLUT.cpp index 3a20fb1a125..f9bbe2fbf40 100644 --- a/src/Common/DateLUT.cpp +++ b/src/Common/DateLUT.cpp @@ -2,7 +2,9 @@ #include #include +#include #include +#include #include #include diff --git a/src/Common/GetPriorityForLoadBalancing.cpp b/src/Common/GetPriorityForLoadBalancing.cpp index dc5704ef6b5..77b7867b45d 100644 --- a/src/Common/GetPriorityForLoadBalancing.cpp +++ b/src/Common/GetPriorityForLoadBalancing.cpp @@ -1,3 +1,4 @@ +#include #include #include diff --git a/src/Common/GetPriorityForLoadBalancing.h b/src/Common/GetPriorityForLoadBalancing.h index 01dae9a1289..d4c68803866 100644 --- a/src/Common/GetPriorityForLoadBalancing.h +++ b/src/Common/GetPriorityForLoadBalancing.h @@ -1,6 +1,10 @@ #pragma once -#include +#include +#include + +#include +#include namespace DB { diff --git a/src/Common/NamedCollections/NamedCollectionsFactory.cpp b/src/Common/NamedCollections/NamedCollectionsFactory.cpp index 2faea1957ba..bc283d2be1a 100644 --- a/src/Common/NamedCollections/NamedCollectionsFactory.cpp +++ b/src/Common/NamedCollections/NamedCollectionsFactory.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include namespace DB diff --git a/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp b/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp index b3671350f92..3c6561fa51e 100644 --- a/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp +++ b/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index b28c1b2e87e..871ba7cab8b 100644 --- a/src/Common/ProfileEvents.cpp +++ 
b/src/Common/ProfileEvents.cpp
@@ -508,6 +508,7 @@ The server successfully detected this situation and will download merged part fr
M(FileSegmentHolderCompleteMicroseconds, "File segments holder complete() time") \
M(FileSegmentFailToIncreasePriority, "Number of times the priority was not increased due to a high contention on the cache lock") \
M(FilesystemCacheFailToReserveSpaceBecauseOfLockContention, "Number of times space reservation was skipped due to a high contention on the cache lock") \
+ M(FilesystemCacheFailToReserveSpaceBecauseOfCacheResize, "Number of times space reservation was skipped because the cache is being resized") \
M(FilesystemCacheHoldFileSegments, "Filesystem cache file segments count, which were hold") \
M(FilesystemCacheUnusedHoldFileSegments, "Filesystem cache file segments count, which were hold, but not used (because of seek or LIMIT n, etc)") \
M(FilesystemCacheFreeSpaceKeepingThreadRun, "Number of times background thread executed free space keeping job") \
diff --git a/src/Common/ProgressIndication.cpp b/src/Common/ProgressIndication.cpp
index 0b482cb09be..8f0fb3cac6c 100644
--- a/src/Common/ProgressIndication.cpp
+++ b/src/Common/ProgressIndication.cpp
@@ -1,6 +1,7 @@
#include "ProgressIndication.h"
#include
#include
+#include
#include
#include
#include
diff --git a/src/Common/ProgressIndication.h b/src/Common/ProgressIndication.h
index ae39fb49bcc..188ec4ed764 100644
--- a/src/Common/ProgressIndication.h
+++ b/src/Common/ProgressIndication.h
@@ -1,14 +1,15 @@
#pragma once
-#include
-#include
-#include
#include
#include
#include
#include
#include
+#include
+#include
+#include
+#include
namespace DB
{
diff --git a/src/Common/ThreadFuzzer.cpp b/src/Common/ThreadFuzzer.cpp
index 7b96d1f075b..719ac9593ff 100644
--- a/src/Common/ThreadFuzzer.cpp
+++ b/src/Common/ThreadFuzzer.cpp
@@ -12,13 +12,14 @@
#include
#include
-#include
+#include
#include
#include
+#include
+#include
#include
-#include
#include "config.h" // USE_JEMALLOC
diff --git a/src/Coordination/FourLetterCommand.cpp b/src/Coordination/FourLetterCommand.cpp
index 8de9f8dfa1c..d0ecbe744a0 100644
--- a/src/Coordination/FourLetterCommand.cpp
+++ b/src/Coordination/FourLetterCommand.cpp
@@ -1,5 +1,6 @@
#include
+#include
#include
#include
#include
diff --git a/src/Coordination/KeeperContext.cpp b/src/Coordination/KeeperContext.cpp
index a36a074ce89..4eab03e3c2c 100644
--- a/src/Coordination/KeeperContext.cpp
+++ b/src/Coordination/KeeperContext.cpp
@@ -3,6 +3,7 @@
#include
+#include
#include
#include
#include
diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp
index fc72e5fab59..8c7e6405153 100644
--- a/src/Coordination/KeeperDispatcher.cpp
+++ b/src/Coordination/KeeperDispatcher.cpp
@@ -5,8 +5,8 @@
#include
#include
-#include "Common/ZooKeeper/IKeeper.h"
-#include "Common/ZooKeeper/ZooKeeperCommon.h"
+#include
+#include
#include
#include
#include
@@ -14,6 +14,7 @@
#include
#include
#include
+#include
#include
diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h
index a487b886d98..41fb8451ee4 100644
--- a/src/Coordination/KeeperDispatcher.h
+++ b/src/Coordination/KeeperDispatcher.h
@@ -11,7 +11,6 @@
#include
#include
#include
-#include
#include
#include
#include
diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp
index 09a1132e96d..dc9658e895f 100644
--- a/src/Coordination/KeeperServer.cpp
+++ b/src/Coordination/KeeperServer.cpp
@@ -3,9 +3,7 @@
#include "config.h"
-#include
-#include
-#include
+#include
#include
#include
#include
@@ -30,6 +28,10 @@
#include
#include
+#include
+#include
+#include
+
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#include
diff --git a/src/Coordination/KeeperServer.h b/src/Coordination/KeeperServer.h
index 5e45a552cba..2eb630b4dd6 100644
--- a/src/Coordination/KeeperServer.h
+++ b/src/Coordination/KeeperServer.h
@@ -1,6 +1,5 @@
#pragma once
-#include
#include
#include
#include
@@ -17,6 +16,9 @@ namespace DB
using RaftAppendResult = nuraft::ptr>>;
+struct KeeperConfigurationAndSettings;
+using KeeperConfigurationAndSettingsPtr = std::shared_ptr;
+
class KeeperServer
{
private:
diff --git a/src/Coordination/KeeperStateMachine.cpp b/src/Coordination/KeeperStateMachine.cpp
index 3d3d862e1dd..d9b9ba528ee 100644
--- a/src/Coordination/KeeperStateMachine.cpp
+++ b/src/Coordination/KeeperStateMachine.cpp
@@ -1,6 +1,7 @@
#include
#include
#include
+#include
#include
#include
#include
diff --git a/src/Coordination/KeeperStateMachine.h b/src/Coordination/KeeperStateMachine.h
index 7ea14aa2d30..6357fd170df 100644
--- a/src/Coordination/KeeperStateMachine.h
+++ b/src/Coordination/KeeperStateMachine.h
@@ -1,6 +1,5 @@
#pragma once
-#include
#include
#include
#include
diff --git a/src/Coordination/KeeperStateManager.h b/src/Coordination/KeeperStateManager.h
index 60f6dbe7b62..e5ab5fb4807 100644
--- a/src/Coordination/KeeperStateManager.h
+++ b/src/Coordination/KeeperStateManager.h
@@ -1,14 +1,13 @@
#pragma once
-#include
#include
#include
-#include
+#include
+#include
#include
#include
#include "Coordination/KeeperStateMachine.h"
#include "Coordination/RaftServerConfig.h"
-#include
namespace DB
{
diff --git a/src/Coordination/KeeperStorage.cpp b/src/Coordination/KeeperStorage.cpp
index 1542eb0d71a..efb57ae96e2 100644
--- a/src/Coordination/KeeperStorage.cpp
+++ b/src/Coordination/KeeperStorage.cpp
@@ -20,11 +20,12 @@
#include
#include
+#include
#include
#include
+#include
#include
#include
-#include
#include
#include
diff --git a/src/Coordination/tests/gtest_coordination.cpp b/src/Coordination/tests/gtest_coordination.cpp
index a329bec8e2a..b65e9358a72 100644
--- a/src/Coordination/tests/gtest_coordination.cpp
+++ b/src/Coordination/tests/gtest_coordination.cpp
@@ -7,6 +7,7 @@
#include
#include
#include
+#include
#include
#include
#include
diff --git a/src/Core/ExternalTable.cpp b/src/Core/ExternalTable.cpp
index bc72c996384..af681cd5639 100644
--- a/src/Core/ExternalTable.cpp
+++ b/src/Core/ExternalTable.cpp
@@ -16,6 +16,7 @@
#include
#include
+#include
#include
#include
#include
diff --git a/src/Core/LoadBalancing.h b/src/Core/LoadBalancing.h
new file mode 100644
index 00000000000..23758895ce2
--- /dev/null
+++ b/src/Core/LoadBalancing.h
@@ -0,0 +1,27 @@
+#pragma once
+
+#include
+
+namespace DB
+{
+
+enum class LoadBalancing : uint8_t
+{
+    /// among replicas with a minimum number of errors selected randomly
+    RANDOM = 0,
+    /// a replica is selected among the replicas with the minimum number of errors
+    /// with the minimum number of distinguished characters in the replica name prefix and local hostname prefix
+    NEAREST_HOSTNAME,
+    /// just like NEAREST_HOSTNAME, but it counts distinguished characters in a Levenshtein-distance manner
+    HOSTNAME_LEVENSHTEIN_DISTANCE,
+    // replicas with the same number of errors are accessed in the same order
+    // as they are specified in the configuration.
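+    // (e.g. with replicas r1, r2, r3 and equal error counts, r1 is always tried first)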
+    IN_ORDER,
+    /// if the first replica has a higher number of errors,
+    /// pick a random one from the replicas with the minimum number of errors
+    FIRST_OR_RANDOM,
+    // round robin across replicas with the same number of errors.
+    ROUND_ROBIN,
+};
+
+}
diff --git a/src/Core/QueryLogElementType.h b/src/Core/QueryLogElementType.h
new file mode 100644
index 00000000000..99361f945cc
--- /dev/null
+++ b/src/Core/QueryLogElementType.h
@@ -0,0 +1,17 @@
+#pragma once
+
+#include
+
+namespace DB
+{
+
+// Make it signed for compatibility with DataTypeEnum8
+enum QueryLogElementType : int8_t
+{
+    QUERY_START = 1,
+    QUERY_FINISH = 2,
+    EXCEPTION_BEFORE_START = 3,
+    EXCEPTION_WHILE_PROCESSING = 4,
+};
+
+}
diff --git a/src/Core/SchemaInferenceMode.h b/src/Core/SchemaInferenceMode.h
new file mode 100644
index 00000000000..17d1d62a60e
--- /dev/null
+++ b/src/Core/SchemaInferenceMode.h
@@ -0,0 +1,14 @@
+#pragma once
+
+#include
+
+namespace DB
+{
+
+enum class SchemaInferenceMode : uint8_t
+{
+    DEFAULT,
+    UNION,
+};
+
+}
diff --git a/src/Core/SettingsEnums.h b/src/Core/SettingsEnums.h
index 67fbce31be8..f6d9593ca56 100644
--- a/src/Core/SettingsEnums.h
+++ b/src/Core/SettingsEnums.h
@@ -1,8 +1,12 @@
#pragma once
#include
+#include
#include
+#include
+#include
#include
+#include
#include
#include
#include
@@ -115,25 +119,6 @@ constexpr auto getEnumValues();
return getEnumValues().size();\
}
-enum class LoadBalancing : uint8_t
-{
-    /// among replicas with a minimum number of errors selected randomly
-    RANDOM = 0,
-    /// a replica is selected among the replicas with the minimum number of errors
-    /// with the minimum number of distinguished characters in the replica name prefix and local hostname prefix
-    NEAREST_HOSTNAME,
-    /// just like NEAREST_HOSTNAME, but it count distinguished characters in a levenshtein distance manner
-    HOSTNAME_LEVENSHTEIN_DISTANCE,
-    // replicas with the same number of errors are accessed in the same order
-    // as they are specified in the configuration.
-    IN_ORDER,
-    /// if first replica one has higher number of errors,
-    /// pick a random one from replicas with minimum number of errors
-    FIRST_OR_RANDOM,
-    // round robin across replicas with the same number of errors.
-    ROUND_ROBIN,
-};
-
DECLARE_SETTING_ENUM(LoadBalancing)
DECLARE_SETTING_ENUM(JoinStrictness)
@@ -204,16 +189,6 @@ DECLARE_SETTING_ENUM_WITH_RENAME(ParquetVersion, FormatSettings::ParquetVersion)
DECLARE_SETTING_ENUM(LogsLevel)
-
-// Make it signed for compatibility with DataTypeEnum8
-enum QueryLogElementType : int8_t
-{
-    QUERY_START = 1,
-    QUERY_FINISH = 2,
-    EXCEPTION_BEFORE_START = 3,
-    EXCEPTION_WHILE_PROCESSING = 4,
-};
-
DECLARE_SETTING_ENUM_WITH_RENAME(LogQueriesType, QueryLogElementType)
@@ -292,13 +267,6 @@ enum class StreamingHandleErrorMode : uint8_t
DECLARE_SETTING_ENUM(StreamingHandleErrorMode)
-enum class ShortCircuitFunctionEvaluation : uint8_t
-{
-    ENABLE, // Use short-circuit function evaluation for functions that are suitable for it.
-    FORCE_ENABLE, // Use short-circuit function evaluation for all functions.
-    DISABLE, // Disable short-circuit function evaluation.
-}; - DECLARE_SETTING_ENUM(ShortCircuitFunctionEvaluation) enum class TransactionsWaitCSNMode : uint8_t @@ -367,12 +335,6 @@ DECLARE_SETTING_ENUM(ObjectStorageQueueAction) DECLARE_SETTING_ENUM(ExternalCommandStderrReaction) -enum class SchemaInferenceMode : uint8_t -{ - DEFAULT, - UNION, -}; - DECLARE_SETTING_ENUM(SchemaInferenceMode) DECLARE_SETTING_ENUM_WITH_RENAME(DateTimeOverflowBehavior, FormatSettings::DateTimeOverflowBehavior) diff --git a/src/Core/ShortCircuitFunctionEvaluation.h b/src/Core/ShortCircuitFunctionEvaluation.h new file mode 100644 index 00000000000..3785efe8015 --- /dev/null +++ b/src/Core/ShortCircuitFunctionEvaluation.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +namespace DB +{ + +enum class ShortCircuitFunctionEvaluation : uint8_t +{ + ENABLE, // Use short-circuit function evaluation for functions that are suitable for it. + FORCE_ENABLE, // Use short-circuit function evaluation for all functions. + DISABLE, // Disable short-circuit function evaluation. +}; + +} diff --git a/src/Core/SortDescription.h b/src/Core/SortDescription.h index 33fd6017599..5c6f3e3150a 100644 --- a/src/Core/SortDescription.h +++ b/src/Core/SortDescription.h @@ -1,15 +1,16 @@ #pragma once -#include -#include -#include -#include + #include -#include #include #include #include +#include +#include +#include +#include + namespace DB { diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index 48f76769a09..9b132c3e199 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/src/DataTypes/DataTypeDecimalBase.cpp b/src/DataTypes/DataTypeDecimalBase.cpp index 62218694924..2adfda5bb27 100644 --- a/src/DataTypes/DataTypeDecimalBase.cpp +++ b/src/DataTypes/DataTypeDecimalBase.cpp @@ -1,3 +1,4 @@ +#include #include #include #include diff --git a/src/DataTypes/DataTypeFactory.cpp b/src/DataTypes/DataTypeFactory.cpp index 8c8f9999ada..07dc4395bfe 100644 --- a/src/DataTypes/DataTypeFactory.cpp +++ b/src/DataTypes/DataTypeFactory.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include diff --git a/src/DataTypes/EnumValues.h b/src/DataTypes/EnumValues.h index 889878bc60f..9762ff5870f 100644 --- a/src/DataTypes/EnumValues.h +++ b/src/DataTypes/EnumValues.h @@ -7,7 +7,7 @@ namespace DB { -namespace ErrorCodesEnumValues +namespace ErrorCodes { extern const int BAD_ARGUMENTS; } diff --git a/src/Databases/DDLDependencyVisitor.cpp b/src/Databases/DDLDependencyVisitor.cpp index c85e8f5688a..d81dc7a76d8 100644 --- a/src/Databases/DDLDependencyVisitor.cpp +++ b/src/Databases/DDLDependencyVisitor.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index f58b90f1134..9e7004e72a9 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -17,6 +17,7 @@ #include #include #include +#include namespace fs = std::filesystem; diff --git a/src/Databases/DatabaseDictionary.cpp b/src/Databases/DatabaseDictionary.cpp index adb9a659fcd..52196e75c4a 100644 --- a/src/Databases/DatabaseDictionary.cpp +++ b/src/Databases/DatabaseDictionary.cpp @@ -10,6 +10,7 @@ #include #include #include +#include namespace DB diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 3a4c1423f7c..05a5e057c55 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -9,6 +9,7 @@ #include #include #include +#include namespace 
fs = std::filesystem; diff --git a/src/Databases/DatabaseFilesystem.cpp b/src/Databases/DatabaseFilesystem.cpp index b27a816a60d..31701e665a1 100644 --- a/src/Databases/DatabaseFilesystem.cpp +++ b/src/Databases/DatabaseFilesystem.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include diff --git a/src/Databases/DatabaseHDFS.cpp b/src/Databases/DatabaseHDFS.cpp index 060991d1290..eccaae5f22e 100644 --- a/src/Databases/DatabaseHDFS.cpp +++ b/src/Databases/DatabaseHDFS.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index b8154372116..07a250e72c7 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -28,6 +28,7 @@ #include #include #include +#include namespace fs = std::filesystem; diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index 7d4bb07e8ef..6555c4444e2 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include @@ -30,6 +31,7 @@ #include #include #include +#include #include #include diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 4ca9afc49eb..7ce2859e962 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/src/Databases/DatabaseReplicatedWorker.cpp b/src/Databases/DatabaseReplicatedWorker.cpp index 31d6f7876a8..1ef88dc03bc 100644 --- a/src/Databases/DatabaseReplicatedWorker.cpp +++ b/src/Databases/DatabaseReplicatedWorker.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include namespace fs = std::filesystem; diff --git a/src/Databases/DatabaseS3.cpp b/src/Databases/DatabaseS3.cpp index 1589cc1c75d..2b2d978a846 100644 --- a/src/Databases/DatabaseS3.cpp +++ b/src/Databases/DatabaseS3.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include #include diff --git a/src/Databases/MySQL/DatabaseMySQL.cpp b/src/Databases/MySQL/DatabaseMySQL.cpp index bb24373a7e1..7aa29018f4d 100644 --- a/src/Databases/MySQL/DatabaseMySQL.cpp +++ b/src/Databases/MySQL/DatabaseMySQL.cpp @@ -30,6 +30,7 @@ # include # include # include +# include # include # include # include diff --git a/src/Databases/MySQL/FetchTablesColumnsList.cpp b/src/Databases/MySQL/FetchTablesColumnsList.cpp index e78f4aa2234..0828438cad2 100644 --- a/src/Databases/MySQL/FetchTablesColumnsList.cpp +++ b/src/Databases/MySQL/FetchTablesColumnsList.cpp @@ -2,6 +2,7 @@ #if USE_MYSQL #include +#include #include #include #include diff --git a/src/Databases/MySQL/FetchTablesColumnsList.h b/src/Databases/MySQL/FetchTablesColumnsList.h index 736a0ffd607..ec12cdc73b0 100644 --- a/src/Databases/MySQL/FetchTablesColumnsList.h +++ b/src/Databases/MySQL/FetchTablesColumnsList.h @@ -12,11 +12,12 @@ #include #include -#include namespace DB { +struct Settings; + std::map fetchTablesColumnsList( mysqlxx::PoolWithFailover & pool, const String & database_name, diff --git a/src/Databases/MySQL/MaterializeMetadata.h b/src/Databases/MySQL/MaterializeMetadata.h index e78b7132c8d..36d3fb569d8 100644 --- a/src/Databases/MySQL/MaterializeMetadata.h +++ b/src/Databases/MySQL/MaterializeMetadata.h @@ -6,6 +6,7 @@ #include #include +#include #include #include #include diff --git a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp 
b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp index e38d09e99b2..6b0548b85c7 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index b22356bebea..a846e23cd4f 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include namespace fs = std::filesystem; diff --git a/src/Databases/SQLite/DatabaseSQLite.cpp b/src/Databases/SQLite/DatabaseSQLite.cpp index e758ea35de5..132a978140c 100644 --- a/src/Databases/SQLite/DatabaseSQLite.cpp +++ b/src/Databases/SQLite/DatabaseSQLite.cpp @@ -3,6 +3,7 @@ #if USE_SQLITE #include +#include #include #include #include diff --git a/src/Dictionaries/DirectDictionary.cpp b/src/Dictionaries/DirectDictionary.cpp index 2c0f7653aff..f3b32402c20 100644 --- a/src/Dictionaries/DirectDictionary.cpp +++ b/src/Dictionaries/DirectDictionary.cpp @@ -1,6 +1,7 @@ #include "DirectDictionary.h" #include +#include #include #include diff --git a/src/Dictionaries/ExecutablePoolDictionarySource.cpp b/src/Dictionaries/ExecutablePoolDictionarySource.cpp index d8111afdc19..217a67453e4 100644 --- a/src/Dictionaries/ExecutablePoolDictionarySource.cpp +++ b/src/Dictionaries/ExecutablePoolDictionarySource.cpp @@ -8,6 +8,8 @@ #include #include +#include + #include #include #include diff --git a/src/Dictionaries/HTTPDictionarySource.cpp b/src/Dictionaries/HTTPDictionarySource.cpp index dae8ec06d30..663c63dd6c6 100644 --- a/src/Dictionaries/HTTPDictionarySource.cpp +++ b/src/Dictionaries/HTTPDictionarySource.cpp @@ -1,12 +1,13 @@ #include "HTTPDictionarySource.h" +#include #include #include #include #include #include #include -#include #include +#include #include #include #include diff --git a/src/Dictionaries/HashedArrayDictionary.cpp b/src/Dictionaries/HashedArrayDictionary.cpp index 9b194acf87f..d7d50dfb0a6 100644 --- a/src/Dictionaries/HashedArrayDictionary.cpp +++ b/src/Dictionaries/HashedArrayDictionary.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include #include diff --git a/src/Dictionaries/IPAddressDictionary.cpp b/src/Dictionaries/IPAddressDictionary.cpp index 206516edab0..41fafcc162b 100644 --- a/src/Dictionaries/IPAddressDictionary.cpp +++ b/src/Dictionaries/IPAddressDictionary.cpp @@ -1,11 +1,11 @@ #include "IPAddressDictionary.h" -#include -#include + #include #include #include #include #include +#include #include #include #include @@ -23,6 +23,9 @@ #include #include +#include +#include + namespace DB { diff --git a/src/Dictionaries/PolygonDictionaryImplementations.cpp b/src/Dictionaries/PolygonDictionaryImplementations.cpp index e21fadb0e7e..84abeed2731 100644 --- a/src/Dictionaries/PolygonDictionaryImplementations.cpp +++ b/src/Dictionaries/PolygonDictionaryImplementations.cpp @@ -8,6 +8,7 @@ #include #include +#include #include diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index b35e14577a8..f62a9a009d8 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -2,6 +2,7 @@ #include #include +#include #include "DictionarySourceFactory.h" #include "registerDictionaries.h" diff --git a/src/Dictionaries/RegExpTreeDictionary.cpp 
b/src/Dictionaries/RegExpTreeDictionary.cpp index 830f9d09364..2522de60183 100644 --- a/src/Dictionaries/RegExpTreeDictionary.cpp +++ b/src/Dictionaries/RegExpTreeDictionary.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -1002,9 +1003,9 @@ void registerDictionaryRegExpTree(DictionaryFactory & factory) dict_struct, std::move(source_ptr), configuration, - context->getSettings().regexp_dict_allow_hyperscan, - context->getSettings().regexp_dict_flag_case_insensitive, - context->getSettings().regexp_dict_flag_dotall); + context->getSettingsRef().regexp_dict_allow_hyperscan, + context->getSettingsRef().regexp_dict_flag_case_insensitive, + context->getSettingsRef().regexp_dict_flag_dotall); }; factory.registerLayout("regexp_tree", create_layout, true); diff --git a/src/Dictionaries/XDBCDictionarySource.cpp b/src/Dictionaries/XDBCDictionarySource.cpp index 1ebfc4a29b0..590d000f16e 100644 --- a/src/Dictionaries/XDBCDictionarySource.cpp +++ b/src/Dictionaries/XDBCDictionarySource.cpp @@ -10,11 +10,13 @@ #include #include #include +#include #include "DictionarySourceFactory.h" #include "DictionaryStructure.h" #include "readInvalidateQuery.h" #include "registerDictionaries.h" #include +#include #include #include #include "config.h" @@ -239,7 +241,7 @@ void registerDictionarySourceXDBC(DictionarySourceFactory & factory) #if USE_ODBC BridgeHelperPtr bridge = std::make_shared>( global_context, - global_context->getSettings().http_receive_timeout, + global_context->getSettingsRef().http_receive_timeout, config.getString(config_prefix + ".odbc.connection_string"), config.getBool(config_prefix + ".settings.odbc_bridge_use_connection_pooling", global_context->getSettingsRef().odbc_bridge_use_connection_pooling)); diff --git a/src/Dictionaries/registerCacheDictionaries.cpp b/src/Dictionaries/registerCacheDictionaries.cpp index b79261955ff..f2d027575f5 100644 --- a/src/Dictionaries/registerCacheDictionaries.cpp +++ b/src/Dictionaries/registerCacheDictionaries.cpp @@ -2,6 +2,7 @@ #include "CacheDictionaryStorage.h" #include "SSDCacheDictionaryStorage.h" #include +#include #include #include diff --git a/src/Dictionaries/registerHashedDictionary.cpp b/src/Dictionaries/registerHashedDictionary.cpp index 5fc4f5d5cb6..068b71170cc 100644 --- a/src/Dictionaries/registerHashedDictionary.cpp +++ b/src/Dictionaries/registerHashedDictionary.cpp @@ -1,8 +1,8 @@ +#include #include #include #include #include - #include namespace DB diff --git a/src/Dictionaries/registerRangeHashedDictionary.cpp b/src/Dictionaries/registerRangeHashedDictionary.cpp index 8123b811198..bc142a6bddc 100644 --- a/src/Dictionaries/registerRangeHashedDictionary.cpp +++ b/src/Dictionaries/registerRangeHashedDictionary.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include diff --git a/src/Disks/IO/ReadBufferFromWebServer.cpp b/src/Disks/IO/ReadBufferFromWebServer.cpp index 7c5de8a13de..837452aa9f2 100644 --- a/src/Disks/IO/ReadBufferFromWebServer.cpp +++ b/src/Disks/IO/ReadBufferFromWebServer.cpp @@ -1,9 +1,12 @@ #include "ReadBufferFromWebServer.h" -#include +#include +#include +#include #include #include -#include +#include + #include diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.cpp index d9dfedadd48..0aa3b9c40b5 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.cpp @@ -4,6 +4,7 @@ #include #include 
+#include #include #include #include diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.h b/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.h index 19ba48ea225..bb2f0270924 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.h +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.h @@ -11,7 +11,6 @@ #include #include -#include #include #include #include @@ -19,6 +18,8 @@ namespace DB { +struct Settings; + namespace AzureBlobStorage { diff --git a/src/Disks/ObjectStorages/ObjectStorageFactory.cpp b/src/Disks/ObjectStorages/ObjectStorageFactory.cpp index d9535358daf..d42da6dc9c4 100644 --- a/src/Disks/ObjectStorages/ObjectStorageFactory.cpp +++ b/src/Disks/ObjectStorages/ObjectStorageFactory.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index a6781e0ab35..a6672e14e10 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include diff --git a/src/Disks/ObjectStorages/S3/diskSettings.cpp b/src/Disks/ObjectStorages/S3/diskSettings.cpp index 62df98f51e6..de78ce4594d 100644 --- a/src/Disks/ObjectStorages/S3/diskSettings.cpp +++ b/src/Disks/ObjectStorages/S3/diskSettings.cpp @@ -1,14 +1,14 @@ #include -#include -#include #if USE_AWS_S3 +#include #include #include #include #include #include +#include #include #include #include @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp index e837e056acc..7f7a3fe1a62 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp @@ -3,6 +3,8 @@ #include #include +#include + #include #include #include diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index 3d92023318e..1271cdfb7ad 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include @@ -175,7 +176,7 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se format_settings.parquet.write_page_index = settings.output_format_parquet_write_page_index; format_settings.parquet.local_read_min_bytes_for_seek = settings.input_format_parquet_local_file_min_bytes_for_seek; format_settings.pretty.charset = settings.output_format_pretty_grid_charset.toString() == "ASCII" ? 
FormatSettings::Pretty::Charset::ASCII : FormatSettings::Pretty::Charset::UTF8; - format_settings.pretty.color = settings.output_format_pretty_color; + format_settings.pretty.color = settings.output_format_pretty_color.valueOr(2); format_settings.pretty.max_column_pad_width = settings.output_format_pretty_max_column_pad_width; format_settings.pretty.max_rows = settings.output_format_pretty_max_rows; format_settings.pretty.max_value_width = settings.output_format_pretty_max_value_width; diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index 8b38ffba524..a736bb1a558 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -1,10 +1,9 @@ #pragma once -#include #include +#include #include #include -#include namespace DB { @@ -293,7 +292,8 @@ struct FormatSettings UInt64 max_value_width = 10000; UInt64 max_value_width_apply_for_single_value = false; bool highlight_digit_groups = true; - SettingFieldUInt64Auto color{"auto"}; + /// Set to 2 for auto + UInt64 color = 2; bool output_format_pretty_row_numbers = false; UInt64 output_format_pretty_single_large_number_tip_threshold = 1'000'000; diff --git a/src/Formats/NumpyDataTypes.h b/src/Formats/NumpyDataTypes.h index 062f743c0ea..6ccdf65f457 100644 --- a/src/Formats/NumpyDataTypes.h +++ b/src/Formats/NumpyDataTypes.h @@ -2,6 +2,7 @@ #include #include #include +#include namespace ErrorCodes { diff --git a/src/Formats/ReadSchemaUtils.cpp b/src/Formats/ReadSchemaUtils.cpp index 1920459c378..dbbd728ed72 100644 --- a/src/Formats/ReadSchemaUtils.cpp +++ b/src/Formats/ReadSchemaUtils.cpp @@ -7,6 +7,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Functions/CastOverloadResolver.cpp b/src/Functions/CastOverloadResolver.cpp index 0f54ff52ba2..10a08c6e35f 100644 --- a/src/Functions/CastOverloadResolver.cpp +++ b/src/Functions/CastOverloadResolver.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index 5d19ba44d9b..ee4e28a7858 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -1703,7 +1703,7 @@ public: { if constexpr (is_division) { - if (context->getSettingsRef().decimal_check_overflow) + if (decimalCheckArithmeticOverflow(context)) { /// Check overflow by using operands scale (based on big decimal division implementation details): /// big decimal arithmetic is based on big integers, decimal operands are converted to big integers diff --git a/src/Functions/FunctionCustomWeekToDateOrDate32.h b/src/Functions/FunctionCustomWeekToDateOrDate32.h index c904bfaec30..6ae8c847b65 100644 --- a/src/Functions/FunctionCustomWeekToDateOrDate32.h +++ b/src/Functions/FunctionCustomWeekToDateOrDate32.h @@ -1,4 +1,5 @@ #pragma once +#include #include namespace DB diff --git a/src/Functions/FunctionDateOrDateTimeToDateOrDate32.h b/src/Functions/FunctionDateOrDateTimeToDateOrDate32.h index 6eb3e534b62..28caef36879 100644 --- a/src/Functions/FunctionDateOrDateTimeToDateOrDate32.h +++ b/src/Functions/FunctionDateOrDateTimeToDateOrDate32.h @@ -1,4 +1,5 @@ #pragma once +#include #include namespace DB diff --git a/src/Functions/FunctionDateOrDateTimeToDateTimeOrDateTime64.h b/src/Functions/FunctionDateOrDateTimeToDateTimeOrDateTime64.h index 9f1066fd687..d5a1f28b7cd 100644 --- a/src/Functions/FunctionDateOrDateTimeToDateTimeOrDateTime64.h +++ b/src/Functions/FunctionDateOrDateTimeToDateTimeOrDateTime64.h @@ -1,4 +1,6 @@ #pragma once + 
+#include #include namespace DB diff --git a/src/Functions/FunctionFactory.cpp b/src/Functions/FunctionFactory.cpp index 004ef745a93..de6d5a9e1c1 100644 --- a/src/Functions/FunctionFactory.cpp +++ b/src/Functions/FunctionFactory.cpp @@ -4,6 +4,7 @@ #include #include +#include #include diff --git a/src/Functions/FunctionGenerateRandomStructure.cpp b/src/Functions/FunctionGenerateRandomStructure.cpp index 6dc68134502..0c6f4287ecb 100644 --- a/src/Functions/FunctionGenerateRandomStructure.cpp +++ b/src/Functions/FunctionGenerateRandomStructure.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -354,6 +355,11 @@ namespace } } + FunctionPtr FunctionGenerateRandomStructure::create(DB::ContextPtr context) + { + return std::make_shared(context->getSettingsRef().allow_suspicious_low_cardinality_types.value); + } + DataTypePtr FunctionGenerateRandomStructure::getReturnTypeImpl(const DataTypes & arguments) const { if (arguments.size() > 2) diff --git a/src/Functions/FunctionGenerateRandomStructure.h b/src/Functions/FunctionGenerateRandomStructure.h index 894096a6e07..5901565696f 100644 --- a/src/Functions/FunctionGenerateRandomStructure.h +++ b/src/Functions/FunctionGenerateRandomStructure.h @@ -17,10 +17,7 @@ public: { } - static FunctionPtr create(ContextPtr context) - { - return std::make_shared(context->getSettingsRef().allow_suspicious_low_cardinality_types.value); - } + static FunctionPtr create(ContextPtr context); String getName() const override { return name; } diff --git a/src/Functions/FunctionJoinGet.cpp b/src/Functions/FunctionJoinGet.cpp index 0a2859fe864..a67f3a977db 100644 --- a/src/Functions/FunctionJoinGet.cpp +++ b/src/Functions/FunctionJoinGet.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/src/Functions/FunctionTokens.h b/src/Functions/FunctionTokens.h index 0ca47126198..f1435ca5651 100644 --- a/src/Functions/FunctionTokens.h +++ b/src/Functions/FunctionTokens.h @@ -17,6 +17,7 @@ #include #include #include +#include namespace DB diff --git a/src/Functions/FunctionUnixTimestamp64.cpp b/src/Functions/FunctionUnixTimestamp64.cpp new file mode 100644 index 00000000000..a22c035a0ba --- /dev/null +++ b/src/Functions/FunctionUnixTimestamp64.cpp @@ -0,0 +1,13 @@ +#include +#include + +namespace DB +{ + +FunctionFromUnixTimestamp64::FunctionFromUnixTimestamp64(size_t target_scale_, const char * name_, ContextPtr context) + : target_scale(target_scale_) + , name(name_) + , allow_nonconst_timezone_arguments(context->getSettingsRef().allow_nonconst_timezone_arguments) +{} + +} diff --git a/src/Functions/FunctionUnixTimestamp64.h b/src/Functions/FunctionUnixTimestamp64.h index e282bcfbfe2..bbd7e586a67 100644 --- a/src/Functions/FunctionUnixTimestamp64.h +++ b/src/Functions/FunctionUnixTimestamp64.h @@ -107,11 +107,7 @@ private: const bool allow_nonconst_timezone_arguments; public: - FunctionFromUnixTimestamp64(size_t target_scale_, const char * name_, ContextPtr context) - : target_scale(target_scale_) - , name(name_) - , allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments) - {} + FunctionFromUnixTimestamp64(size_t target_scale_, const char * name_, ContextPtr context); String getName() const override { return name; } size_t getNumberOfArguments() const override { return 0; } diff --git a/src/Functions/FunctionsCodingIP.cpp b/src/Functions/FunctionsCodingIP.cpp index e01967274f4..a134e39fbcd 100644 --- a/src/Functions/FunctionsCodingIP.cpp +++ b/src/Functions/FunctionsCodingIP.cpp @@ 
-10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index f3e54d2fbd9..1342e3f2c5d 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp index db1602b1939..47040545677 100644 --- a/src/Functions/FunctionsJSON.cpp +++ b/src/Functions/FunctionsJSON.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include diff --git a/src/Functions/FunctionsMultiStringFuzzySearch.h b/src/Functions/FunctionsMultiStringFuzzySearch.h index 18b411e9839..a92a6570279 100644 --- a/src/Functions/FunctionsMultiStringFuzzySearch.h +++ b/src/Functions/FunctionsMultiStringFuzzySearch.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include diff --git a/src/Functions/FunctionsMultiStringSearch.h b/src/Functions/FunctionsMultiStringSearch.h index c0ed90aa042..03db2651fd0 100644 --- a/src/Functions/FunctionsMultiStringSearch.h +++ b/src/Functions/FunctionsMultiStringSearch.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Functions/FunctionsStringSearch.h b/src/Functions/FunctionsStringSearch.h index 924a66c1130..fba6336ebff 100644 --- a/src/Functions/FunctionsStringSearch.h +++ b/src/Functions/FunctionsStringSearch.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Functions/FunctionsTextClassification.h b/src/Functions/FunctionsTextClassification.h index 8e0f236366d..90e8af06ccc 100644 --- a/src/Functions/FunctionsTextClassification.h +++ b/src/Functions/FunctionsTextClassification.h @@ -2,6 +2,7 @@ #include #include +#include #include #include #include diff --git a/src/Functions/PerformanceAdaptors.h b/src/Functions/PerformanceAdaptors.h index ef2c788bf43..48daf13ba68 100644 --- a/src/Functions/PerformanceAdaptors.h +++ b/src/Functions/PerformanceAdaptors.h @@ -4,6 +4,7 @@ #include #include +#include #include #include diff --git a/src/Functions/UserDefined/ExternalUserDefinedExecutableFunctionsLoader.cpp b/src/Functions/UserDefined/ExternalUserDefinedExecutableFunctionsLoader.cpp index 2c031158c48..a20e1807487 100644 --- a/src/Functions/UserDefined/ExternalUserDefinedExecutableFunctionsLoader.cpp +++ b/src/Functions/UserDefined/ExternalUserDefinedExecutableFunctionsLoader.cpp @@ -2,6 +2,7 @@ #include #include +#include #include @@ -184,7 +185,7 @@ ExternalLoader::LoadableMutablePtr ExternalUserDefinedExecutableFunctionsLoader: pool_size = config.getUInt64(key_in_config + ".pool_size", 16); max_command_execution_time = config.getUInt64(key_in_config + ".max_command_execution_time", 10); - size_t max_execution_time_seconds = static_cast(getContext()->getSettings().max_execution_time.totalSeconds()); + size_t max_execution_time_seconds = static_cast(getContext()->getSettingsRef().max_execution_time.totalSeconds()); if (max_execution_time_seconds != 0 && max_command_execution_time > max_execution_time_seconds) max_command_execution_time = max_execution_time_seconds; } diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsBackup.cpp b/src/Functions/UserDefined/UserDefinedSQLObjectsBackup.cpp index c15958f81c9..da79e17f18a 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsBackup.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsBackup.cpp @@ -13,6 +13,7 @@ #include #include 
#include +#include namespace DB diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp b/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp index b406cc8d317..4c004d2537c 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp @@ -9,6 +9,8 @@ #include #include +#include + #include #include #include diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp b/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp index 766d63eafb0..01e7e3995fa 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp @@ -14,7 +14,7 @@ #include #include #include - +#include namespace DB { diff --git a/src/Functions/array/array.cpp b/src/Functions/array/array.cpp index 03b51808799..dfe589fb74f 100644 --- a/src/Functions/array/array.cpp +++ b/src/Functions/array/array.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include diff --git a/src/Functions/array/range.cpp b/src/Functions/array/range.cpp index 1ac698a2745..b8b03ab5e74 100644 --- a/src/Functions/array/range.cpp +++ b/src/Functions/array/range.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Functions/castOrDefault.cpp b/src/Functions/castOrDefault.cpp index 995b5fa91e7..1ece17ca1e3 100644 --- a/src/Functions/castOrDefault.cpp +++ b/src/Functions/castOrDefault.cpp @@ -1,3 +1,4 @@ +#include #include #include #include diff --git a/src/Functions/changeDate.cpp b/src/Functions/changeDate.cpp new file mode 100644 index 00000000000..19e4c165ee3 --- /dev/null +++ b/src/Functions/changeDate.cpp @@ -0,0 +1,399 @@ +#include "Common/DateLUTImpl.h" +#include "Common/Exception.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +namespace +{ + +enum class Component +{ + Year, + Month, + Day, + Hour, + Minute, + Second +}; + +} + +template +class FunctionChangeDate : public IFunction +{ +public: + static constexpr auto name = Traits::name; + + static FunctionPtr create(ContextPtr) { return std::make_shared(); } + String getName() const override { return Traits::name; } + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } + size_t getNumberOfArguments() const override { return 2; } + + DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override + { + FunctionArgumentDescriptors args{ + {"date_or_datetime", static_cast(&isDateOrDate32OrDateTimeOrDateTime64), nullptr, "Date or date with time"}, + {"value", static_cast(&isNativeInteger), nullptr, "Integer"} + }; + validateFunctionArguments(*this, arguments, args); + + const auto & input_type = arguments[0].type; + + if constexpr (Traits::component == Component::Hour || Traits::component == Component::Minute || Traits::component == Component::Second) + { + if (isDate(input_type)) + return std::make_shared(); + if (isDate32(input_type)) + return std::make_shared(DataTypeDateTime64::default_scale); + } + + return input_type; + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override + { + const auto & input_type = 
arguments[0].type; + if (isDate(input_type)) + { + if constexpr (Traits::component == Component::Hour || Traits::component == Component::Minute || Traits::component == Component::Second) + return execute(arguments, input_type, result_type, input_rows_count); + return execute(arguments, input_type, result_type, input_rows_count); + } + if (isDate32(input_type)) + { + if constexpr (Traits::component == Component::Hour || Traits::component == Component::Minute || Traits::component == Component::Second) + return execute(arguments, input_type, result_type, input_rows_count); + return execute(arguments, input_type, result_type, input_rows_count); + } + if (isDateTime(input_type)) + return execute(arguments, input_type, result_type, input_rows_count); + if (isDateTime64(input_type)) + return execute(arguments, input_type, result_type, input_rows_count); + + throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid input type"); + } + + template + ColumnPtr execute(const ColumnsWithTypeAndName & arguments, const DataTypePtr & input_type, const DataTypePtr & result_type, size_t input_rows_count) const + { + typename ResultDataType::ColumnType::MutablePtr result_col; + if constexpr (std::is_same_v) + { + auto scale = DataTypeDateTime64::default_scale; + if constexpr (std::is_same_v) + scale = typeid_cast(*result_type).getScale(); + result_col = ResultDataType::ColumnType::create(input_rows_count, scale); + } + else + result_col = ResultDataType::ColumnType::create(input_rows_count); + + auto date_time_col = arguments[0].column->convertToFullIfNeeded(); + const auto & date_time_col_data = typeid_cast(*date_time_col).getData(); + + auto value_col = castColumn(arguments[1], std::make_shared()); + value_col = value_col->convertToFullIfNeeded(); + const auto & value_col_data = typeid_cast(*value_col).getData(); + + auto & result_col_data = result_col->getData(); + + if constexpr (std::is_same_v) + { + const auto scale = typeid_cast(*result_type).getScale(); + const auto & date_lut = typeid_cast(*result_type).getTimeZone(); + + Int64 deg = 1; + for (size_t j = 0; j < scale; ++j) + deg *= 10; + + for (size_t i = 0; i < input_rows_count; ++i) + { + Int64 time = date_lut.toNumYYYYMMDDhhmmss(date_time_col_data[i] / deg); + Int64 fraction = date_time_col_data[i] % deg; + + result_col_data[i] = getChangedDate(time, value_col_data[i], result_type, date_lut, scale, fraction); + } + } + else if constexpr (std::is_same_v && std::is_same_v) + { + const auto & date_lut = typeid_cast(*result_type).getTimeZone(); + for (size_t i = 0; i < input_rows_count; ++i) + { + Int64 time = static_cast(date_lut.toNumYYYYMMDD(ExtendedDayNum(date_time_col_data[i]))) * 1'000'000; + result_col_data[i] = getChangedDate(time, value_col_data[i], result_type, date_lut, 3, 0); + } + } + else if constexpr (std::is_same_v && std::is_same_v) + { + const auto & date_lut = typeid_cast(*result_type).getTimeZone(); + for (size_t i = 0; i < input_rows_count; ++i) + { + Int64 time = static_cast(date_lut.toNumYYYYMMDD(ExtendedDayNum(date_time_col_data[i]))) * 1'000'000; + result_col_data[i] = static_cast(getChangedDate(time, value_col_data[i], result_type, date_lut)); + } + } + else if constexpr (std::is_same_v) + { + const auto & date_lut = typeid_cast(*result_type).getTimeZone(); + for (size_t i = 0; i < input_rows_count; ++i) + { + Int64 time = date_lut.toNumYYYYMMDDhhmmss(date_time_col_data[i]); + result_col_data[i] = static_cast(getChangedDate(time, value_col_data[i], result_type, date_lut)); + } + } + else + { + const auto & date_lut = 
DateLUT::instance(); + for (size_t i = 0; i < input_rows_count; ++i) + { + Int64 time; + if (isDate(input_type)) + time = static_cast(date_lut.toNumYYYYMMDD(DayNum(date_time_col_data[i]))) * 1'000'000; + else + time = static_cast(date_lut.toNumYYYYMMDD(ExtendedDayNum(date_time_col_data[i]))) * 1'000'000; + + if (isDate(result_type)) + result_col_data[i] = static_cast(getChangedDate(time, value_col_data[i], result_type, date_lut)); + else + result_col_data[i] = static_cast(getChangedDate(time, value_col_data[i], result_type, date_lut)); + } + } + + return result_col; + } + + Int64 getChangedDate(Int64 time, Float64 new_value, const DataTypePtr & result_type, const DateLUTImpl & date_lut, Int64 scale = 0, Int64 fraction = 0) const + { + auto year = time / 10'000'000'000; + auto month = (time % 10'000'000'000) / 100'000'000; + auto day = (time % 100'000'000) / 1'000'000; + auto hours = (time % 1'000'000) / 10'000; + auto minutes = (time % 10'000) / 100; + auto seconds = time % 100; + + Int64 min_date = 0, max_date = 0; + Int16 min_year, max_year; + if (isDate(result_type)) + { + min_date = date_lut.makeDayNum(1970, 1, 1); + max_date = date_lut.makeDayNum(2149, 6, 6); + min_year = 1970; + max_year = 2149; + } + else if (isDate32(result_type)) + { + min_date = date_lut.makeDayNum(1900, 1, 1); + max_date = date_lut.makeDayNum(2299, 12, 31); + min_year = 1900; + max_year = 2299; + } + else if (isDateTime(result_type)) + { + min_date = 0; + max_date = 0x0FFFFFFFFLL; + min_year = 1970; + max_year = 2106; + } + else + { + min_date = DecimalUtils::decimalFromComponents( + date_lut.makeDateTime(1900, 1, 1, 0, 0, 0), + static_cast(0), + static_cast(scale)); + Int64 deg = 1; + for (Int64 j = 0; j < scale; ++j) + deg *= 10; + max_date = DecimalUtils::decimalFromComponents( + date_lut.makeDateTime(2299, 12, 31, 23, 59, 59), + static_cast(deg - 1), + static_cast(scale)); + min_year = 1900; + max_year = 2299; + } + + switch (Traits::component) + { + case Component::Year: + if (new_value < min_year) + return min_date; + else if (new_value > max_year) + return max_date; + year = static_cast(new_value); + break; + case Component::Month: + if (new_value < 1 || new_value > 12) + return min_date; + month = static_cast(new_value); + break; + case Component::Day: + if (new_value < 1 || new_value > 31) + return min_date; + day = static_cast(new_value); + break; + case Component::Hour: + if (new_value < 0 || new_value > 23) + return min_date; + hours = static_cast(new_value); + break; + case Component::Minute: + if (new_value < 0 || new_value > 59) + return min_date; + minutes = static_cast(new_value); + break; + case Component::Second: + if (new_value < 0 || new_value > 59) + return min_date; + seconds = static_cast(new_value); + break; + } + + Int64 result; + if (isDate(result_type) || isDate32(result_type)) + result = date_lut.makeDayNum(year, month, day); + else if (isDateTime(result_type)) + result = date_lut.makeDateTime(year, month, day, hours, minutes, seconds); + else +#ifndef __clang_analyzer__ + /// ^^ This looks funny. It is the least terrible suppression of a false positive reported by clang-analyzer (a sub-class + /// of clang-tidy checks) deep down in 'decimalFromComponents'. Usual suppressions of the form NOLINT* don't work here (they + /// would only affect code in _this_ file), and suppressing the issue in 'decimalFromComponents' may suppress true positives. 
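+    /// For intuition: 'time' packs components as YYYYMMDDhhmmss. For example, 20240715235958
+    /// decomposes via the divisions at the top of this function into year 2024, month 07,
+    /// day 15, hours 23, minutes 59, seconds 58.
+    /// decimalFromComponents(whole, fraction, scale) assembles whole * 10^scale + fraction,
+    /// so the resulting DateTime64 carries the recomputed epoch seconds in its integral part
+    /// and the original sub-second fraction unchanged.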
+ result = DecimalUtils::decimalFromComponents( + date_lut.makeDateTime(year, month, day, hours, minutes, seconds), + fraction, + static_cast(scale)); +#else + { + UNUSED(fraction); + result = 0; + } +#endif + + if (result < min_date) + return min_date; + + if (result > max_date) + return max_date; + + return result; + } +}; + + +struct ChangeYearTraits +{ + static constexpr auto name = "changeYear"; + static constexpr auto component = Component::Year; +}; + +struct ChangeMonthTraits +{ + static constexpr auto name = "changeMonth"; + static constexpr auto component = Component::Month; +}; + +struct ChangeDayTraits +{ + static constexpr auto name = "changeDay"; + static constexpr auto component = Component::Day; +}; + +struct ChangeHourTraits +{ + static constexpr auto name = "changeHour"; + static constexpr auto component = Component::Hour; +}; + +struct ChangeMinuteTraits +{ + static constexpr auto name = "changeMinute"; + static constexpr auto component = Component::Minute; +}; + +struct ChangeSecondTraits +{ + static constexpr auto name = "changeSecond"; + static constexpr auto component = Component::Second; +}; + +REGISTER_FUNCTION(ChangeDate) +{ + { + FunctionDocumentation::Description description = "Changes the year component of a date or date time."; + FunctionDocumentation::Syntax syntax = "changeYear(date_or_datetime, value);"; + FunctionDocumentation::Arguments arguments = {{"date_or_datetime", "The value to change. Type: Date, Date32, DateTime, or DateTime64"}, {"value", "The new value. Type: [U]Int*"}}; + FunctionDocumentation::ReturnedValue returned_value = "The same type as date_or_datetime."; + FunctionDocumentation::Categories categories = {"Dates and Times"}; + FunctionDocumentation function_documentation = {.description = description, .syntax = syntax, .arguments = arguments, .returned_value = returned_value, .categories = categories}; + factory.registerFunction>(function_documentation); + } + { + FunctionDocumentation::Description description = "Changes the month component of a date or date time."; + FunctionDocumentation::Syntax syntax = "changeMonth(date_or_datetime, value);"; + FunctionDocumentation::Arguments arguments = {{"date_or_datetime", "The value to change. Type: Date, Date32, DateTime, or DateTime64"}, {"value", "The new value. Type: [U]Int*"}}; + FunctionDocumentation::ReturnedValue returned_value = "The same type as date_or_datetime."; + FunctionDocumentation::Categories categories = {"Dates and Times"}; + FunctionDocumentation function_documentation = {.description = description, .syntax = syntax, .arguments = arguments, .returned_value = returned_value, .categories = categories}; + factory.registerFunction>(function_documentation); + } + { + FunctionDocumentation::Description description = "Changes the day component of a date or date time."; + FunctionDocumentation::Syntax syntax = "changeDay(date_or_datetime, value);"; + FunctionDocumentation::Arguments arguments = {{"date_or_datetime", "The value to change. Type: Date, Date32, DateTime, or DateTime64"}, {"value", "The new value. 
Type: [U]Int*"}}; + FunctionDocumentation::ReturnedValue returned_value = "The same type as date_or_datetime."; + FunctionDocumentation::Categories categories = {"Dates and Times"}; + FunctionDocumentation function_documentation = {.description = description, .syntax = syntax, .arguments = arguments, .returned_value = returned_value, .categories = categories}; + factory.registerFunction>(function_documentation); + } + { + FunctionDocumentation::Description description = "Changes the hour component of a date or date time."; + FunctionDocumentation::Syntax syntax = "changeHour(date_or_datetime, value);"; + FunctionDocumentation::Arguments arguments = {{"date_or_datetime", "The value to change. Type: Date, Date32, DateTime, or DateTime64"}, {"value", "The new value. Type: [U]Int*"}}; + FunctionDocumentation::ReturnedValue returned_value = "The same type as date_or_datetime. If the input is a Date, return DateTime. If the input is a Date32, return DateTime64."; + FunctionDocumentation::Categories categories = {"Dates and Times"}; + FunctionDocumentation function_documentation = {.description = description, .syntax = syntax, .arguments = arguments, .returned_value = returned_value, .categories = categories}; + factory.registerFunction>(function_documentation); + } + { + FunctionDocumentation::Description description = "Changes the minute component of a date or date time."; + FunctionDocumentation::Syntax syntax = "changeMinute(date_or_datetime, value);"; + FunctionDocumentation::Arguments arguments = {{"date_or_datetime", "The value to change. Type: Date, Date32, DateTime, or DateTime64"}, {"value", "The new value. Type: [U]Int*"}}; + FunctionDocumentation::ReturnedValue returned_value = "The same type as date_or_datetime. If the input is a Date, return DateTime. If the input is a Date32, return DateTime64."; + FunctionDocumentation::Categories categories = {"Dates and Times"}; + FunctionDocumentation function_documentation = {.description = description, .syntax = syntax, .arguments = arguments, .returned_value = returned_value, .categories = categories}; + factory.registerFunction>(function_documentation); + } + { + FunctionDocumentation::Description description = "Changes the second component of a date or date time."; + FunctionDocumentation::Syntax syntax = "changeSecond(date_or_datetime, value);"; + FunctionDocumentation::Arguments arguments = {{"date_or_datetime", "The value to change. Type: Date, Date32, DateTime, or DateTime64"}, {"value", "The new value. Type: [U]Int*"}}; + FunctionDocumentation::ReturnedValue returned_value = "The same type as date_or_datetime. If the input is a Date, return DateTime. 
If the input is a Date32, return DateTime64."; + FunctionDocumentation::Categories categories = {"Dates and Times"}; + FunctionDocumentation function_documentation = {.description = description, .syntax = syntax, .arguments = arguments, .returned_value = returned_value, .categories = categories}; + factory.registerFunction>(function_documentation); + } +} + +} diff --git a/src/Functions/formatDateTime.cpp b/src/Functions/formatDateTime.cpp index 1c5dc05c5aa..c3a5fe036c3 100644 --- a/src/Functions/formatDateTime.cpp +++ b/src/Functions/formatDateTime.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -782,9 +783,9 @@ public: static FunctionPtr create(ContextPtr context) { return std::make_shared(context); } explicit FunctionFormatDateTimeImpl(ContextPtr context) - : mysql_M_is_month_name(context->getSettings().formatdatetime_parsedatetime_m_is_month_name) - , mysql_f_prints_single_zero(context->getSettings().formatdatetime_f_prints_single_zero) - , mysql_format_ckl_without_leading_zeros(context->getSettings().formatdatetime_format_without_leading_zeros) + : mysql_M_is_month_name(context->getSettingsRef().formatdatetime_parsedatetime_m_is_month_name) + , mysql_f_prints_single_zero(context->getSettingsRef().formatdatetime_f_prints_single_zero) + , mysql_format_ckl_without_leading_zeros(context->getSettingsRef().formatdatetime_format_without_leading_zeros) { } diff --git a/src/Functions/formatQuery.cpp b/src/Functions/formatQuery.cpp index 655ea2e7cde..d10b3f9a5b7 100644 --- a/src/Functions/formatQuery.cpp +++ b/src/Functions/formatQuery.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/src/Functions/getClientHTTPHeader.cpp b/src/Functions/getClientHTTPHeader.cpp index ebd070a90b9..140f39d03b8 100644 --- a/src/Functions/getClientHTTPHeader.cpp +++ b/src/Functions/getClientHTTPHeader.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB diff --git a/src/Functions/getSetting.cpp b/src/Functions/getSetting.cpp index d6b8946e760..aed6b2119e4 100644 --- a/src/Functions/getSetting.cpp +++ b/src/Functions/getSetting.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB diff --git a/src/Functions/hasColumnInTable.cpp b/src/Functions/hasColumnInTable.cpp index 8ea16f688ee..00714997b4a 100644 --- a/src/Functions/hasColumnInTable.cpp +++ b/src/Functions/hasColumnInTable.cpp @@ -1,3 +1,4 @@ +#include #include #include #include diff --git a/src/Functions/if.cpp b/src/Functions/if.cpp index 4c08cd3b931..30eaa26fa20 100644 --- a/src/Functions/if.cpp +++ b/src/Functions/if.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Functions/keyvaluepair/extractKeyValuePairs.cpp b/src/Functions/keyvaluepair/extractKeyValuePairs.cpp index 94f02861af0..02e8412bbf3 100644 --- a/src/Functions/keyvaluepair/extractKeyValuePairs.cpp +++ b/src/Functions/keyvaluepair/extractKeyValuePairs.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include diff --git a/src/Functions/lemmatize.cpp b/src/Functions/lemmatize.cpp index e1168e077bb..76e5ed7432e 100644 --- a/src/Functions/lemmatize.cpp +++ b/src/Functions/lemmatize.cpp @@ -3,6 +3,7 @@ #if USE_NLP #include +#include #include #include #include diff --git a/src/Functions/map.cpp b/src/Functions/map.cpp index 66cd10a3f0b..0d42ce2abe1 100644 --- a/src/Functions/map.cpp +++ b/src/Functions/map.cpp @@ -10,6 +10,7 @@ #include #include #include +#include namespace DB diff --git a/src/Functions/multiIf.cpp 
b/src/Functions/multiIf.cpp index d3bf5618f66..c4b675fcf6c 100644 --- a/src/Functions/multiIf.cpp +++ b/src/Functions/multiIf.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Functions/neighbor.cpp b/src/Functions/neighbor.cpp index 62f129109f9..49b8980e7e2 100644 --- a/src/Functions/neighbor.cpp +++ b/src/Functions/neighbor.cpp @@ -1,3 +1,4 @@ +#include #include #include #include diff --git a/src/Functions/now.cpp b/src/Functions/now.cpp index 827b800a243..352c05f8f32 100644 --- a/src/Functions/now.cpp +++ b/src/Functions/now.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -90,7 +91,7 @@ public: size_t getNumberOfArguments() const override { return 0; } static FunctionOverloadResolverPtr create(ContextPtr context) { return std::make_unique(context); } explicit NowOverloadResolver(ContextPtr context) - : allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments) + : allow_nonconst_timezone_arguments(context->getSettingsRef().allow_nonconst_timezone_arguments) {} DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override diff --git a/src/Functions/now64.cpp b/src/Functions/now64.cpp index d6f8474c984..23ab8cad003 100644 --- a/src/Functions/now64.cpp +++ b/src/Functions/now64.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include @@ -118,7 +119,7 @@ public: size_t getNumberOfArguments() const override { return 0; } static FunctionOverloadResolverPtr create(ContextPtr context) { return std::make_unique(context); } explicit Now64OverloadResolver(ContextPtr context) - : allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments) + : allow_nonconst_timezone_arguments(context->getSettingsRef().allow_nonconst_timezone_arguments) {} DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override diff --git a/src/Functions/nowInBlock.cpp b/src/Functions/nowInBlock.cpp index 74f420986c8..291e43bdce5 100644 --- a/src/Functions/nowInBlock.cpp +++ b/src/Functions/nowInBlock.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include @@ -31,7 +32,7 @@ public: return std::make_shared(context); } explicit FunctionNowInBlock(ContextPtr context) - : allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments) + : allow_nonconst_timezone_arguments(context->getSettingsRef().allow_nonconst_timezone_arguments) {} String getName() const override diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index 339eb4cb26c..d2353c19a61 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include @@ -564,8 +565,8 @@ namespace static FunctionPtr create(ContextPtr context) { return std::make_shared(context); } explicit FunctionParseDateTimeImpl(ContextPtr context) - : mysql_M_is_month_name(context->getSettings().formatdatetime_parsedatetime_m_is_month_name) - , mysql_parse_ckl_without_leading_zeros(context->getSettings().parsedatetime_parse_without_leading_zeros) + : mysql_M_is_month_name(context->getSettingsRef().formatdatetime_parsedatetime_m_is_month_name) + , mysql_parse_ckl_without_leading_zeros(context->getSettingsRef().parsedatetime_parse_without_leading_zeros) { } diff --git a/src/Functions/partitionId.cpp b/src/Functions/partitionId.cpp index e2e9038cd8b..dab56f39adc 100644 --- a/src/Functions/partitionId.cpp +++ 
b/src/Functions/partitionId.cpp @@ -66,6 +66,7 @@ public: REGISTER_FUNCTION(PartitionId) { factory.registerFunction(); + factory.registerAlias("partitionID", "partitionId"); } } diff --git a/src/Functions/pointInPolygon.cpp b/src/Functions/pointInPolygon.cpp index 6b413829bd1..32d5905a513 100644 --- a/src/Functions/pointInPolygon.cpp +++ b/src/Functions/pointInPolygon.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Functions/runningAccumulate.cpp b/src/Functions/runningAccumulate.cpp index d585affd91b..c94cbe67216 100644 --- a/src/Functions/runningAccumulate.cpp +++ b/src/Functions/runningAccumulate.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/src/Functions/runningDifference.h b/src/Functions/runningDifference.h index fe477d13744..d51ea805f3c 100644 --- a/src/Functions/runningDifference.h +++ b/src/Functions/runningDifference.h @@ -1,6 +1,7 @@ #pragma once #include #include +#include #include #include #include diff --git a/src/Functions/serverConstants.cpp b/src/Functions/serverConstants.cpp index e7e423058f1..b74edfc3121 100644 --- a/src/Functions/serverConstants.cpp +++ b/src/Functions/serverConstants.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Functions/sleep.h b/src/Functions/sleep.h index 22748e86888..62ee19fa904 100644 --- a/src/Functions/sleep.h +++ b/src/Functions/sleep.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Functions/snowflake.cpp b/src/Functions/snowflake.cpp index 31ea6a28ece..0d4d65ee4b0 100644 --- a/src/Functions/snowflake.cpp +++ b/src/Functions/snowflake.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include diff --git a/src/Functions/snowflakeIDToDateTime.cpp b/src/Functions/snowflakeIDToDateTime.cpp index 9a1d5b8a74b..6c57c72fff1 100644 --- a/src/Functions/snowflakeIDToDateTime.cpp +++ b/src/Functions/snowflakeIDToDateTime.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include @@ -38,7 +39,7 @@ public: static FunctionPtr create(ContextPtr context) { return std::make_shared(context); } explicit FunctionSnowflakeIDToDateTime(ContextPtr context) - : allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments) + : allow_nonconst_timezone_arguments(context->getSettingsRef().allow_nonconst_timezone_arguments) {} String getName() const override { return name; } @@ -109,7 +110,7 @@ public: static FunctionPtr create(ContextPtr context) { return std::make_shared(context); } explicit FunctionSnowflakeIDToDateTime64(ContextPtr context) - : allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments) + : allow_nonconst_timezone_arguments(context->getSettingsRef().allow_nonconst_timezone_arguments) {} String getName() const override { return name; } diff --git a/src/Functions/stem.cpp b/src/Functions/stem.cpp index d326d17a7a8..5b845cf332b 100644 --- a/src/Functions/stem.cpp +++ b/src/Functions/stem.cpp @@ -3,6 +3,7 @@ #if USE_NLP #include +#include #include #include #include diff --git a/src/Functions/synonyms.cpp b/src/Functions/synonyms.cpp index 6b3e205785c..28dd83627d9 100644 --- a/src/Functions/synonyms.cpp +++ b/src/Functions/synonyms.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Functions/throwIf.cpp b/src/Functions/throwIf.cpp index c6ff5b9151f..becc6d2f772 100644 --- a/src/Functions/throwIf.cpp +++ b/src/Functions/throwIf.cpp @@ 
-4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Functions/toTimezone.cpp b/src/Functions/toTimezone.cpp index a0d90351898..cbcdf9bc772 100644 --- a/src/Functions/toTimezone.cpp +++ b/src/Functions/toTimezone.cpp @@ -9,6 +9,7 @@ #include #include +#include namespace DB { @@ -88,7 +89,7 @@ public: size_t getNumberOfArguments() const override { return 2; } static FunctionOverloadResolverPtr create(ContextPtr context) { return std::make_unique(context); } explicit ToTimeZoneOverloadResolver(ContextPtr context) - : allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments) + : allow_nonconst_timezone_arguments(context->getSettingsRef().allow_nonconst_timezone_arguments) {} DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override diff --git a/src/Functions/toTypeName.cpp b/src/Functions/toTypeName.cpp index 63bcc58d65e..abe02148d4a 100644 --- a/src/Functions/toTypeName.cpp +++ b/src/Functions/toTypeName.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include diff --git a/src/Functions/tuple.cpp b/src/Functions/tuple.cpp index 71f50abba6c..84772f79d08 100644 --- a/src/Functions/tuple.cpp +++ b/src/Functions/tuple.cpp @@ -1,8 +1,15 @@ #include +#include + namespace DB { +FunctionPtr FunctionTuple::create(DB::ContextPtr context) +{ + return std::make_shared(context->getSettingsRef().enable_named_columns_in_function_tuple); +} + REGISTER_FUNCTION(Tuple) { factory.registerFunction(FunctionDocumentation{ diff --git a/src/Functions/tuple.h b/src/Functions/tuple.h index 94529d86861..03ba7ec9ef2 100644 --- a/src/Functions/tuple.h +++ b/src/Functions/tuple.h @@ -21,11 +21,7 @@ class FunctionTuple : public IFunction public: static constexpr auto name = "tuple"; - /// maybe_unused: false-positive - [[maybe_unused]] static FunctionPtr create(ContextPtr context) - { - return std::make_shared(context->getSettingsRef().enable_named_columns_in_function_tuple); - } + static FunctionPtr create(ContextPtr context); explicit FunctionTuple(bool enable_named_columns_ = false) : enable_named_columns(enable_named_columns_) { } diff --git a/src/Functions/visibleWidth.cpp b/src/Functions/visibleWidth.cpp index 9a3edd9fbec..ebd4a1ff713 100644 --- a/src/Functions/visibleWidth.cpp +++ b/src/Functions/visibleWidth.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include diff --git a/src/Functions/widthBucket.cpp b/src/Functions/widthBucket.cpp index 62ed460ca9d..e804808b699 100644 --- a/src/Functions/widthBucket.cpp +++ b/src/Functions/widthBucket.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -15,6 +16,7 @@ #include #include #include +#include #include #include diff --git a/src/IO/ConnectionTimeouts.cpp b/src/IO/ConnectionTimeouts.cpp index da6214ae477..662a2c067cf 100644 --- a/src/IO/ConnectionTimeouts.cpp +++ b/src/IO/ConnectionTimeouts.cpp @@ -1,7 +1,10 @@ +#include +#include #include -#include #include +#include + namespace DB { diff --git a/src/IO/ConnectionTimeouts.h b/src/IO/ConnectionTimeouts.h index b86ec44d21c..b84db46d656 100644 --- a/src/IO/ConnectionTimeouts.h +++ b/src/IO/ConnectionTimeouts.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include @@ -10,6 +9,7 @@ namespace DB { +struct ServerSettings; struct Settings; #define APPLY_FOR_ALL_CONNECTION_TIMEOUT_MEMBERS(M) \ diff --git a/src/IO/S3/Client.cpp b/src/IO/S3/Client.cpp index 55441cfb86b..3b958dea046 100644 --- a/src/IO/S3/Client.cpp +++ b/src/IO/S3/Client.cpp @@ -23,10 
+23,11 @@ #include #include - #include #include +#include + #include diff --git a/src/IO/S3/copyS3File.cpp b/src/IO/S3/copyS3File.cpp index 0b3e5e50f3d..361b5a707fd 100644 --- a/src/IO/S3/copyS3File.cpp +++ b/src/IO/S3/copyS3File.cpp @@ -290,9 +290,9 @@ namespace if (!total_size) throw Exception(ErrorCodes::LOGICAL_ERROR, "Chosen multipart upload for an empty file. This must not happen"); - auto max_part_number = request_settings.max_part_number; - auto min_upload_part_size = request_settings.min_upload_part_size; - auto max_upload_part_size = request_settings.max_upload_part_size; + UInt64 max_part_number = request_settings.max_part_number; + UInt64 min_upload_part_size = request_settings.min_upload_part_size; + UInt64 max_upload_part_size = request_settings.max_upload_part_size; if (!max_part_number) throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "max_part_number must not be 0"); diff --git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp index 490bf8c2d0c..a794cdbcf05 100644 --- a/src/IO/S3Common.cpp +++ b/src/IO/S3Common.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include "config.h" diff --git a/src/IO/S3Common.h b/src/IO/S3Common.h index 2dca08871d3..fcfd0cfffae 100644 --- a/src/IO/S3Common.h +++ b/src/IO/S3Common.h @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include @@ -30,6 +30,8 @@ namespace ErrorCodes extern const int S3_ERROR; } +struct Settings; + class S3Exception : public Exception { public: diff --git a/src/IO/S3Settings.cpp b/src/IO/S3Settings.cpp index a5a50c873cb..f99e8c0105b 100644 --- a/src/IO/S3Settings.cpp +++ b/src/IO/S3Settings.cpp @@ -1,9 +1,11 @@ #include +#include #include -#include #include +#include + namespace DB { diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 9efb1d89a47..a874067e43c 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -442,7 +443,7 @@ FutureSetPtr makeExplicitSet( else block = createBlockForSet(left_arg_type, right_arg, set_element_types, context); - return prepared_sets.addFromTuple(set_key, block, context->getSettings()); + return prepared_sets.addFromTuple(set_key, block, context->getSettingsRef()); } class ScopeStack::Index diff --git a/src/Interpreters/ArrayJoinAction.cpp b/src/Interpreters/ArrayJoinAction.cpp index 54ff53764e7..df7a0b48057 100644 --- a/src/Interpreters/ArrayJoinAction.cpp +++ b/src/Interpreters/ArrayJoinAction.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index 0d33e39ffa3..a3848fa3a75 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -30,6 +30,7 @@ namespace ProfileEvents extern const Event FilesystemCacheFailToReserveSpaceBecauseOfLockContention; extern const Event FilesystemCacheFreeSpaceKeepingThreadRun; extern const Event FilesystemCacheFreeSpaceKeepingThreadWorkMilliseconds; + extern const Event FilesystemCacheFailToReserveSpaceBecauseOfCacheResize; } namespace DB @@ -813,7 +814,7 @@ bool FileCache::tryReserve( /// ok compared to the number of cases this check will help. 
     if (cache_is_being_resized.load(std::memory_order_relaxed))
     {
-        ProfileEvents::increment(ProfileEvents::FilesystemCacheFailToReserveSpaceBecauseOfLockContention);
+        ProfileEvents::increment(ProfileEvents::FilesystemCacheFailToReserveSpaceBecauseOfCacheResize);
         return false;
     }
diff --git a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp
index dd038948adf..e6ebf6ad50c 100644
--- a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp
+++ b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp
@@ -12,6 +12,8 @@
 #include
 #include
+#include
+
 namespace DB
 {
diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp
index 94a02eacdaa..e35d31d2350 100644
--- a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp
+++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp
index f2626696492..fc1e87e7b7e 100644
--- a/src/Interpreters/Context.cpp
+++ b/src/Interpreters/Context.cpp
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -824,8 +825,69 @@ void ContextSharedMutex::lockSharedImpl()
     ProfileEvents::increment(ProfileEvents::ContextLockWaitMicroseconds, watch.elapsedMicroseconds());
 }

-ContextData::ContextData() = default;
-ContextData::ContextData(const ContextData &) = default;
+ContextData::ContextData()
+{
+    settings = std::make_unique<Settings>();
+}
+ContextData::ContextData(const ContextData &o) :
+    shared(o.shared),
+    client_info(o.client_info),
+    external_tables_initializer_callback(o.external_tables_initializer_callback),
+    input_initializer_callback(o.input_initializer_callback),
+    input_blocks_reader(o.input_blocks_reader),
+    user_id(o.user_id),
+    current_roles(o.current_roles),
+    settings_constraints_and_current_profiles(o.settings_constraints_and_current_profiles),
+    access(o.access),
+    need_recalculate_access(o.need_recalculate_access),
+    current_database(o.current_database),
+    settings(std::make_unique<Settings>(*o.settings)),
+    progress_callback(o.progress_callback),
+    file_progress_callback(o.file_progress_callback),
+    process_list_elem(o.process_list_elem),
+    has_process_list_elem(o.has_process_list_elem),
+    insertion_table_info(o.insertion_table_info),
+    is_distributed(o.is_distributed),
+    default_format(o.default_format),
+    insert_format(o.insert_format),
+    external_tables_mapping(o.external_tables_mapping),
+    scalars(o.scalars),
+    special_scalars(o.special_scalars),
+    next_task_callback(o.next_task_callback),
+    merge_tree_read_task_callback(o.merge_tree_read_task_callback),
+    merge_tree_all_ranges_callback(o.merge_tree_all_ranges_callback),
+    parallel_replicas_group_uuid(o.parallel_replicas_group_uuid),
+    client_protocol_version(o.client_protocol_version),
+    query_access_info(std::make_shared<QueryAccessInfo>(*o.query_access_info)),
+    query_factories_info(o.query_factories_info),
+    query_privileges_info(o.query_privileges_info),
+    async_read_counters(o.async_read_counters),
+    view_source(o.view_source),
+    table_function_results(o.table_function_results),
+    query_context(o.query_context),
+    session_context(o.session_context),
+    global_context(o.global_context),
+    buffer_context(o.buffer_context),
+    is_internal_query(o.is_internal_query),
+    temp_data_on_disk(o.temp_data_on_disk),
+    classifier(o.classifier),
+    prepared_sets_cache(o.prepared_sets_cache),
+    offset_parallel_replicas_enabled(o.offset_parallel_replicas_enabled),
+    kitchen_sink(o.kitchen_sink),
+    part_uuids(o.part_uuids),
+    ignored_part_uuids(o.ignored_part_uuids),
+    query_parameters(o.query_parameters),
+    host_context(o.host_context),
+    metadata_transaction(o.metadata_transaction),
+    merge_tree_transaction(o.merge_tree_transaction),
+    merge_tree_transaction_holder(o.merge_tree_transaction_holder),
+    remote_read_query_throttler(o.remote_read_query_throttler),
+    remote_write_query_throttler(o.remote_write_query_throttler),
+    local_read_query_throttler(o.local_read_query_throttler),
+    local_write_query_throttler(o.local_write_query_throttler),
+    backups_query_throttler(o.backups_query_throttler)
+{
+}

 Context::Context() = default;
 Context::Context(const Context & rhs) : ContextData(rhs), std::enable_shared_from_this<Context>(rhs) {}

@@ -865,7 +927,6 @@ ContextMutablePtr Context::createCopy(const ContextPtr & other)
 {
     SharedLockGuard lock(other->mutex);
     auto new_context = std::shared_ptr<Context>(new Context(*other));
-    new_context->query_access_info = std::make_shared<QueryAccessInfo>(*other->query_access_info);
     return new_context;
 }

@@ -971,7 +1032,7 @@ Strings Context::getWarnings() const
     }
     /// Make setting's name ordered
     std::set<String> obsolete_settings;
-    for (const auto & setting : settings)
+    for (const auto & setting : *settings)
     {
         if (setting.isValueChanged() && setting.isObsolete())
             obsolete_settings.emplace(setting.getName());

@@ -1486,7 +1547,7 @@ std::shared_ptr<const ContextAccess> Context::getAccess() const
         /// If setUserID() was never called then this must be the global context with the full access.
         bool full_access = !user_id;

-        return ContextAccessParams{user_id, full_access, /* use_default_roles= */ false, current_roles, settings, current_database, client_info};
+        return ContextAccessParams{user_id, full_access, /* use_default_roles= */ false, current_roles, *settings, current_database, client_info};
     };

     /// Check if the current access rights are still valid, otherwise get parameters for recalculating access rights.
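The spelled-out copy constructor above exists because `settings` now sits behind a `std::unique_ptr` and must be deep-copied when a context is cloned (note the `std::make_unique<Settings>(*o.settings)` initializer); the member-wise `= default` copy would only have copied the pointer. A minimal standalone sketch of that pattern — names here are illustrative, not the real ClickHouse types:

```cpp
#include <memory>
#include <string>

// Heavy state lives behind a unique_ptr; each owner must hold its own copy.
struct Settings { std::string workload; };

class Ctx
{
public:
    Ctx() : settings(std::make_unique<Settings>()) {}

    // Deep copy: mirrors `settings(std::make_unique<Settings>(*o.settings))`
    // in the ContextData copy constructor above.
    Ctx(const Ctx & o) : settings(std::make_unique<Settings>(*o.settings)) {}

    Settings & ref() { return *settings; }

private:
    std::unique_ptr<Settings> settings;
};

int main()
{
    Ctx a;
    a.ref().workload = "merge";
    Ctx b(a);                    // b owns an independent Settings object
    b.ref().workload = "mutation";
    return a.ref().workload == "merge" ? 0 : 1;  // original is unaffected
}
```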
@@ -1568,7 +1629,7 @@ void Context::setCurrentProfilesWithLock(const SettingsProfilesInfo & profiles_i checkSettingsConstraintsWithLock(profiles_info.settings, SettingSource::PROFILE); applySettingsChangesWithLock(profiles_info.settings, lock); settings_constraints_and_current_profiles = profiles_info.getConstraintsAndProfileIDs(settings_constraints_and_current_profiles); - contextSanityClampSettingsWithLock(*this, settings, lock); + contextSanityClampSettingsWithLock(*this, *settings, lock); } void Context::setCurrentProfile(const String & profile_name, bool check_constraints) @@ -2209,15 +2270,15 @@ bool Context::displaySecretsInShowAndSelect() const Settings Context::getSettings() const { SharedLockGuard lock(mutex); - return settings; + return *settings; } void Context::setSettings(const Settings & settings_) { std::lock_guard lock(mutex); - settings = settings_; + *settings = settings_; need_recalculate_access = true; - contextSanityClampSettings(*this, settings); + contextSanityClampSettings(*this, *settings); } void Context::setSettingWithLock(std::string_view name, const String & value, const std::lock_guard & lock) @@ -2227,10 +2288,10 @@ void Context::setSettingWithLock(std::string_view name, const String & value, co setCurrentProfileWithLock(value, true /*check_constraints*/, lock); return; } - settings.set(name, value); + settings->set(name, value); if (ContextAccessParams::dependsOnSettingName(name)) need_recalculate_access = true; - contextSanityClampSettingsWithLock(*this, settings, lock); + contextSanityClampSettingsWithLock(*this, *settings, lock); } void Context::setSettingWithLock(std::string_view name, const Field & value, const std::lock_guard & lock) @@ -2240,7 +2301,7 @@ void Context::setSettingWithLock(std::string_view name, const Field & value, con setCurrentProfileWithLock(value.safeGet(), true /*check_constraints*/, lock); return; } - settings.set(name, value); + settings->set(name, value); if (ContextAccessParams::dependsOnSettingName(name)) need_recalculate_access = true; } @@ -2250,7 +2311,7 @@ void Context::applySettingChangeWithLock(const SettingChange & change, const std try { setSettingWithLock(change.name, change.value, lock); - contextSanityClampSettingsWithLock(*this, settings, lock); + contextSanityClampSettingsWithLock(*this, *settings, lock); } catch (Exception & e) { @@ -2265,7 +2326,7 @@ void Context::applySettingsChangesWithLock(const SettingsChanges & changes, cons { for (const SettingChange & change : changes) applySettingChangeWithLock(change, lock); - applySettingsQuirks(settings); + applySettingsQuirks(*settings); } void Context::setSetting(std::string_view name, const String & value) @@ -2278,7 +2339,7 @@ void Context::setSetting(std::string_view name, const Field & value) { std::lock_guard lock(mutex); setSettingWithLock(name, value, lock); - contextSanityClampSettingsWithLock(*this, settings, lock); + contextSanityClampSettingsWithLock(*this, *settings, lock); } void Context::setServerSetting(std::string_view name, const Field & value) @@ -2311,37 +2372,37 @@ void Context::applySettingsChanges(const SettingsChanges & changes) void Context::checkSettingsConstraintsWithLock(const SettingsProfileElements & profile_elements, SettingSource source) { - getSettingsConstraintsAndCurrentProfilesWithLock()->constraints.check(settings, profile_elements, source); + getSettingsConstraintsAndCurrentProfilesWithLock()->constraints.check(*settings, profile_elements, source); if (getApplicationType() == ApplicationType::LOCAL || getApplicationType() == 
ApplicationType::SERVER) - doSettingsSanityCheckClamp(settings, getLogger("SettingsSanity")); + doSettingsSanityCheckClamp(*settings, getLogger("SettingsSanity")); } void Context::checkSettingsConstraintsWithLock(const SettingChange & change, SettingSource source) { - getSettingsConstraintsAndCurrentProfilesWithLock()->constraints.check(settings, change, source); + getSettingsConstraintsAndCurrentProfilesWithLock()->constraints.check(*settings, change, source); if (getApplicationType() == ApplicationType::LOCAL || getApplicationType() == ApplicationType::SERVER) - doSettingsSanityCheckClamp(settings, getLogger("SettingsSanity")); + doSettingsSanityCheckClamp(*settings, getLogger("SettingsSanity")); } void Context::checkSettingsConstraintsWithLock(const SettingsChanges & changes, SettingSource source) { - getSettingsConstraintsAndCurrentProfilesWithLock()->constraints.check(settings, changes, source); + getSettingsConstraintsAndCurrentProfilesWithLock()->constraints.check(*settings, changes, source); if (getApplicationType() == ApplicationType::LOCAL || getApplicationType() == ApplicationType::SERVER) - doSettingsSanityCheckClamp(settings, getLogger("SettingsSanity")); + doSettingsSanityCheckClamp(*settings, getLogger("SettingsSanity")); } void Context::checkSettingsConstraintsWithLock(SettingsChanges & changes, SettingSource source) { - getSettingsConstraintsAndCurrentProfilesWithLock()->constraints.check(settings, changes, source); + getSettingsConstraintsAndCurrentProfilesWithLock()->constraints.check(*settings, changes, source); if (getApplicationType() == ApplicationType::LOCAL || getApplicationType() == ApplicationType::SERVER) - doSettingsSanityCheckClamp(settings, getLogger("SettingsSanity")); + doSettingsSanityCheckClamp(*settings, getLogger("SettingsSanity")); } void Context::clampToSettingsConstraintsWithLock(SettingsChanges & changes, SettingSource source) { - getSettingsConstraintsAndCurrentProfilesWithLock()->constraints.clamp(settings, changes, source); + getSettingsConstraintsAndCurrentProfilesWithLock()->constraints.clamp(*settings, changes, source); if (getApplicationType() == ApplicationType::LOCAL || getApplicationType() == ApplicationType::SERVER) - doSettingsSanityCheckClamp(settings, getLogger("SettingsSanity")); + doSettingsSanityCheckClamp(*settings, getLogger("SettingsSanity")); } void Context::checkMergeTreeSettingsConstraintsWithLock(const MergeTreeSettings & merge_tree_settings, const SettingsChanges & changes) const @@ -2364,8 +2425,8 @@ void Context::checkSettingsConstraints(const SettingChange & change, SettingSour void Context::checkSettingsConstraints(const SettingsChanges & changes, SettingSource source) { SharedLockGuard lock(mutex); - getSettingsConstraintsAndCurrentProfilesWithLock()->constraints.check(settings, changes, source); - doSettingsSanityCheckClamp(settings, getLogger("SettingsSanity")); + getSettingsConstraintsAndCurrentProfilesWithLock()->constraints.check(*settings, changes, source); + doSettingsSanityCheckClamp(*settings, getLogger("SettingsSanity")); } void Context::checkSettingsConstraints(SettingsChanges & changes, SettingSource source) @@ -2390,7 +2451,7 @@ void Context::resetSettingsToDefaultValue(const std::vector & names) { std::lock_guard lock(mutex); for (const String & name: names) - settings.setDefaultValue(name); + settings->setDefaultValue(name); } std::shared_ptr Context::getSettingsConstraintsAndCurrentProfilesWithLock() const @@ -2610,14 +2671,14 @@ void Context::makeQueryContextForMerge(const MergeTreeSettings & 
merge_tree_sett { makeQueryContext(); classifier.reset(); // It is assumed that there are no active queries running using this classifier, otherwise this will lead to crashes - settings.workload = merge_tree_settings.merge_workload.value.empty() ? getMergeWorkload() : merge_tree_settings.merge_workload; + settings->workload = merge_tree_settings.merge_workload.value.empty() ? getMergeWorkload() : merge_tree_settings.merge_workload; } void Context::makeQueryContextForMutate(const MergeTreeSettings & merge_tree_settings) { makeQueryContext(); classifier.reset(); // It is assumed that there are no active queries running using this classifier, otherwise this will lead to crashes - settings.workload = merge_tree_settings.mutation_workload.value.empty() ? getMutationWorkload() : merge_tree_settings.mutation_workload; + settings->workload = merge_tree_settings.mutation_workload.value.empty() ? getMutationWorkload() : merge_tree_settings.mutation_workload; } void Context::makeSessionContext() @@ -3900,7 +3961,7 @@ void Context::reloadClusterConfig() const } const auto & config = cluster_config ? *cluster_config : getConfigRef(); - auto new_clusters = std::make_shared(config, settings, getMacros()); + auto new_clusters = std::make_shared(config, *settings, getMacros()); { std::lock_guard lock(shared->clusters_mutex); @@ -3935,7 +3996,7 @@ std::shared_ptr Context::getClustersImpl(std::lock_guard & if (!shared->clusters) { const auto & config = shared->clusters_config ? *shared->clusters_config : getConfigRef(); - shared->clusters = std::make_shared(config, settings, getMacros()); + shared->clusters = std::make_shared(config, *settings, getMacros()); } return shared->clusters; @@ -3967,9 +4028,9 @@ void Context::setClustersConfig(const ConfigurationPtr & config, bool enable_dis shared->clusters_config = config; if (!shared->clusters) - shared->clusters = std::make_shared(*shared->clusters_config, settings, getMacros(), config_name); + shared->clusters = std::make_shared(*shared->clusters_config, *settings, getMacros(), config_name); else - shared->clusters->updateClusters(*shared->clusters_config, settings, config_name, old_clusters_config); + shared->clusters->updateClusters(*shared->clusters_config, *settings, config_name, old_clusters_config); ++shared->clusters_version; } @@ -4230,7 +4291,7 @@ std::shared_ptr Context::getBackupLog() const std::shared_ptr Context::getBlobStorageLog() const { - bool enable_blob_storage_log = settings.enable_blob_storage_log; + bool enable_blob_storage_log = settings->enable_blob_storage_log; if (hasQueryContext()) enable_blob_storage_log = getQueryContext()->getSettingsRef().enable_blob_storage_log; @@ -4671,8 +4732,8 @@ void Context::setDefaultProfiles(const Poco::Util::AbstractConfiguration & confi shared->system_profile_name = config.getString("system_profile", shared->default_profile_name); setCurrentProfile(shared->system_profile_name); - applySettingsQuirks(settings, getLogger("SettingsQuirks")); - doSettingsSanityCheckClamp(settings, getLogger("SettingsSanity")); + applySettingsQuirks(*settings, getLogger("SettingsQuirks")); + doSettingsSanityCheckClamp(*settings, getLogger("SettingsSanity")); shared->buffer_profile_name = config.getString("buffer_profile", shared->system_profile_name); buffer_context = Context::createCopy(shared_from_this()); @@ -5241,11 +5302,11 @@ AsynchronousInsertQueue * Context::tryGetAsynchronousInsertQueue() const void Context::setAsynchronousInsertQueue(const std::shared_ptr & ptr) { - 
AsynchronousInsertQueue::validateSettings(settings, getLogger("Context")); + AsynchronousInsertQueue::validateSettings(*settings, getLogger("Context")); SharedLockGuard lock(shared->mutex); - if (std::chrono::milliseconds(settings.async_insert_poll_timeout_ms) == std::chrono::milliseconds::zero()) + if (std::chrono::milliseconds(settings->async_insert_poll_timeout_ms) == std::chrono::milliseconds::zero()) throw Exception(ErrorCodes::INVALID_SETTING_VALUE, "Setting async_insert_poll_timeout_ms can't be zero"); shared->async_insert_queue = ptr; @@ -5392,69 +5453,69 @@ ReadSettings Context::getReadSettings() const { ReadSettings res; - std::string_view read_method_str = settings.local_filesystem_read_method.value; + std::string_view read_method_str = settings->local_filesystem_read_method.value; if (auto opt_method = magic_enum::enum_cast(read_method_str)) res.local_fs_method = *opt_method; else throw Exception(ErrorCodes::UNKNOWN_READ_METHOD, "Unknown read method '{}' for local filesystem", read_method_str); - read_method_str = settings.remote_filesystem_read_method.value; + read_method_str = settings->remote_filesystem_read_method.value; if (auto opt_method = magic_enum::enum_cast(read_method_str)) res.remote_fs_method = *opt_method; else throw Exception(ErrorCodes::UNKNOWN_READ_METHOD, "Unknown read method '{}' for remote filesystem", read_method_str); - res.local_fs_prefetch = settings.local_filesystem_read_prefetch; - res.remote_fs_prefetch = settings.remote_filesystem_read_prefetch; + res.local_fs_prefetch = settings->local_filesystem_read_prefetch; + res.remote_fs_prefetch = settings->remote_filesystem_read_prefetch; - res.load_marks_asynchronously = settings.load_marks_asynchronously; + res.load_marks_asynchronously = settings->load_marks_asynchronously; - res.enable_filesystem_read_prefetches_log = settings.enable_filesystem_read_prefetches_log; + res.enable_filesystem_read_prefetches_log = settings->enable_filesystem_read_prefetches_log; - res.remote_fs_read_max_backoff_ms = settings.remote_fs_read_max_backoff_ms; - res.remote_fs_read_backoff_max_tries = settings.remote_fs_read_backoff_max_tries; - res.enable_filesystem_cache = settings.enable_filesystem_cache; - res.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache; - res.enable_filesystem_cache_log = settings.enable_filesystem_cache_log; - res.filesystem_cache_segments_batch_size = settings.filesystem_cache_segments_batch_size; - res.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; + res.remote_fs_read_max_backoff_ms = settings->remote_fs_read_max_backoff_ms; + res.remote_fs_read_backoff_max_tries = settings->remote_fs_read_backoff_max_tries; + res.enable_filesystem_cache = settings->enable_filesystem_cache; + res.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = settings->read_from_filesystem_cache_if_exists_otherwise_bypass_cache; + res.enable_filesystem_cache_log = settings->enable_filesystem_cache_log; + res.filesystem_cache_segments_batch_size = settings->filesystem_cache_segments_batch_size; + res.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = settings->filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; - res.filesystem_cache_max_download_size = settings.filesystem_cache_max_download_size; - res.skip_download_if_exceeds_query_cache = settings.skip_download_if_exceeds_query_cache; + res.filesystem_cache_max_download_size 
= settings->filesystem_cache_max_download_size; + res.skip_download_if_exceeds_query_cache = settings->skip_download_if_exceeds_query_cache; res.page_cache = getPageCache(); - res.use_page_cache_for_disks_without_file_cache = settings.use_page_cache_for_disks_without_file_cache; - res.read_from_page_cache_if_exists_otherwise_bypass_cache = settings.read_from_page_cache_if_exists_otherwise_bypass_cache; - res.page_cache_inject_eviction = settings.page_cache_inject_eviction; + res.use_page_cache_for_disks_without_file_cache = settings->use_page_cache_for_disks_without_file_cache; + res.read_from_page_cache_if_exists_otherwise_bypass_cache = settings->read_from_page_cache_if_exists_otherwise_bypass_cache; + res.page_cache_inject_eviction = settings->page_cache_inject_eviction; - res.remote_read_min_bytes_for_seek = settings.remote_read_min_bytes_for_seek; + res.remote_read_min_bytes_for_seek = settings->remote_read_min_bytes_for_seek; /// Zero read buffer will not make progress. - if (!settings.max_read_buffer_size) + if (!settings->max_read_buffer_size) { throw Exception(ErrorCodes::INVALID_SETTING_VALUE, - "Invalid value '{}' for max_read_buffer_size", settings.max_read_buffer_size); + "Invalid value '{}' for max_read_buffer_size", settings->max_read_buffer_size); } res.local_fs_buffer_size - = settings.max_read_buffer_size_local_fs ? settings.max_read_buffer_size_local_fs : settings.max_read_buffer_size; + = settings->max_read_buffer_size_local_fs ? settings->max_read_buffer_size_local_fs : settings->max_read_buffer_size; res.remote_fs_buffer_size - = settings.max_read_buffer_size_remote_fs ? settings.max_read_buffer_size_remote_fs : settings.max_read_buffer_size; - res.prefetch_buffer_size = settings.prefetch_buffer_size; - res.direct_io_threshold = settings.min_bytes_to_use_direct_io; - res.mmap_threshold = settings.min_bytes_to_use_mmap_io; - res.priority = Priority{settings.read_priority}; + = settings->max_read_buffer_size_remote_fs ? 
settings->max_read_buffer_size_remote_fs : settings->max_read_buffer_size; + res.prefetch_buffer_size = settings->prefetch_buffer_size; + res.direct_io_threshold = settings->min_bytes_to_use_direct_io; + res.mmap_threshold = settings->min_bytes_to_use_mmap_io; + res.priority = Priority{settings->read_priority}; res.remote_throttler = getRemoteReadThrottler(); res.local_throttler = getLocalReadThrottler(); - res.http_max_tries = settings.http_max_tries; - res.http_retry_initial_backoff_ms = settings.http_retry_initial_backoff_ms; - res.http_retry_max_backoff_ms = settings.http_retry_max_backoff_ms; - res.http_skip_not_found_url_for_globs = settings.http_skip_not_found_url_for_globs; - res.http_make_head_request = settings.http_make_head_request; + res.http_max_tries = settings->http_max_tries; + res.http_retry_initial_backoff_ms = settings->http_retry_initial_backoff_ms; + res.http_retry_max_backoff_ms = settings->http_retry_max_backoff_ms; + res.http_skip_not_found_url_for_globs = settings->http_skip_not_found_url_for_globs; + res.http_make_head_request = settings->http_make_head_request; res.mmap_cache = getMMappedFileCache().get(); @@ -5465,13 +5526,13 @@ WriteSettings Context::getWriteSettings() const { WriteSettings res; - res.enable_filesystem_cache_on_write_operations = settings.enable_filesystem_cache_on_write_operations; - res.enable_filesystem_cache_log = settings.enable_filesystem_cache_log; - res.throw_on_error_from_cache = settings.throw_on_error_from_cache_on_write_operations; - res.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; + res.enable_filesystem_cache_on_write_operations = settings->enable_filesystem_cache_on_write_operations; + res.enable_filesystem_cache_log = settings->enable_filesystem_cache_log; + res.throw_on_error_from_cache = settings->throw_on_error_from_cache_on_write_operations; + res.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = settings->filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; - res.s3_allow_parallel_part_upload = settings.s3_allow_parallel_part_upload; - res.azure_allow_parallel_part_upload = settings.azure_allow_parallel_part_upload; + res.s3_allow_parallel_part_upload = settings->s3_allow_parallel_part_upload; + res.azure_allow_parallel_part_upload = settings->azure_allow_parallel_part_upload; res.remote_throttler = getRemoteWriteThrottler(); res.local_throttler = getLocalWriteThrottler(); @@ -5519,7 +5580,7 @@ bool Context::canUseParallelReplicasOnFollower() const bool Context::canUseParallelReplicasCustomKey() const { - return settings.max_parallel_replicas > 1 && getParallelReplicasMode() == Context::ParallelReplicasMode::CUSTOM_KEY; + return settings->max_parallel_replicas > 1 && getParallelReplicasMode() == Context::ParallelReplicasMode::CUSTOM_KEY; } bool Context::canUseParallelReplicasCustomKeyForCluster(const Cluster & cluster) const @@ -5529,7 +5590,7 @@ bool Context::canUseParallelReplicasCustomKeyForCluster(const Cluster & cluster) bool Context::canUseOffsetParallelReplicas() const { - return offset_parallel_replicas_enabled && settings.max_parallel_replicas > 1 + return offset_parallel_replicas_enabled && settings->max_parallel_replicas > 1 && getParallelReplicasMode() != Context::ParallelReplicasMode::READ_TASKS; } @@ -5541,13 +5602,13 @@ void Context::disableOffsetParallelReplicas() ClusterPtr Context::getClusterForParallelReplicas() const { /// check cluster for parallel replicas - if 
(settings.cluster_for_parallel_replicas.value.empty())
+    if (settings->cluster_for_parallel_replicas.value.empty())
         throw Exception(
             ErrorCodes::CLUSTER_DOESNT_EXIST,
             "Reading in parallel from replicas is enabled but cluster to execute query is not provided. Please set "
             "'cluster_for_parallel_replicas' setting");

-    return getCluster(settings.cluster_for_parallel_replicas);
+    return getCluster(settings->cluster_for_parallel_replicas);
 }

 void Context::setPreparedSetsCache(const PreparedSetsCachePtr & cache)
@@ -5575,4 +5636,39 @@ const ServerSettings & Context::getServerSettings() const
     return shared->server_settings;
 }

+uint64_t HTTPContext::getMaxHstsAge() const
+{
+    return context->getSettingsRef().hsts_max_age;
+}
+
+uint64_t HTTPContext::getMaxUriSize() const
+{
+    return context->getSettingsRef().http_max_uri_size;
+}
+
+uint64_t HTTPContext::getMaxFields() const
+{
+    return context->getSettingsRef().http_max_fields;
+}
+
+uint64_t HTTPContext::getMaxFieldNameSize() const
+{
+    return context->getSettingsRef().http_max_field_name_size;
+}
+
+uint64_t HTTPContext::getMaxFieldValueSize() const
+{
+    return context->getSettingsRef().http_max_field_value_size;
+}
+
+Poco::Timespan HTTPContext::getReceiveTimeout() const
+{
+    return context->getSettingsRef().http_receive_timeout;
+}
+
+Poco::Timespan HTTPContext::getSendTimeout() const
+{
+    return context->getSettingsRef().http_send_timeout;
+}
+
 }
diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h
index 8c5492bcbc8..284cac50769 100644
--- a/src/Interpreters/Context.h
+++ b/src/Interpreters/Context.h
@@ -11,9 +11,11 @@
 #include
 #include
 #include
-#include
 #include
+#include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -129,6 +131,7 @@ class ShellCommand;
 class ICompressionCodec;
 class AccessControl;
 class GSSAcceptorContext;
+struct Settings;
 struct SettingsConstraintsAndProfileIDs;
 class SettingsProfileElements;
 class RemoteHostFilter;
@@ -276,7 +279,7 @@ protected:
     mutable std::shared_ptr<const ContextAccess> access;
     mutable bool need_recalculate_access = true;
     String current_database;
-    Settings settings; /// Setting for query execution.
+    std::unique_ptr<Settings> settings{}; /// Setting for query execution.

     using ProgressCallback = std::function<void(const Progress & progress)>;
     ProgressCallback progress_callback; /// Callback for tracking progress of query execution.
@@ -952,7 +955,7 @@ public:
     void makeSessionContext();
     void makeGlobalContext();

-    const Settings & getSettingsRef() const { return settings; }
+    const Settings & getSettingsRef() const { return *settings; }

     void setProgressCallback(ProgressCallback callback);
     /// Used in executeQuery() to pass it to the QueryPipeline.
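The Context.h side of this change swaps the concrete `Settings` member for `std::unique_ptr<Settings>` plus a `struct Settings;` forward declaration, so the many translation units that include Context.h no longer rebuild when a setting is added; accessors whose bodies need the full type (like the HTTPContext getters above) move out of the header. A toy sketch of the idiom, with one file standing in for the header/source split and purely illustrative names:

```cpp
#include <cstdint>
#include <memory>

// "Context.h" part: only a forward declaration of the heavy type is needed
// to hold it through unique_ptr and to declare member functions.
struct Settings;

class Context
{
public:
    Context();
    ~Context();                  // must be defined where Settings is complete
    uint64_t maxUriSize() const; // out-of-line, like HTTPContext::getMaxUriSize
private:
    std::unique_ptr<Settings> settings;
};

// "Context.cpp" part: the single translation unit that sees the definition.
struct Settings
{
    uint64_t http_max_uri_size = 1048576;
};

Context::Context() : settings(std::make_unique<Settings>()) {}
Context::~Context() = default;
uint64_t Context::maxUriSize() const { return settings->http_max_uri_size; }

int main()
{
    Context ctx;
    return ctx.maxUriSize() == 1048576 ? 0 : 1;
}
```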
@@ -1428,40 +1431,19 @@ struct HTTPContext : public IHTTPContext : context(Context::createCopy(context_)) {} - uint64_t getMaxHstsAge() const override - { - return context->getSettingsRef().hsts_max_age; - } + uint64_t getMaxHstsAge() const override; - uint64_t getMaxUriSize() const override - { - return context->getSettingsRef().http_max_uri_size; - } + uint64_t getMaxUriSize() const override; - uint64_t getMaxFields() const override - { - return context->getSettingsRef().http_max_fields; - } + uint64_t getMaxFields() const override; - uint64_t getMaxFieldNameSize() const override - { - return context->getSettingsRef().http_max_field_name_size; - } + uint64_t getMaxFieldNameSize() const override; - uint64_t getMaxFieldValueSize() const override - { - return context->getSettingsRef().http_max_field_value_size; - } + uint64_t getMaxFieldValueSize() const override; - Poco::Timespan getReceiveTimeout() const override - { - return context->getSettingsRef().http_receive_timeout; - } + Poco::Timespan getReceiveTimeout() const override; - Poco::Timespan getSendTimeout() const override - { - return context->getSettingsRef().http_send_timeout; - } + Poco::Timespan getSendTimeout() const override; ContextPtr context; }; diff --git a/src/Interpreters/DDLTask.cpp b/src/Interpreters/DDLTask.cpp index 6c346836ed8..fa197d59c13 100644 --- a/src/Interpreters/DDLTask.cpp +++ b/src/Interpreters/DDLTask.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/DDLTask.h b/src/Interpreters/DDLTask.h index 0b0460b26c8..515a35d8671 100644 --- a/src/Interpreters/DDLTask.h +++ b/src/Interpreters/DDLTask.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 5639eed552e..697fd0f406b 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 964baea1891..bb2dd158710 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -1281,10 +1281,6 @@ void DatabaseCatalog::rescheduleDropTableTask() auto min_drop_time = getMinDropTime(); time_t schedule_after_ms = min_drop_time > current_time ? (min_drop_time - current_time) * 1000 : 0; - LOG_TRACE( - log, - "Have {} tables in queue to drop. 
Schedule background task in {} seconds", - tables_marked_dropped.size(), schedule_after_ms / 1000); (*drop_task)->scheduleAfter(schedule_after_ms); } diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index a70ff3c6c53..4bd1c47d5a0 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include #include diff --git a/src/Interpreters/ExpressionActionsSettings.h b/src/Interpreters/ExpressionActionsSettings.h index 326de983748..384a61a54a2 100644 --- a/src/Interpreters/ExpressionActionsSettings.h +++ b/src/Interpreters/ExpressionActionsSettings.h @@ -1,7 +1,7 @@ #pragma once +#include #include -#include #include diff --git a/src/Interpreters/ExternalDictionariesLoader.cpp b/src/Interpreters/ExternalDictionariesLoader.cpp index 1685c06d387..5f6d7e33927 100644 --- a/src/Interpreters/ExternalDictionariesLoader.cpp +++ b/src/Interpreters/ExternalDictionariesLoader.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include "config.h" diff --git a/src/Interpreters/FilesystemCacheLog.h b/src/Interpreters/FilesystemCacheLog.h index 27c616ff40c..6d92fbd7a43 100644 --- a/src/Interpreters/FilesystemCacheLog.h +++ b/src/Interpreters/FilesystemCacheLog.h @@ -2,7 +2,6 @@ #include #include -#include #include #include #include diff --git a/src/Interpreters/GlobalSubqueriesVisitor.h b/src/Interpreters/GlobalSubqueriesVisitor.h index 64b6eb5dce9..986134a6998 100644 --- a/src/Interpreters/GlobalSubqueriesVisitor.h +++ b/src/Interpreters/GlobalSubqueriesVisitor.h @@ -2,6 +2,7 @@ #include #include +#include #include #include #include diff --git a/src/Interpreters/GraceHashJoin.cpp b/src/Interpreters/GraceHashJoin.cpp index 6970048269b..a241d1cd258 100644 --- a/src/Interpreters/GraceHashJoin.cpp +++ b/src/Interpreters/GraceHashJoin.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include diff --git a/src/Interpreters/IInterpreter.cpp b/src/Interpreters/IInterpreter.cpp index 148e73e43ce..a291199f24b 100644 --- a/src/Interpreters/IInterpreter.cpp +++ b/src/Interpreters/IInterpreter.cpp @@ -1,3 +1,4 @@ +#include #include #include #include diff --git a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp index fed29b410db..d8f6df05ca4 100644 --- a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp +++ b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp @@ -1,4 +1,6 @@ #include + +#include #include #include #include @@ -15,6 +17,23 @@ namespace DB { +IInterpreterUnionOrSelectQuery::IInterpreterUnionOrSelectQuery(const DB::ASTPtr& query_ptr_, + const DB::ContextMutablePtr& context_, const DB::SelectQueryOptions& options_) + : query_ptr(query_ptr_) + , context(context_) + , options(options_) + , max_streams(context->getSettingsRef().max_threads) +{ + if (options.shard_num) + context->addSpecialScalar( + "_shard_num", + Block{{DataTypeUInt32().createColumnConst(1, *options.shard_num), std::make_shared(), "_shard_num"}}); + if (options.shard_count) + context->addSpecialScalar( + "_shard_count", + Block{{DataTypeUInt32().createColumnConst(1, *options.shard_count), std::make_shared(), "_shard_count"}}); +} + QueryPipelineBuilder IInterpreterUnionOrSelectQuery::buildQueryPipeline() { QueryPlan query_plan; diff --git a/src/Interpreters/IInterpreterUnionOrSelectQuery.h b/src/Interpreters/IInterpreterUnionOrSelectQuery.h index e4425a73505..ac8e90b5f52 100644 
--- a/src/Interpreters/IInterpreterUnionOrSelectQuery.h +++ b/src/Interpreters/IInterpreterUnionOrSelectQuery.h @@ -17,21 +17,7 @@ public: { } - IInterpreterUnionOrSelectQuery(const ASTPtr & query_ptr_, const ContextMutablePtr & context_, const SelectQueryOptions & options_) - : query_ptr(query_ptr_) - , context(context_) - , options(options_) - , max_streams(context->getSettingsRef().max_threads) - { - if (options.shard_num) - context->addSpecialScalar( - "_shard_num", - Block{{DataTypeUInt32().createColumnConst(1, *options.shard_num), std::make_shared(), "_shard_num"}}); - if (options.shard_count) - context->addSpecialScalar( - "_shard_count", - Block{{DataTypeUInt32().createColumnConst(1, *options.shard_count), std::make_shared(), "_shard_count"}}); - } + IInterpreterUnionOrSelectQuery(const ASTPtr & query_ptr_, const ContextMutablePtr & context_, const SelectQueryOptions & options_); virtual void buildQueryPlan(QueryPlan & query_plan) = 0; QueryPipelineBuilder buildQueryPipeline(); diff --git a/src/Interpreters/IdentifierSemantic.cpp b/src/Interpreters/IdentifierSemantic.cpp index 36972aeb03d..24e594661e9 100644 --- a/src/Interpreters/IdentifierSemantic.cpp +++ b/src/Interpreters/IdentifierSemantic.cpp @@ -2,6 +2,8 @@ #include +#include + #include #include diff --git a/src/Interpreters/InJoinSubqueriesPreprocessor.cpp b/src/Interpreters/InJoinSubqueriesPreprocessor.cpp index 3b3ef928b42..22ecb6a5db2 100644 --- a/src/Interpreters/InJoinSubqueriesPreprocessor.cpp +++ b/src/Interpreters/InJoinSubqueriesPreprocessor.cpp @@ -11,6 +11,7 @@ #include #include #include +#include namespace DB diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp index c70a3397f4e..8f9c6bc533e 100644 --- a/src/Interpreters/InterpreterAlterQuery.cpp +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -3,6 +3,8 @@ #include #include +#include +#include #include #include #include @@ -24,7 +26,6 @@ #include #include #include -#include #include #include @@ -175,10 +176,9 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter) else throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong parameter type in ALTER query"); - if (!getContext()->getSettings().allow_experimental_statistics && ( - command_ast->type == ASTAlterCommand::ADD_STATISTICS || - command_ast->type == ASTAlterCommand::DROP_STATISTICS || - command_ast->type == ASTAlterCommand::MATERIALIZE_STATISTICS)) + if (!getContext()->getSettingsRef().allow_experimental_statistics + && (command_ast->type == ASTAlterCommand::ADD_STATISTICS || command_ast->type == ASTAlterCommand::DROP_STATISTICS + || command_ast->type == ASTAlterCommand::MATERIALIZE_STATISTICS)) throw Exception(ErrorCodes::INCORRECT_QUERY, "Alter table with statistics is now disabled. 
Turn on allow_experimental_statistics"); } diff --git a/src/Interpreters/InterpreterCheckQuery.cpp b/src/Interpreters/InterpreterCheckQuery.cpp index 4a84a7bf570..5c4fb7a1443 100644 --- a/src/Interpreters/InterpreterCheckQuery.cpp +++ b/src/Interpreters/InterpreterCheckQuery.cpp @@ -12,6 +12,8 @@ #include #include +#include + #include #include diff --git a/src/Interpreters/InterpreterCreateIndexQuery.cpp b/src/Interpreters/InterpreterCreateIndexQuery.cpp index a439cb672c8..1c6abe842d4 100644 --- a/src/Interpreters/InterpreterCreateIndexQuery.cpp +++ b/src/Interpreters/InterpreterCreateIndexQuery.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 0ee2bb6c0e9..84d7f0a587c 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -5,7 +5,7 @@ #include #include -#include "Common/Exception.h" +#include #include #include #include @@ -14,8 +14,8 @@ #include #include #include +#include #include -#include #include #include @@ -34,10 +34,11 @@ #include #include +#include #include #include -#include #include +#include #include #include diff --git a/src/Interpreters/InterpreterCreateQuery.h b/src/Interpreters/InterpreterCreateQuery.h index 70ef29e6b07..3982ea2cabc 100644 --- a/src/Interpreters/InterpreterCreateQuery.h +++ b/src/Interpreters/InterpreterCreateQuery.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include #include diff --git a/src/Interpreters/InterpreterDeleteQuery.cpp b/src/Interpreters/InterpreterDeleteQuery.cpp index 39d5d9e9cef..84fe0a9f4ae 100644 --- a/src/Interpreters/InterpreterDeleteQuery.cpp +++ b/src/Interpreters/InterpreterDeleteQuery.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterDescribeQuery.cpp b/src/Interpreters/InterpreterDescribeQuery.cpp index 87b0eb89302..39fc85a5e23 100644 --- a/src/Interpreters/InterpreterDescribeQuery.cpp +++ b/src/Interpreters/InterpreterDescribeQuery.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterDropIndexQuery.cpp b/src/Interpreters/InterpreterDropIndexQuery.cpp index f052aa201f1..8777532e4d0 100644 --- a/src/Interpreters/InterpreterDropIndexQuery.cpp +++ b/src/Interpreters/InterpreterDropIndexQuery.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index ba788a2e1f3..b68b3ddcd48 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include "config.h" diff --git a/src/Interpreters/InterpreterExplainQuery.cpp b/src/Interpreters/InterpreterExplainQuery.cpp index 7c7b4b3f95a..d7addcd6e34 100644 --- a/src/Interpreters/InterpreterExplainQuery.cpp +++ b/src/Interpreters/InterpreterExplainQuery.cpp @@ -29,6 +29,7 @@ #include #include +#include #include #include @@ -69,7 +70,7 @@ namespace static void visit(ASTSelectQuery & select, ASTPtr & node, Data & data) { /// we need to read statistic when `allow_statistics_optimize` is enabled. 
- bool only_analyze = !data.getContext()->getSettings().allow_statistics_optimize; + bool only_analyze = !data.getContext()->getSettingsRef().allow_statistics_optimize; InterpreterSelectQuery interpreter( node, data.getContext(), SelectQueryOptions(QueryProcessingStage::FetchColumns).analyze(only_analyze).modify()); diff --git a/src/Interpreters/InterpreterFactory.cpp b/src/Interpreters/InterpreterFactory.cpp index 387d056ffe7..12b3b510098 100644 --- a/src/Interpreters/InterpreterFactory.cpp +++ b/src/Interpreters/InterpreterFactory.cpp @@ -60,6 +60,7 @@ #include #include #include +#include namespace ProfileEvents diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index f396db70d21..ef298d4d45a 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterKillQueryQuery.cpp b/src/Interpreters/InterpreterKillQueryQuery.cpp index 6d6b1085ffb..7eb487ba7b3 100644 --- a/src/Interpreters/InterpreterKillQueryQuery.cpp +++ b/src/Interpreters/InterpreterKillQueryQuery.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterRenameQuery.cpp b/src/Interpreters/InterpreterRenameQuery.cpp index 32c475d138f..c6e52590f89 100644 --- a/src/Interpreters/InterpreterRenameQuery.cpp +++ b/src/Interpreters/InterpreterRenameQuery.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include diff --git a/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp b/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp index 6eac2db20c9..b0da4caa3ac 100644 --- a/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp +++ b/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index f13710e9ad5..cd91f9532b9 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -84,6 +84,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp index cc1d7dd6531..e953a07b41d 100644 --- a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp +++ b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterShowColumnsQuery.cpp b/src/Interpreters/InterpreterShowColumnsQuery.cpp index f32ebceaa63..d8fff4e6026 100644 --- a/src/Interpreters/InterpreterShowColumnsQuery.cpp +++ b/src/Interpreters/InterpreterShowColumnsQuery.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index d3526941b33..c284acfa308 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterTransactionControlQuery.cpp b/src/Interpreters/InterpreterTransactionControlQuery.cpp index 13872fbe3f5..e92d6f9c5e7 100644 --- a/src/Interpreters/InterpreterTransactionControlQuery.cpp +++ 
b/src/Interpreters/InterpreterTransactionControlQuery.cpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Interpreters/JoinedTables.cpp b/src/Interpreters/JoinedTables.cpp index 457ed3ef4a6..0de2bf9cb1f 100644 --- a/src/Interpreters/JoinedTables.cpp +++ b/src/Interpreters/JoinedTables.cpp @@ -1,5 +1,6 @@ #include +#include #include #include diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 6d3a4f30b34..480c6736bc5 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -17,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -41,6 +43,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Interpreters/MutationsNonDeterministicHelpers.cpp b/src/Interpreters/MutationsNonDeterministicHelpers.cpp index 7a4cb91acc0..bcff3e18b25 100644 --- a/src/Interpreters/MutationsNonDeterministicHelpers.cpp +++ b/src/Interpreters/MutationsNonDeterministicHelpers.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.h b/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.h index e8194f0dfe1..b2f55003da5 100644 --- a/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.h +++ b/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.h @@ -1,11 +1,10 @@ #pragma once -#include - +#include #include #include -#include +#include namespace DB { diff --git a/src/Interpreters/PredicateExpressionsOptimizer.cpp b/src/Interpreters/PredicateExpressionsOptimizer.cpp index 8dc8c1c92cc..af67e62be1a 100644 --- a/src/Interpreters/PredicateExpressionsOptimizer.cpp +++ b/src/Interpreters/PredicateExpressionsOptimizer.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include diff --git a/src/Interpreters/PreparedSets.cpp b/src/Interpreters/PreparedSets.cpp index b04c8b1b725..43a04f8aff0 100644 --- a/src/Interpreters/PreparedSets.cpp +++ b/src/Interpreters/PreparedSets.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/QueryLog.cpp b/src/Interpreters/QueryLog.cpp index e63a2ae31aa..527159dc981 100644 --- a/src/Interpreters/QueryLog.cpp +++ b/src/Interpreters/QueryLog.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/QueryLog.h b/src/Interpreters/QueryLog.h index bbaa7179757..d9cce4b9dd7 100644 --- a/src/Interpreters/QueryLog.h +++ b/src/Interpreters/QueryLog.h @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include #include @@ -22,6 +22,8 @@ namespace ProfileEvents namespace DB { +struct Settings; + /** Allows to log information about queries execution: * - info about start of query execution; diff --git a/src/Interpreters/QueryViewsLog.h b/src/Interpreters/QueryViewsLog.h index 2de06fe3ddc..8f382ba6183 100644 --- a/src/Interpreters/QueryViewsLog.h +++ b/src/Interpreters/QueryViewsLog.h @@ -6,7 +6,7 @@ #include #include -#include +#include #include #include #include diff --git a/src/Interpreters/RewriteCountVariantsVisitor.cpp b/src/Interpreters/RewriteCountVariantsVisitor.cpp index f207bc51527..4a541c3765a 100644 --- a/src/Interpreters/RewriteCountVariantsVisitor.cpp +++ b/src/Interpreters/RewriteCountVariantsVisitor.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include diff --git a/src/Interpreters/Session.cpp 
b/src/Interpreters/Session.cpp index fb80b12ee60..5b378087707 100644 --- a/src/Interpreters/Session.cpp +++ b/src/Interpreters/Session.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/TableJoin.h b/src/Interpreters/TableJoin.h index 8e83233e54c..ed14fe4273c 100644 --- a/src/Interpreters/TableJoin.h +++ b/src/Interpreters/TableJoin.h @@ -2,7 +2,6 @@ #include #include -#include #include #include #include diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp index 6ec6a64b13d..1f7c6b1fe68 100644 --- a/src/Interpreters/ThreadStatusExt.cpp +++ b/src/Interpreters/ThreadStatusExt.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index 4e1a2bcf5ee..ac80ad2fb68 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -24,6 +25,7 @@ #include #include #include + #include diff --git a/src/Interpreters/executeDDLQueryOnCluster.cpp b/src/Interpreters/executeDDLQueryOnCluster.cpp index 9c3f85128cf..5d237d28089 100644 --- a/src/Interpreters/executeDDLQueryOnCluster.cpp +++ b/src/Interpreters/executeDDLQueryOnCluster.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/formatWithPossiblyHidingSecrets.h b/src/Interpreters/formatWithPossiblyHidingSecrets.h index 25e1e7a5616..039bcbc2bca 100644 --- a/src/Interpreters/formatWithPossiblyHidingSecrets.h +++ b/src/Interpreters/formatWithPossiblyHidingSecrets.h @@ -2,6 +2,8 @@ #include "Access/ContextAccess.h" #include "Interpreters/Context.h" +#include + namespace DB { struct SecretHidingFormatSettings diff --git a/src/Interpreters/getCustomKeyFilterForParallelReplicas.cpp b/src/Interpreters/getCustomKeyFilterForParallelReplicas.cpp index 43be7c5f043..6f902c7cd7e 100644 --- a/src/Interpreters/getCustomKeyFilterForParallelReplicas.cpp +++ b/src/Interpreters/getCustomKeyFilterForParallelReplicas.cpp @@ -1,5 +1,7 @@ #include +#include + #include #include #include diff --git a/src/Interpreters/getHeaderForProcessingStage.cpp b/src/Interpreters/getHeaderForProcessingStage.cpp index 06c5d424d2f..cf18cbbb54a 100644 --- a/src/Interpreters/getHeaderForProcessingStage.cpp +++ b/src/Interpreters/getHeaderForProcessingStage.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/src/Interpreters/interpretSubquery.cpp b/src/Interpreters/interpretSubquery.cpp index 6b42345f1d6..340f6d1d805 100644 --- a/src/Interpreters/interpretSubquery.cpp +++ b/src/Interpreters/interpretSubquery.cpp @@ -1,4 +1,5 @@ #include +#include #include #include diff --git a/src/Interpreters/loadMetadata.cpp b/src/Interpreters/loadMetadata.cpp index 9c3922b8bda..b5148174043 100644 --- a/src/Interpreters/loadMetadata.cpp +++ b/src/Interpreters/loadMetadata.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include diff --git a/src/Interpreters/parseColumnsListForTableFunction.cpp b/src/Interpreters/parseColumnsListForTableFunction.cpp index 3529863a623..b9fdaabede1 100644 --- a/src/Interpreters/parseColumnsListForTableFunction.cpp +++ b/src/Interpreters/parseColumnsListForTableFunction.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -21,6 +22,18 @@ extern const int ILLEGAL_COLUMN; } 
+DataTypeValidationSettings::DataTypeValidationSettings(const DB::Settings& settings) + : allow_suspicious_low_cardinality_types(settings.allow_suspicious_low_cardinality_types) + , allow_experimental_object_type(settings.allow_experimental_object_type) + , allow_suspicious_fixed_string_types(settings.allow_suspicious_fixed_string_types) + , allow_experimental_variant_type(settings.allow_experimental_variant_type) + , allow_suspicious_variant_types(settings.allow_suspicious_variant_types) + , validate_nested_types(settings.validate_experimental_and_suspicious_types_inside_nested_types) + , allow_experimental_dynamic_type(settings.allow_experimental_dynamic_type) +{ +} + + void validateDataType(const DataTypePtr & type_to_check, const DataTypeValidationSettings & settings) { auto validate_callback = [&](const IDataType & data_type) diff --git a/src/Interpreters/parseColumnsListForTableFunction.h b/src/Interpreters/parseColumnsListForTableFunction.h index e2d2bc97ff7..6f15c585e4f 100644 --- a/src/Interpreters/parseColumnsListForTableFunction.h +++ b/src/Interpreters/parseColumnsListForTableFunction.h @@ -2,28 +2,19 @@ #include #include -#include namespace DB { class Context; +struct Settings; struct DataTypeValidationSettings { DataTypeValidationSettings() = default; - explicit DataTypeValidationSettings(const Settings & settings) - : allow_suspicious_low_cardinality_types(settings.allow_suspicious_low_cardinality_types) - , allow_experimental_object_type(settings.allow_experimental_object_type) - , allow_suspicious_fixed_string_types(settings.allow_suspicious_fixed_string_types) - , allow_experimental_variant_type(settings.allow_experimental_variant_type) - , allow_suspicious_variant_types(settings.allow_suspicious_variant_types) - , validate_nested_types(settings.validate_experimental_and_suspicious_types_inside_nested_types) - , allow_experimental_dynamic_type(settings.allow_experimental_dynamic_type) - { - } + explicit DataTypeValidationSettings(const Settings & settings); bool allow_suspicious_low_cardinality_types = true; bool allow_experimental_object_type = true; diff --git a/src/Interpreters/removeOnClusterClauseIfNeeded.cpp b/src/Interpreters/removeOnClusterClauseIfNeeded.cpp index dd20164925c..a6595d330fc 100644 --- a/src/Interpreters/removeOnClusterClauseIfNeeded.cpp +++ b/src/Interpreters/removeOnClusterClauseIfNeeded.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -57,13 +58,13 @@ ASTPtr removeOnClusterClauseIfNeeded(const ASTPtr & query, ContextPtr context, c return query; if ((isUserDefinedFunctionQuery(query) - && context->getSettings().ignore_on_cluster_for_replicated_udf_queries + && context->getSettingsRef().ignore_on_cluster_for_replicated_udf_queries && context->getUserDefinedSQLObjectsStorage().isReplicated()) || (isAccessControlQuery(query) - && context->getSettings().ignore_on_cluster_for_replicated_access_entities_queries + && context->getSettingsRef().ignore_on_cluster_for_replicated_access_entities_queries && context->getAccessControl().containsStorage(ReplicatedAccessStorage::STORAGE_TYPE)) || (isNamedCollectionQuery(query) - && context->getSettings().ignore_on_cluster_for_replicated_named_collections_queries + && context->getSettingsRef().ignore_on_cluster_for_replicated_named_collections_queries && NamedCollectionFactory::instance().usesReplicatedStorage())) { LOG_DEBUG(getLogger("removeOnClusterClauseIfNeeded"), "ON CLUSTER clause was ignored for query {}", query->getID()); diff --git a/src/Planner/CollectSets.cpp 
b/src/Planner/CollectSets.cpp index 52a0d748d63..9cac4e61380 100644 --- a/src/Planner/CollectSets.cpp +++ b/src/Planner/CollectSets.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 49c4e264f7d..5640d84731f 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -3,7 +3,8 @@ #include #include #include -#include +#include +#include #include #include @@ -40,6 +41,7 @@ #include #include +#include #include #include diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index 7a12d5d690d..a0c7f9b2516 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -30,6 +30,8 @@ #include #include +#include + namespace DB { diff --git a/src/Planner/PlannerExpressionAnalysis.cpp b/src/Planner/PlannerExpressionAnalysis.cpp index ceb506d1bbb..68e0ef9785e 100644 --- a/src/Planner/PlannerExpressionAnalysis.cpp +++ b/src/Planner/PlannerExpressionAnalysis.cpp @@ -23,6 +23,8 @@ #include #include +#include + namespace DB { diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index d18342880f0..ab599331a9d 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -1,5 +1,7 @@ #include +#include + #include #include diff --git a/src/Planner/PlannerJoins.cpp b/src/Planner/PlannerJoins.cpp index 05420cc3301..d9360a58240 100644 --- a/src/Planner/PlannerJoins.cpp +++ b/src/Planner/PlannerJoins.cpp @@ -43,6 +43,7 @@ #include #include +#include #include namespace DB @@ -812,7 +813,7 @@ static std::shared_ptr tryCreateJoin(JoinAlgorithm algorithm, query_context->getServerSettings().max_entries_for_hash_table_stats, settings.max_size_to_preallocate_for_joins}; return std::make_shared( - query_context, table_join, query_context->getSettings().max_threads, right_table_expression_header, params); + query_context, table_join, query_context->getSettingsRef().max_threads, right_table_expression_header, params); } return std::make_shared(table_join, right_table_expression_header, query_context->getSettingsRef().join_any_take_last_row); diff --git a/src/Planner/PlannerSorting.cpp b/src/Planner/PlannerSorting.cpp index 611a26f78fa..1c6e820fed1 100644 --- a/src/Planner/PlannerSorting.cpp +++ b/src/Planner/PlannerSorting.cpp @@ -1,5 +1,7 @@ #include +#include + #include #include diff --git a/src/Planner/PlannerWindowFunctions.cpp b/src/Planner/PlannerWindowFunctions.cpp index ce74d82c08d..225852de5a7 100644 --- a/src/Planner/PlannerWindowFunctions.cpp +++ b/src/Planner/PlannerWindowFunctions.cpp @@ -4,6 +4,8 @@ #include #include +#include + #include #include diff --git a/src/Planner/Utils.cpp b/src/Planner/Utils.cpp index 18a6d297838..3c54c57a28c 100644 --- a/src/Planner/Utils.cpp +++ b/src/Planner/Utils.cpp @@ -33,6 +33,8 @@ #include #include +#include + #include #include #include diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index f2bc1f060d8..c89a70be541 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp index 49ec9999521..82cad471a29 100644 --- a/src/Processors/Executors/PipelineExecutor.cpp +++ b/src/Processors/Executors/PipelineExecutor.cpp @@ -12,6 +12,7 @@ #include #include #include +#include 
#ifndef NDEBUG #include diff --git a/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp index fe82d1b1c53..06e8668cd7c 100644 --- a/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp +++ b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Processors/Formats/Impl/FormRowInputFormat.cpp b/src/Processors/Formats/Impl/FormRowInputFormat.cpp index d3c6f3798cc..698efb896ea 100644 --- a/src/Processors/Formats/Impl/FormRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/FormRowInputFormat.cpp @@ -1,7 +1,9 @@ -#include +#include #include "Formats/EscapingRuleUtils.h" #include +#include + namespace DB { diff --git a/src/Processors/Formats/Impl/NpyRowInputFormat.cpp b/src/Processors/Formats/Impl/NpyRowInputFormat.cpp index 65e0f9dd192..773cbc9268e 100644 --- a/src/Processors/Formats/Impl/NpyRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/NpyRowInputFormat.cpp @@ -445,6 +445,9 @@ bool NpyRowInputFormat::readRow(MutableColumns & columns, RowReadExtension & /* elements_in_current_column *= header.shape[i]; } + if (typeid_cast(current_column)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected nesting level of column '{}', expected {}", column->getName(), header.shape.size() - 1); + for (size_t i = 0; i != elements_in_current_column; ++i) readValue(current_column); diff --git a/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h index 4c52300fbd1..698efecd4b2 100644 --- a/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h @@ -74,7 +74,8 @@ void registerPrettyFormatWithNoEscapesAndMonoBlock(FormatFactory & factory, cons const Block & sample, const FormatSettings & format_settings) { - bool color = !no_escapes && format_settings.pretty.color.valueOr(format_settings.is_writing_to_terminal); + bool color = !no_escapes + && (format_settings.pretty.color == 1 || (format_settings.pretty.color == 2 && format_settings.is_writing_to_terminal)); return std::make_shared(buf, sample, format_settings, mono_block, color); }); if (!mono_block) diff --git a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp index 1493779ec2d..c23d717d52f 100644 --- a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Processors/Formats/InputFormatErrorsLogger.cpp b/src/Processors/Formats/InputFormatErrorsLogger.cpp index 814c4679cf9..b5aa936df07 100644 --- a/src/Processors/Formats/InputFormatErrorsLogger.cpp +++ b/src/Processors/Formats/InputFormatErrorsLogger.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB diff --git a/src/Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.h b/src/Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.h index 70ae4c6156e..27171511703 100644 --- a/src/Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.h +++ b/src/Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.h @@ -1,4 +1,6 @@ #pragma once + +#include #include #include diff --git a/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp b/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp index 1f4f271fa6e..14f58585d3d 100644 --- a/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp +++ 
b/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp index 1badd315200..3cb7f8a5d83 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -8,6 +9,7 @@ #include #include #include + namespace DB { diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index e1ef38022b5..28eb4da2e17 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -22,9 +22,10 @@ #include #include #include -#include "Storages/KeyDescription.h" +#include #include #include +#include #include diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp index 70327bc95b4..9e2c2dcc96d 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -515,10 +516,11 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( if (!candidates.minmax_projection) { - auto it = std::find_if(agg_projections.begin(), agg_projections.end(), [&](const auto * projection) - { - return projection->name == context->getSettings().preferred_optimize_projection_name.value; - }); + auto it = std::find_if( + agg_projections.begin(), + agg_projections.end(), + [&](const auto * projection) + { return projection->name == context->getSettingsRef().preferred_optimize_projection_name.value; }); if (it != agg_projections.end()) { diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp index 0af3869ccf1..291800c2c86 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp @@ -7,9 +7,11 @@ #include #include #include +#include #include #include #include + #include namespace DB::QueryPlanOptimizations @@ -110,10 +112,10 @@ std::optional optimizeUseNormalProjections(Stack & stack, QueryPlan::Nod return {}; ContextPtr context = reading->getContext(); - auto it = std::find_if(normal_projections.begin(), normal_projections.end(), [&](const auto * projection) - { - return projection->name == context->getSettings().preferred_optimize_projection_name.value; - }); + auto it = std::find_if( + normal_projections.begin(), + normal_projections.end(), + [&](const auto * projection) { return projection->name == context->getSettingsRef().preferred_optimize_projection_name.value; }); if (it != normal_projections.end()) { diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp index af1578d6af8..4d60a1135a6 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp +++ b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include #include diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp 
b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index dc6d96a721e..6e86d14f5cc 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Processors/QueryPlan/ReadFromRemote.cpp b/src/Processors/QueryPlan/ReadFromRemote.cpp index 84c2515e8ca..fe724a8a198 100644 --- a/src/Processors/QueryPlan/ReadFromRemote.cpp +++ b/src/Processors/QueryPlan/ReadFromRemote.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Processors/QueryPlan/ReadFromStreamLikeEngine.cpp b/src/Processors/QueryPlan/ReadFromStreamLikeEngine.cpp index 4a257bba922..51117ad3c9b 100644 --- a/src/Processors/QueryPlan/ReadFromStreamLikeEngine.cpp +++ b/src/Processors/QueryPlan/ReadFromStreamLikeEngine.cpp @@ -1,5 +1,6 @@ #include +#include #include #include diff --git a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp index eb974259c5e..a3ae035afdd 100644 --- a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp +++ b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index 1c40f84d23d..48fad9f5fdb 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -1,4 +1,3 @@ -#include #include #include #include @@ -10,10 +9,13 @@ #include #include #include +#include #include #include +#include + namespace CurrentMetrics { extern const Metric TemporaryFilesForSort; diff --git a/src/Processors/QueryPlan/TotalsHavingStep.cpp b/src/Processors/QueryPlan/TotalsHavingStep.cpp index ac5e144bf4a..10384f6296a 100644 --- a/src/Processors/QueryPlan/TotalsHavingStep.cpp +++ b/src/Processors/QueryPlan/TotalsHavingStep.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Processors/Sources/MySQLSource.cpp b/src/Processors/Sources/MySQLSource.cpp index 985a82a7b17..5d533a7747e 100644 --- a/src/Processors/Sources/MySQLSource.cpp +++ b/src/Processors/Sources/MySQLSource.cpp @@ -2,7 +2,9 @@ #if USE_MYSQL #include + #include +#include #include #include #include diff --git a/src/Processors/Sources/MySQLSource.h b/src/Processors/Sources/MySQLSource.h index 4ae5af22dab..b99e48a7759 100644 --- a/src/Processors/Sources/MySQLSource.h +++ b/src/Processors/Sources/MySQLSource.h @@ -6,11 +6,12 @@ #include #include #include -#include namespace DB { +struct Settings; + struct StreamSettings { /// Check if setting is enabled, otherwise use common `max_block_size` setting. 
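
The hunks above (QueryLog.h, MySQLSource.h, and many of the header diffs earlier in this patch) repeat one refactoring pattern: the heavy Settings include is dropped from the header, a forward declaration "struct Settings;" is added in its place, and any member that actually dereferences a setting moves out-of-line into the .cpp, which becomes the only file that still includes the full definition. A minimal sketch of the idiom follows; the names Widget, widget.h, settings.h, and max_block_size are illustrative stand-ins, not code from this patch:

// widget.h -- compiles against a forward declaration only.
#pragma once
#include <cstdint>

namespace DB
{
struct Settings; // forward declaration instead of including the full header

class Widget
{
public:
    explicit Widget(const Settings & settings_);
    uint64_t maxBlockSize() const; // body lives in widget.cpp
private:
    const Settings & settings; // a reference member needs no complete type
};
}

// widget.cpp -- the only translation unit that pays for the full definition.
#include "widget.h"
#include "settings.h" // hypothetical header with the complete DB::Settings

namespace DB
{
Widget::Widget(const Settings & settings_) : settings(settings_) { }

uint64_t Widget::maxBlockSize() const
{
    return settings.max_block_size;
}
}

The payoff is build isolation: editing the settings header then recompiles widget.cpp alone, instead of every translation unit that includes widget.h.
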
diff --git a/src/Processors/Sources/RecursiveCTESource.cpp b/src/Processors/Sources/RecursiveCTESource.cpp index 221198c622a..40e72e9cbb1 100644 --- a/src/Processors/Sources/RecursiveCTESource.cpp +++ b/src/Processors/Sources/RecursiveCTESource.cpp @@ -17,6 +17,8 @@ #include #include +#include + namespace DB { diff --git a/src/Processors/TTL/TTLAggregationAlgorithm.cpp b/src/Processors/TTL/TTLAggregationAlgorithm.cpp index 45e8a96412e..2d7a37d0abe 100644 --- a/src/Processors/TTL/TTLAggregationAlgorithm.cpp +++ b/src/Processors/TTL/TTLAggregationAlgorithm.cpp @@ -1,5 +1,6 @@ -#include +#include #include +#include namespace DB { diff --git a/src/Processors/Transforms/DistinctSortedChunkTransform.h b/src/Processors/Transforms/DistinctSortedChunkTransform.h index 188e3d5c4c7..cdb20410a5b 100644 --- a/src/Processors/Transforms/DistinctSortedChunkTransform.h +++ b/src/Processors/Transforms/DistinctSortedChunkTransform.h @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Processors/Transforms/DistinctSortedTransform.h b/src/Processors/Transforms/DistinctSortedTransform.h index 86c5c968e4b..52819f1afd7 100644 --- a/src/Processors/Transforms/DistinctSortedTransform.h +++ b/src/Processors/Transforms/DistinctSortedTransform.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include diff --git a/src/Processors/Transforms/TotalsHavingTransform.cpp b/src/Processors/Transforms/TotalsHavingTransform.cpp index aa86879e62c..3f278ca884f 100644 --- a/src/Processors/Transforms/TotalsHavingTransform.cpp +++ b/src/Processors/Transforms/TotalsHavingTransform.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include diff --git a/src/Processors/Transforms/buildPushingToViewsChain.cpp b/src/Processors/Transforms/buildPushingToViewsChain.cpp index 25fbf13b0e7..c616c882ed9 100644 --- a/src/Processors/Transforms/buildPushingToViewsChain.cpp +++ b/src/Processors/Transforms/buildPushingToViewsChain.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include diff --git a/src/Processors/Transforms/getSourceFromASTInsertQuery.cpp b/src/Processors/Transforms/getSourceFromASTInsertQuery.cpp index a2a42f27c3f..c33e87815b1 100644 --- a/src/Processors/Transforms/getSourceFromASTInsertQuery.cpp +++ b/src/Processors/Transforms/getSourceFromASTInsertQuery.cpp @@ -14,7 +14,8 @@ #include #include #include "IO/CompressionMethod.h" -#include "Parsers/ASTLiteral.h" +#include +#include namespace DB diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index 14457d2df43..b08f2002f64 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -907,4 +908,10 @@ void RemoteQueryExecutor::setProfileInfoCallback(ProfileInfoCallback callback) std::lock_guard guard(was_cancelled_mutex); profile_info_callback = std::move(callback); } + +bool RemoteQueryExecutor::needToSkipUnavailableShard() const +{ + return context->getSettingsRef().skip_unavailable_shards && (0 == connections->size()); +} + } diff --git a/src/QueryPipeline/RemoteQueryExecutor.h b/src/QueryPipeline/RemoteQueryExecutor.h index 6b1539bd08e..04a59cc3b7e 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.h +++ b/src/QueryPipeline/RemoteQueryExecutor.h @@ -218,7 +218,7 @@ public: IConnections & getConnections() { return *connections; } - bool needToSkipUnavailableShard() const { return context->getSettingsRef().skip_unavailable_shards && (0 
== connections->size()); } + bool needToSkipUnavailableShard() const; bool isReplicaUnavailable() const { return extension && extension->parallel_reading_coordinator && connections->size() == 0; } diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index cb36df1efc0..d8a4d7f0e1f 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -1082,8 +1083,8 @@ namespace read_buffer = wrapReadBufferWithCompressionMethod(std::move(read_buffer), input_compression_method); assert(!pipeline); - auto source = query_context->getInputFormat( - input_format, *read_buffer, header, query_context->getSettings().max_insert_block_size); + auto source + = query_context->getInputFormat(input_format, *read_buffer, header, query_context->getSettingsRef().max_insert_block_size); pipeline = std::make_unique(std::move(source)); pipeline_executor = std::make_unique(*pipeline); @@ -1151,8 +1152,7 @@ namespace external_table_context->applySettingsChanges(settings_changes); } auto in = external_table_context->getInputFormat( - format, *buf, metadata_snapshot->getSampleBlock(), - external_table_context->getSettings().max_insert_block_size); + format, *buf, metadata_snapshot->getSampleBlock(), external_table_context->getSettingsRef().max_insert_block_size); QueryPipelineBuilder cur_pipeline; cur_pipeline.init(Pipe(std::move(in))); diff --git a/src/Server/HTTP/sendExceptionToHTTPClient.cpp b/src/Server/HTTP/sendExceptionToHTTPClient.cpp index 78650758e35..022a763a9a2 100644 --- a/src/Server/HTTP/sendExceptionToHTTPClient.cpp +++ b/src/Server/HTTP/sendExceptionToHTTPClient.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include diff --git a/src/Server/HTTP/setReadOnlyIfHTTPMethodIdempotent.cpp b/src/Server/HTTP/setReadOnlyIfHTTPMethodIdempotent.cpp index d42bd77e339..48a3b39a3e7 100644 --- a/src/Server/HTTP/setReadOnlyIfHTTPMethodIdempotent.cpp +++ b/src/Server/HTTP/setReadOnlyIfHTTPMethodIdempotent.cpp @@ -1,5 +1,6 @@ #include +#include #include #include diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index 370af79e456..c8a58527f2c 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -4,6 +4,8 @@ #include #include #include +#include +#include #include #include #include diff --git a/src/Server/InterserverIOHTTPHandler.cpp b/src/Server/InterserverIOHTTPHandler.cpp index 45c28babe3a..e46021c8e68 100644 --- a/src/Server/InterserverIOHTTPHandler.cpp +++ b/src/Server/InterserverIOHTTPHandler.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include #include diff --git a/src/Server/KeeperReadinessHandler.cpp b/src/Server/KeeperReadinessHandler.cpp index c973be040c8..1972c3263b2 100644 --- a/src/Server/KeeperReadinessHandler.cpp +++ b/src/Server/KeeperReadinessHandler.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include #include diff --git a/src/Server/KeeperTCPHandler.cpp b/src/Server/KeeperTCPHandler.cpp index 5f26542d39c..4849f5827c1 100644 --- a/src/Server/KeeperTCPHandler.cpp +++ b/src/Server/KeeperTCPHandler.cpp @@ -3,6 +3,7 @@ #if USE_NURAFT # include +# include # include # include # include diff --git a/src/Server/MySQLHandler.cpp b/src/Server/MySQLHandler.cpp index c0f015bfcd5..b6d795b1e69 100644 --- a/src/Server/MySQLHandler.cpp +++ b/src/Server/MySQLHandler.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Server/MySQLHandler.h b/src/Server/MySQLHandler.h index 2f891ebf810..f632ef51c00 
100644 --- a/src/Server/MySQLHandler.h +++ b/src/Server/MySQLHandler.h @@ -8,6 +8,7 @@ #include #include #include +#include #include "IO/ReadBufferFromString.h" #include "IServer.h" diff --git a/src/Server/NotFoundHandler.cpp b/src/Server/NotFoundHandler.cpp index 38f56921c89..304021ee8dc 100644 --- a/src/Server/NotFoundHandler.cpp +++ b/src/Server/NotFoundHandler.cpp @@ -1,6 +1,7 @@ #include #include +#include #include namespace DB diff --git a/src/Server/PostgreSQLHandler.cpp b/src/Server/PostgreSQLHandler.cpp index 8ba8421e6f0..cde31b4c58a 100644 --- a/src/Server/PostgreSQLHandler.cpp +++ b/src/Server/PostgreSQLHandler.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #if USE_SSL # include diff --git a/src/Server/ProxyV1Handler.cpp b/src/Server/ProxyV1Handler.cpp index d5e6ab23360..5b33a119cef 100644 --- a/src/Server/ProxyV1Handler.cpp +++ b/src/Server/ProxyV1Handler.cpp @@ -1,3 +1,4 @@ +#include #include #include #include diff --git a/src/Server/ReplicasStatusHandler.cpp b/src/Server/ReplicasStatusHandler.cpp index 67823117758..f43357db0a8 100644 --- a/src/Server/ReplicasStatusHandler.cpp +++ b/src/Server/ReplicasStatusHandler.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include @@ -7,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/src/Server/StaticRequestHandler.cpp b/src/Server/StaticRequestHandler.cpp index 331b7a84857..f3981dea9fb 100644 --- a/src/Server/StaticRequestHandler.cpp +++ b/src/Server/StaticRequestHandler.cpp @@ -4,13 +4,14 @@ #include "HTTPHandlerFactory.h" #include "HTTPResponseHeaderWriter.h" +#include #include #include #include -#include #include -#include +#include #include +#include #include diff --git a/src/Server/WebUIRequestHandler.cpp b/src/Server/WebUIRequestHandler.cpp index 68d3ff0b325..a3d098014e7 100644 --- a/src/Server/WebUIRequestHandler.cpp +++ b/src/Server/WebUIRequestHandler.cpp @@ -5,8 +5,9 @@ #include #include -#include +#include #include +#include #include #include diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 35a5e95e643..7891042bb96 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -33,9 +34,9 @@ #include #include #include +#include #include #include -#include #include diff --git a/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp b/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp index 06d4c185840..e1facec5b40 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp +++ b/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp @@ -196,6 +196,15 @@ void DistributedAsyncInsertBatch::readText(ReadBuffer & in) UInt64 idx; in >> idx >> "\n"; files.push_back(std::filesystem::absolute(fmt::format("{}/{}.bin", parent.path, idx)).string()); + + ReadBufferFromFile header_buffer(files.back()); + const DistributedAsyncInsertHeader & header = DistributedAsyncInsertHeader::read(header_buffer, parent.log); + + if (header.rows) + { + total_rows += header.rows; + total_bytes += header.bytes; + } } recovered = true; diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index e556bda2561..a2bd7237854 100644 --- a/src/Storages/Distributed/DistributedSink.cpp +++ b/src/Storages/Distributed/DistributedSink.cpp @@ -32,6 +32,7 @@ #include #include #include +#include #include diff --git a/src/Storages/FileLog/FileLogSettings.cpp index 
8e245285b9a..cf2d6b64d7c 100644 --- a/src/Storages/FileLog/FileLogSettings.cpp +++ b/src/Storages/FileLog/FileLogSettings.cpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB diff --git a/src/Storages/Hive/StorageHive.cpp b/src/Storages/Hive/StorageHive.cpp index 255dadea387..bdbb12338fd 100644 --- a/src/Storages/Hive/StorageHive.cpp +++ b/src/Storages/Hive/StorageHive.cpp @@ -321,7 +321,7 @@ public: Chunk generateChunkFromMetadata() { - size_t num_rows = std::min(current_file_remained_rows, UInt64(getContext()->getSettings().max_block_size)); + size_t num_rows = std::min(current_file_remained_rows, UInt64(getContext()->getSettingsRef().max_block_size)); current_file_remained_rows -= num_rows; Block source_block; @@ -964,7 +964,7 @@ HiveFiles StorageHive::collectHiveFiles( /// Hive files to collect HiveFiles hive_files; Int64 hit_parttions_num = 0; - Int64 hive_max_query_partitions = context_->getSettings().max_partitions_to_read; + Int64 hive_max_query_partitions = context_->getSettingsRef().max_partitions_to_read; /// Mutext to protect hive_files, which maybe appended in multiple threads std::mutex hive_files_mutex; ThreadPool pool{CurrentMetrics::StorageHiveThreads, CurrentMetrics::StorageHiveThreadsActive, CurrentMetrics::StorageHiveThreadsScheduled, max_threads}; diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index 1f7ac23eb82..3b9c6ff28fa 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include diff --git a/src/Storages/IStorageCluster.cpp b/src/Storages/IStorageCluster.cpp index 9c5b29ae265..63467603d16 100644 --- a/src/Storages/IStorageCluster.cpp +++ b/src/Storages/IStorageCluster.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include diff --git a/src/Storages/LiveView/StorageLiveView.cpp b/src/Storages/LiveView/StorageLiveView.cpp index 57a1ea302f9..f1323d45c26 100644 --- a/src/Storages/LiveView/StorageLiveView.cpp +++ b/src/Storages/LiveView/StorageLiveView.cpp @@ -27,6 +27,7 @@ limitations under the License. 
*/ #include #include #include +#include #include #include diff --git a/src/Storages/MaterializedView/RefreshTask.cpp b/src/Storages/MaterializedView/RefreshTask.cpp index bc8cb0ce69a..29631b95542 100644 --- a/src/Storages/MaterializedView/RefreshTask.cpp +++ b/src/Storages/MaterializedView/RefreshTask.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp b/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp index 69e54dd5f0c..7354243732c 100644 --- a/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp +++ b/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include @@ -54,7 +55,7 @@ ApproximateNearestNeighborInformation::Metric stringToMetric(std::string_view me ApproximateNearestNeighborCondition::ApproximateNearestNeighborCondition(const SelectQueryInfo & query_info, ContextPtr context) : block_with_constants(KeyCondition::getBlockWithConstants(query_info.query, query_info.syntax_analyzer_result, context)) , index_granularity(context->getMergeTreeSettings().index_granularity) - , max_limit_for_ann_queries(context->getSettings().max_limit_for_ann_queries) + , max_limit_for_ann_queries(context->getSettingsRef().max_limit_for_ann_queries) , index_is_useful(checkQueryStructure(query_info)) {} diff --git a/src/Storages/MergeTree/AsyncBlockIDsCache.cpp b/src/Storages/MergeTree/AsyncBlockIDsCache.cpp index 9d64592ed64..6606b00e287 100644 --- a/src/Storages/MergeTree/AsyncBlockIDsCache.cpp +++ b/src/Storages/MergeTree/AsyncBlockIDsCache.cpp @@ -1,7 +1,8 @@ +#include +#include +#include #include #include -#include -#include #include diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index b7fdcc63e1b..8e73021d3e7 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index c2e0e778220..70044db7336 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h index d9e9a433827..adaa4eddb98 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h @@ -1,11 +1,10 @@ #pragma once +#include #include -#include #include #include #include -#include #include #include @@ -13,6 +12,9 @@ namespace DB { +struct MergeTreeSettings; +using MergeTreeSettingsPtr = std::shared_ptr; + Block getBlockAndPermute(const Block & block, const Names & names, const IColumn::Permutation * permutation); Block permuteBlockIfNeeded(const Block & block, const IColumn::Permutation * permutation); diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp index 89c813ab233..70e838e666a 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.h b/src/Storages/MergeTree/IMergedBlockOutputStream.h 
index a9b058418ea..cfcfb177e05 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.h +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.h @@ -1,16 +1,18 @@ #pragma once #include -#include -#include -#include #include #include +#include +#include #include namespace DB { +struct MergeTreeSettings; +using MergeTreeSettingsPtr = std::shared_ptr; + class IMergedBlockOutputStream { public: diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index d38001a0feb..40eef8e7431 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp index 79efb0ca8b3..4c947487d21 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index c8f1a08128b..fc64fae9a58 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -16,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -422,6 +424,16 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() return false; } +bool MergeTask::enabledBlockNumberColumn(GlobalRuntimeContextPtr global_ctx) +{ + return global_ctx->data->getSettings()->enable_block_number_column && global_ctx->metadata_snapshot->getGroupByTTLs().empty(); +} + +bool MergeTask::enabledBlockOffsetColumn(GlobalRuntimeContextPtr global_ctx) +{ + return global_ctx->data->getSettings()->enable_block_offset_column && global_ctx->metadata_snapshot->getGroupByTTLs().empty(); +} + void MergeTask::addGatheringColumn(GlobalRuntimeContextPtr global_ctx, const String & name, const DataTypePtr & type) { if (global_ctx->storage_columns.contains(name)) diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 56909d1b7a0..8b0f2130e8e 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -408,15 +408,8 @@ private: Stages::const_iterator stages_iterator = stages.begin(); - static bool enabledBlockNumberColumn(GlobalRuntimeContextPtr global_ctx) - { - return global_ctx->data->getSettings()->enable_block_number_column && global_ctx->metadata_snapshot->getGroupByTTLs().empty(); - } - - static bool enabledBlockOffsetColumn(GlobalRuntimeContextPtr global_ctx) - { - return global_ctx->data->getSettings()->enable_block_offset_column && global_ctx->metadata_snapshot->getGroupByTTLs().empty(); - } + static bool enabledBlockNumberColumn(GlobalRuntimeContextPtr global_ctx); + static bool enabledBlockOffsetColumn(GlobalRuntimeContextPtr global_ctx); static void addGatheringColumn(GlobalRuntimeContextPtr global_ctx, const String & name, const DataTypePtr & type); }; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index e31f6db5409..78a551591a6 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -73,6 +74,7 @@ #include #include #include +#include #include #include #include @@ -474,7 +476,7 @@ StoragePolicyPtr 
MergeTreeData::getStoragePolicy() const ConditionSelectivityEstimator MergeTreeData::getConditionSelectivityEstimatorByPredicate( const StorageSnapshotPtr & storage_snapshot, const ActionsDAGPtr & filter_dag, ContextPtr local_context) const { - if (!local_context->getSettings().allow_statistics_optimize) + if (!local_context->getSettingsRef().allow_statistics_optimize) return {}; const auto & parts = assert_cast(*storage_snapshot->data).parts; @@ -6173,6 +6175,11 @@ bool MergeTreeData::hasProjection() const return false; } +bool MergeTreeData::areAsynchronousInsertsEnabled() const +{ + return getSettings()->async_insert; +} + MergeTreeData::ProjectionPartsVector MergeTreeData::getAllProjectionPartsVector(MergeTreeData::DataPartStateVector * out_states) const { ProjectionPartsVector res; @@ -7394,6 +7401,13 @@ std::pair MergeTreeData::cloneAn return std::make_pair(dst_data_part, std::move(temporary_directory_lock)); } +bool MergeTreeData::canUseAdaptiveGranularity() const +{ + const auto settings = getSettings(); + return settings->index_granularity_bytes != 0 + && (settings->enable_mixed_granularity_parts || !has_non_adaptive_index_granularity_parts); +} + String MergeTreeData::getFullPathOnDisk(const DiskPtr & disk) const { return disk->getPath() + relative_data_path; diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index c8b721038c6..7076b680521 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -65,6 +64,7 @@ using BackupEntries = std::vector; +struct MergeTreeSettings; struct WriteSettings; /// Auxiliary struct holding information about the future merged or mutated part. @@ -441,7 +441,7 @@ public: bool hasProjection() const override; - bool areAsynchronousInsertsEnabled() const override { return getSettings()->async_insert; } + bool areAsynchronousInsertsEnabled() const override; bool supportsTrivialCountOptimization(const StorageSnapshotPtr &, ContextPtr) const override; @@ -860,12 +860,7 @@ public: /// Returns true if table can create new parts with adaptive granularity /// Has additional constraint in replicated version - virtual bool canUseAdaptiveGranularity() const - { - const auto settings = getSettings(); - return settings->index_granularity_bytes != 0 && - (settings->enable_mixed_granularity_parts || !has_non_adaptive_index_granularity_parts); - } + virtual bool canUseAdaptiveGranularity() const; /// Get constant pointer to storage settings. /// Copy this pointer into your scope and you will @@ -1735,14 +1730,6 @@ struct CurrentlySubmergingEmergingTagger ~CurrentlySubmergingEmergingTagger(); }; - -/// TODO: move it somewhere -[[ maybe_unused ]] static bool needSyncPart(size_t input_rows, size_t input_bytes, const MergeTreeSettings & settings) -{ - return ((settings.min_rows_to_fsync_after_merge && input_rows >= settings.min_rows_to_fsync_after_merge) - || (settings.min_compressed_bytes_to_fsync_after_merge && input_bytes >= settings.min_compressed_bytes_to_fsync_after_merge)); -} - /// Look at MutationCommands if it contains mutations for AlterConversions, update the counter. 
/// Return true if the counter had been updated bool updateAlterConversionsMutations(const MutationCommands & commands, std::atomic & alter_conversions_mutations, bool remove); diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 2d49e1df19b..3c223b8d748 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index 5c9191dbb54..6dc7e649b06 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 74ea89a8864..3fbabe1dd52 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -7,6 +7,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 2e287ff3042..c5214f0f3d3 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -36,6 +37,7 @@ #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 5c8aa32949d..73244b714bf 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -12,12 +12,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include diff --git a/src/Storages/MergeTree/MergeTreeIOSettings.cpp b/src/Storages/MergeTree/MergeTreeIOSettings.cpp new file mode 100644 index 00000000000..58c3bd28d6a --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIOSettings.cpp @@ -0,0 +1,37 @@ +#include +#include +#include +#include +#include + +namespace DB +{ + +MergeTreeWriterSettings::MergeTreeWriterSettings( + const Settings & global_settings, + const WriteSettings & query_write_settings_, + const MergeTreeSettingsPtr & storage_settings, + bool can_use_adaptive_granularity_, + bool rewrite_primary_key_, + bool blocks_are_granules_size_) + : min_compress_block_size( + storage_settings->min_compress_block_size ? storage_settings->min_compress_block_size : global_settings.min_compress_block_size) + , max_compress_block_size( + storage_settings->max_compress_block_size ? 
storage_settings->max_compress_block_size : global_settings.max_compress_block_size) + , marks_compression_codec(storage_settings->marks_compression_codec) + , marks_compress_block_size(storage_settings->marks_compress_block_size) + , compress_primary_key(storage_settings->compress_primary_key) + , primary_key_compression_codec(storage_settings->primary_key_compression_codec) + , primary_key_compress_block_size(storage_settings->primary_key_compress_block_size) + , can_use_adaptive_granularity(can_use_adaptive_granularity_) + , rewrite_primary_key(rewrite_primary_key_) + , blocks_are_granules_size(blocks_are_granules_size_) + , query_write_settings(query_write_settings_) + , max_threads_for_annoy_index_creation(global_settings.max_threads_for_annoy_index_creation) + , low_cardinality_max_dictionary_size(global_settings.low_cardinality_max_dictionary_size) + , low_cardinality_use_single_dictionary_for_part(global_settings.low_cardinality_use_single_dictionary_for_part != 0) + , use_compact_variant_discriminators_serialization(storage_settings->use_compact_variant_discriminators_serialization) +{ +} + +} diff --git a/src/Storages/MergeTree/MergeTreeIOSettings.h b/src/Storages/MergeTree/MergeTreeIOSettings.h index 04171656fcf..c79ca1e66ee 100644 --- a/src/Storages/MergeTree/MergeTreeIOSettings.h +++ b/src/Storages/MergeTree/MergeTreeIOSettings.h @@ -1,15 +1,17 @@ #pragma once #include -#include -#include -#include #include #include +#include namespace DB { +struct MergeTreeSettings; +using MergeTreeSettingsPtr = std::shared_ptr; +struct Settings; + class MMappedFileCache; using MMappedFileCachePtr = std::shared_ptr; @@ -58,27 +60,7 @@ struct MergeTreeWriterSettings const MergeTreeSettingsPtr & storage_settings, bool can_use_adaptive_granularity_, bool rewrite_primary_key_, - bool blocks_are_granules_size_ = false) - : min_compress_block_size( - storage_settings->min_compress_block_size ? storage_settings->min_compress_block_size : global_settings.min_compress_block_size) - , max_compress_block_size( - storage_settings->max_compress_block_size ? 
storage_settings->max_compress_block_size - : global_settings.max_compress_block_size) - , marks_compression_codec(storage_settings->marks_compression_codec) - , marks_compress_block_size(storage_settings->marks_compress_block_size) - , compress_primary_key(storage_settings->compress_primary_key) - , primary_key_compression_codec(storage_settings->primary_key_compression_codec) - , primary_key_compress_block_size(storage_settings->primary_key_compress_block_size) - , can_use_adaptive_granularity(can_use_adaptive_granularity_) - , rewrite_primary_key(rewrite_primary_key_) - , blocks_are_granules_size(blocks_are_granules_size_) - , query_write_settings(query_write_settings_) - , max_threads_for_annoy_index_creation(global_settings.max_threads_for_annoy_index_creation) - , low_cardinality_max_dictionary_size(global_settings.low_cardinality_max_dictionary_size) - , low_cardinality_use_single_dictionary_for_part(global_settings.low_cardinality_use_single_dictionary_for_part != 0) - , use_compact_variant_discriminators_serialization(storage_settings->use_compact_variant_discriminators_serialization) - { - } + bool blocks_are_granules_size_ = false); size_t min_compress_block_size; size_t max_compress_block_size; diff --git a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp index e492ca0aec2..c66aa2373e7 100644 --- a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -230,7 +231,7 @@ MergeTreeIndexConditionAnnoy::MergeTreeIndexConditionAnnoy( ContextPtr context) : ann_condition(query, context) , distance_function(distance_function_) - , search_k(context->getSettings().annoy_index_search_k_nodes) + , search_k(context->getSettingsRef().annoy_index_search_k_nodes) {} bool MergeTreeIndexConditionAnnoy::mayBeTrueOnGranule(MergeTreeIndexGranulePtr /*idx_granule*/) const diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp index 62bb31dff68..067a692a3b5 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp @@ -1,5 +1,6 @@ -#include #include +#include +#include namespace fs = std::filesystem; diff --git a/src/Storages/MergeTree/MergeTreePartInfo.h b/src/Storages/MergeTree/MergeTreePartInfo.h index 9bb79e21144..f128722b03b 100644 --- a/src/Storages/MergeTree/MergeTreePartInfo.h +++ b/src/Storages/MergeTree/MergeTreePartInfo.h @@ -101,9 +101,8 @@ struct MergeTreePartInfo bool isFakeDropRangePart() const { - /// Another max level was previously used for REPLACE/MOVE PARTITION - auto another_max_level = std::numeric_limits::max(); - return level == MergeTreePartInfo::MAX_LEVEL || level == another_max_level; + /// LEGACY_MAX_LEVEL was previously used for REPLACE/MOVE PARTITION + return level == MergeTreePartInfo::MAX_LEVEL || level == MergeTreePartInfo::LEGACY_MAX_LEVEL; } String getPartNameAndCheckFormat(MergeTreeDataFormatVersion format_version) const; diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index 1db70162bff..9223d6fd5b1 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -1,5 +1,6 @@ -#include #include +#include +#include #include #include diff --git a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp 
b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp index 2c249f7b63b..b6f626c992b 100644 --- a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp @@ -14,6 +14,7 @@ #include #include #include +#include namespace ProfileEvents diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp index dc1ba030f45..1daf46b6e25 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include @@ -335,4 +336,12 @@ void MergeTreeReadPool::fillPerThreadInfo(size_t threads, size_t sum_marks) } } +MergeTreeReadPool::BackoffSettings::BackoffSettings(const DB::Settings& settings) + : min_read_latency_ms(settings.read_backoff_min_latency_ms.totalMilliseconds()), + max_throughput(settings.read_backoff_max_throughput), + min_interval_between_events_ms(settings.read_backoff_min_interval_between_events_ms.totalMilliseconds()), + min_events(settings.read_backoff_min_events), + min_concurrency(settings.read_backoff_min_concurrency) +{} + } diff --git a/src/Storages/MergeTree/MergeTreeReadPool.h b/src/Storages/MergeTree/MergeTreeReadPool.h index cb0e8a9657f..cd12b9db9fd 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.h +++ b/src/Storages/MergeTree/MergeTreeReadPool.h @@ -64,12 +64,7 @@ public: size_t min_concurrency = 1; /// Constants above is just an example. - explicit BackoffSettings(const Settings & settings) - : min_read_latency_ms(settings.read_backoff_min_latency_ms.totalMilliseconds()), - max_throughput(settings.read_backoff_max_throughput), - min_interval_between_events_ms(settings.read_backoff_min_interval_between_events_ms.totalMilliseconds()), - min_events(settings.read_backoff_min_events), - min_concurrency(settings.read_backoff_min_concurrency) {} + explicit BackoffSettings(const Settings & settings); BackoffSettings() : min_read_latency_ms(0) {} }; diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index 02f8d6f4f6a..ecf7ac08294 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index 5d6f08d3c53..c968ad84936 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -115,6 +115,16 @@ void MergeTreeSettings::loadFromQuery(ASTStorage & storage_def, ContextPtr conte #undef ADD_IF_ABSENT } +bool MergeTreeSettings::isReadonlySetting(const String & name) +{ + return name == "index_granularity" || name == "index_granularity_bytes" || name == "enable_mixed_granularity_parts"; +} + +bool MergeTreeSettings::isPartFormatSetting(const String & name) +{ + return name == "min_bytes_for_wide_part" || name == "min_rows_for_wide_part"; +} + void MergeTreeSettings::sanityCheck(size_t background_pool_tasks) const { if (number_of_free_entries_in_pool_to_execute_mutation > background_pool_tasks) diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index c0afd781c7e..f5ada81cf55 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -268,17 +268,8 @@ struct MergeTreeSettings : public BaseSettings, public /// NOTE: will rewrite 
the AST to add immutable settings. void loadFromQuery(ASTStorage & storage_def, ContextPtr context, bool is_attach); - /// We check settings after storage creation - static bool isReadonlySetting(const String & name) - { - return name == "index_granularity" || name == "index_granularity_bytes" - || name == "enable_mixed_granularity_parts"; - } - - static bool isPartFormatSetting(const String & name) - { - return name == "min_bytes_for_wide_part" || name == "min_rows_for_wide_part"; - } + static bool isReadonlySetting(const String & name); + static bool isPartFormatSetting(const String & name); /// Check that the values are sane taking also query-level settings into account. void sanityCheck(size_t background_pool_tasks) const; @@ -295,4 +286,10 @@ namespace MergeTreeColumnSettings void validate(const SettingsChanges & changes); } +[[maybe_unused]] static bool needSyncPart(size_t input_rows, size_t input_bytes, const MergeTreeSettings & settings) +{ + return ( + (settings.min_rows_to_fsync_after_merge && input_rows >= settings.min_rows_to_fsync_after_merge) + || (settings.min_compressed_bytes_to_fsync_after_merge && input_bytes >= settings.min_compressed_bytes_to_fsync_after_merge)); +} } diff --git a/src/Storages/MergeTree/MergeTreeSink.cpp b/src/Storages/MergeTree/MergeTreeSink.cpp index 05751e0fa6f..5c71994d68d 100644 --- a/src/Storages/MergeTree/MergeTreeSink.cpp +++ b/src/Storages/MergeTree/MergeTreeSink.cpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace ProfileEvents { diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index a9a5fddace4..e7b3585ecda 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -1,3 +1,4 @@ +#include #include #include #include diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 9d696b70d9f..38869aebaa5 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -4,6 +4,7 @@ #include #include #include +#include namespace DB diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp index 5ae6517a236..c167ac87317 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp index 4c96cbf2c97..73084f487b9 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp index 20f387137e7..9aec074deae 100644 --- a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index a552ee89aee..f74cccd518b 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -1,8 +1,10 @@ #include +#include #include #include #include +#include #include #include #include @@ -19,11 +21,13 @@ #include #include #include 
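The needSyncPart() helper relocated into MergeTreeSettings.h above combines two independently switchable fsync thresholds. A hedged Python restatement with toy parameter names; a zero threshold disables that criterion:

def need_sync_part(input_rows: int, input_bytes: int,
                   min_rows_to_fsync_after_merge: int,
                   min_compressed_bytes_to_fsync_after_merge: int) -> bool:
    # fsync the merged part if either enabled threshold has been reached.
    return bool(
        (min_rows_to_fsync_after_merge and input_rows >= min_rows_to_fsync_after_merge)
        or (min_compressed_bytes_to_fsync_after_merge
            and input_bytes >= min_compressed_bytes_to_fsync_after_merge)
    )

# Both thresholds disabled: never sync, no matter how large the merge was.
assert not need_sync_part(10**9, 10**12, 0, 0)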
+#include #include #include #include #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.cpp b/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.cpp index 4228d7b70b6..10c7bef72fc 100644 --- a/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.cpp +++ b/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/RPNBuilder.cpp b/src/Storages/MergeTree/RPNBuilder.cpp index 4a18d606bb7..93087b0d3fe 100644 --- a/src/Storages/MergeTree/RPNBuilder.cpp +++ b/src/Storages/MergeTree/RPNBuilder.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include diff --git a/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.cpp b/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.cpp index 2fc5238827d..24929365b72 100644 --- a/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.cpp @@ -1,8 +1,9 @@ #include -#include #include +#include #include +#include #include diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp index 336d19692d4..6e22a3515bc 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp @@ -1,3 +1,4 @@ +#include #include #include #include diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp index 328c03a5b94..7aef249c366 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp @@ -1,13 +1,14 @@ +#include +#include #include #include -#include -#include #include #include #include #include +#include namespace DB diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp index 192f0d23f96..3e64a4c7c52 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp @@ -1,6 +1,7 @@ +#include +#include #include #include -#include #include #include diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp index d7601e6e638..dc242a7b084 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 30ba95c46f0..627bda3f8bf 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index 35f355d1d9b..268cead2a0e 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp 
b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 4b4f4c33e7d..83e8a7a3bc5 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -9,8 +9,10 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -574,6 +576,16 @@ bool ReplicatedMergeTreeSinkImpl::writeExistingPart(MergeTreeData::Mutabl } } +template +bool ReplicatedMergeTreeSinkImpl::lastBlockIsDuplicate() const +{ + /// If MV is responsible for deduplication, block is not considered duplicating. + if (context->getSettingsRef().deduplicate_blocks_in_dependent_materialized_views) + return false; + + return last_block_is_duplicate; +} + template std::vector ReplicatedMergeTreeSinkImpl::detectConflictsInAsyncBlockIDs(const std::vector & ids) { diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h index 39623c20584..8a37d85a750 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h @@ -60,14 +60,7 @@ public: bool writeExistingPart(MergeTreeData::MutableDataPartPtr & part); /// For proper deduplication in MaterializedViews - bool lastBlockIsDuplicate() const override - { - /// If MV is responsible for deduplication, block is not considered duplicating. - if (context->getSettingsRef().deduplicate_blocks_in_dependent_materialized_views) - return false; - - return last_block_is_duplicate; - } + bool lastBlockIsDuplicate() const override; struct DelayedChunk; private: diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp index 287a4d20543..c4bae5352cb 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h b/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h index a94508ad41f..82ebbf09988 100644 --- a/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h +++ b/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h @@ -1,25 +1,17 @@ #pragma once +#include #include #include #include #include -#include -#include -#include -#include #include -#include -#include namespace DB { -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} +class QueryPlan; /// A Storage that allows reading from a single MergeTree data part. 
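One behavioral note on the ReplicatedMergeTreeSink hunk above, before the StorageFromMergeTreeDataPart class body continues below: the lastBlockIsDuplicate() definition moved out of the header encodes a single rule. When dependent materialized views deduplicate on their own, the sink never reports the last block as a duplicate, so the MV pipeline still receives it. A toy Python sketch of that decision (names are illustrative, not the project's API):

def last_block_is_duplicate(dedup_in_dependent_mvs: bool, sink_saw_duplicate: bool) -> bool:
    # If the MV layer owns deduplication, the block is never treated as a duplicate here.
    if dedup_in_dependent_mvs:
        return False
    # Otherwise report whatever the sink itself observed for the last block.
    return sink_saw_duplicate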
class StorageFromMergeTreeDataPart final : public IStorage @@ -46,20 +38,7 @@ public: String getName() const override { return "FromMergeTreeDataPart"; } - StorageSnapshotPtr getStorageSnapshot( - const StorageMetadataPtr & metadata_snapshot, ContextPtr /*query_context*/) const override - { - const auto & storage_columns = metadata_snapshot->getColumns(); - if (!hasDynamicSubcolumns(storage_columns)) - return std::make_shared(*this, metadata_snapshot); - - auto data_parts = storage.getDataPartsVectorForInternalUsage(); - - auto object_columns = getConcreteObjectColumns( - data_parts.begin(), data_parts.end(), storage_columns, [](const auto & part) -> const auto & { return part->getColumns(); }); - - return std::make_shared(*this, metadata_snapshot, std::move(object_columns)); - } + StorageSnapshotPtr getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot, ContextPtr /*query_context*/) const override; void read( QueryPlan & query_plan, @@ -69,21 +48,7 @@ public: ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - size_t num_streams) override - { - query_plan.addStep(MergeTreeDataSelectExecutor(storage) - .readFromParts( - parts, - alter_conversions, - column_names, - storage_snapshot, - query_info, - context, - max_block_size, - num_streams, - nullptr, - analysis_result_ptr)); - } + size_t num_streams) override; bool supportsPrewhere() const override { return true; } @@ -102,12 +67,7 @@ public: return storage.getPartitionIDFromQuery(ast, context); } - bool materializeTTLRecalculateOnly() const - { - if (parts.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "parts must not be empty for materializeTTLRecalculateOnly"); - return parts.front()->storage.getSettings()->materialize_ttl_recalculate_only; - } + bool materializeTTLRecalculateOnly() const; bool hasLightweightDeletedMask() const override { diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index 525960d5314..774fd95ebc6 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index d234103e52b..3f0603f6900 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -1,11 +1,14 @@ #include #include #include +#include #include #include #include #include +#include +#include #include #include #include diff --git a/src/Storages/MySQL/MySQLSettings.cpp b/src/Storages/MySQL/MySQLSettings.cpp index fd53174f4f6..1c88e4a94e9 100644 --- a/src/Storages/MySQL/MySQLSettings.cpp +++ b/src/Storages/MySQL/MySQLSettings.cpp @@ -6,6 +6,7 @@ #include #include #include +#include namespace DB diff --git a/src/Storages/NamedCollectionsHelpers.cpp b/src/Storages/NamedCollectionsHelpers.cpp index ba90f21c907..b58aed573ae 100644 --- a/src/Storages/NamedCollectionsHelpers.cpp +++ b/src/Storages/NamedCollectionsHelpers.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -117,7 +118,7 @@ MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides( if (asts.size() == 1) return collection_copy; - const auto allow_override_by_default = context->getSettings().allow_named_collection_override_by_default; + const auto allow_override_by_default = context->getSettingsRef().allow_named_collection_override_by_default; for (auto * it = 
std::next(asts.begin()); it != asts.end(); ++it) { @@ -162,7 +163,7 @@ MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides( Poco::Util::AbstractConfiguration::Keys keys; config.keys(config_prefix, keys); - const auto allow_override_by_default = context->getSettings().allow_named_collection_override_by_default; + const auto allow_override_by_default = context->getSettingsRef().allow_named_collection_override_by_default; for (const auto & key : keys) { if (collection_copy->isOverridable(key, allow_override_by_default)) diff --git a/src/Storages/NamedCollectionsHelpers.h b/src/Storages/NamedCollectionsHelpers.h index b4aea096c59..f444a581eb6 100644 --- a/src/Storages/NamedCollectionsHelpers.h +++ b/src/Storages/NamedCollectionsHelpers.h @@ -9,14 +9,14 @@ #include +namespace DB +{ + namespace ErrorCodes { extern const int BAD_ARGUMENTS; } -namespace DB -{ - /// Helper function to get named collection for table engine. /// Table engines have collection name as first argument of ast and other arguments are key-value overrides. MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides( diff --git a/src/Storages/ObjectStorage/Azure/Configuration.cpp b/src/Storages/ObjectStorage/Azure/Configuration.cpp index 91cc02de0f0..f0a0a562b92 100644 --- a/src/Storages/ObjectStorage/Azure/Configuration.cpp +++ b/src/Storages/ObjectStorage/Azure/Configuration.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include diff --git a/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp index 0484f86542c..6d18b13df01 100644 --- a/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp @@ -3,7 +3,7 @@ #if USE_AWS_S3 && USE_AVRO #include - +#include #include #include #include diff --git a/src/Storages/ObjectStorage/HDFS/Configuration.cpp b/src/Storages/ObjectStorage/HDFS/Configuration.cpp index 155f51adf61..e8071be6f02 100644 --- a/src/Storages/ObjectStorage/HDFS/Configuration.cpp +++ b/src/Storages/ObjectStorage/HDFS/Configuration.cpp @@ -2,6 +2,7 @@ #if USE_HDFS #include +#include #include #include #include diff --git a/src/Storages/ObjectStorage/ReadBufferIterator.cpp b/src/Storages/ObjectStorage/ReadBufferIterator.cpp index a47049791ae..0bb64fd61b7 100644 --- a/src/Storages/ObjectStorage/ReadBufferIterator.cpp +++ b/src/Storages/ObjectStorage/ReadBufferIterator.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include diff --git a/src/Storages/ObjectStorage/S3/Configuration.cpp b/src/Storages/ObjectStorage/S3/Configuration.cpp index b33d55105e9..094ca069e7a 100644 --- a/src/Storages/ObjectStorage/S3/Configuration.cpp +++ b/src/Storages/ObjectStorage/S3/Configuration.cpp @@ -1,6 +1,7 @@ #include #if USE_AWS_S3 +#include #include #include #include diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index d2cc73f14d7..8291327992c 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.h b/src/Storages/ObjectStorage/StorageObjectStorage.h index f97d2620fe5..6ee4ce0c16f 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.h +++ b/src/Storages/ObjectStorage/StorageObjectStorage.h @@ -1,6 +1,7 @@ #pragma once #include #include +#include #include #include #include diff --git 
a/src/Storages/ObjectStorage/StorageObjectStorageSink.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSink.cpp index f2f6eac333c..000e73c70ce 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSink.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSink.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include namespace DB diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 707e7603368..93f8eaacbc0 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -13,7 +13,7 @@ #include #include #include -#include +#include namespace fs = std::filesystem; @@ -425,7 +425,7 @@ std::unique_ptr StorageObjectStorageSource::createReadBuffer( /// FIXME: Changing this setting to default value breaks something around parquet reading read_settings.remote_read_min_bytes_for_seek = read_settings.remote_fs_buffer_size; - const bool object_too_small = object_size <= 2 * context_->getSettings().max_download_buffer_size; + const bool object_too_small = object_size <= 2 * context_->getSettingsRef().max_download_buffer_size; const bool use_prefetch = object_too_small && read_settings.remote_fs_method == RemoteFSReadMethod::threadpool; read_settings.remote_fs_method = use_prefetch ? RemoteFSReadMethod::threadpool : RemoteFSReadMethod::read; /// User's object may change, don't cache it. diff --git a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp index bf595b2f5d4..b5f4cf5bb54 100644 --- a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp @@ -1,9 +1,10 @@ +#include +#include #include #include #include #include #include -#include namespace DB { diff --git a/src/Storages/ObjectStorageQueue/ObjectStorageQueueMetadata.h b/src/Storages/ObjectStorageQueue/ObjectStorageQueueMetadata.h index 05060931b5a..e5fae047ac5 100644 --- a/src/Storages/ObjectStorageQueue/ObjectStorageQueueMetadata.h +++ b/src/Storages/ObjectStorageQueue/ObjectStorageQueueMetadata.h @@ -2,21 +2,19 @@ #include "config.h" #include -#include -#include #include -#include +#include #include #include #include #include +#include namespace fs = std::filesystem; namespace Poco { class Logger; } namespace DB { -struct ObjectStorageQueueSettings; class StorageObjectStorageQueue; struct ObjectStorageQueueTableMetadata; struct StorageInMemoryMetadata; diff --git a/src/Storages/ReadFinalForExternalReplicaStorage.cpp b/src/Storages/ReadFinalForExternalReplicaStorage.cpp index e1d52eefc20..5070b4a17d8 100644 --- a/src/Storages/ReadFinalForExternalReplicaStorage.cpp +++ b/src/Storages/ReadFinalForExternalReplicaStorage.cpp @@ -2,6 +2,7 @@ #if USE_MYSQL || USE_LIBPQXX +#include #include #include #include diff --git a/src/Storages/ReadInOrderOptimizer.cpp b/src/Storages/ReadInOrderOptimizer.cpp index e0ef967d491..9c8c4c2fe79 100644 --- a/src/Storages/ReadInOrderOptimizer.cpp +++ b/src/Storages/ReadInOrderOptimizer.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include diff --git a/src/Storages/RocksDB/EmbeddedRocksDBBulkSink.cpp b/src/Storages/RocksDB/EmbeddedRocksDBBulkSink.cpp index 90792c59d38..b53f694bc5a 100644 --- a/src/Storages/RocksDB/EmbeddedRocksDBBulkSink.cpp +++ b/src/Storages/RocksDB/EmbeddedRocksDBBulkSink.cpp @@ -9,6 +9,7 @@ #include #include +#include #include #include #include diff --git 
a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp index b9d3e071b6c..365915cfd68 100644 --- a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp +++ b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/RocksDB/StorageSystemRocksDB.cpp b/src/Storages/RocksDB/StorageSystemRocksDB.cpp index 5105b190fd9..744d43e1c77 100644 --- a/src/Storages/RocksDB/StorageSystemRocksDB.cpp +++ b/src/Storages/RocksDB/StorageSystemRocksDB.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/SetSettings.cpp b/src/Storages/SetSettings.cpp index baa3d231067..4e6dd6a0519 100644 --- a/src/Storages/SetSettings.cpp +++ b/src/Storages/SetSettings.cpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index a3f6b6afc5d..c403287c43c 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -38,6 +38,7 @@ #include #include #include +#include namespace ProfileEvents diff --git a/src/Storages/StorageExecutable.cpp b/src/Storages/StorageExecutable.cpp index e475211deb3..381c20c616d 100644 --- a/src/Storages/StorageExecutable.cpp +++ b/src/Storages/StorageExecutable.cpp @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -147,7 +148,7 @@ void StorageExecutable::read( for (auto & input_query : input_queries) { QueryPipelineBuilder builder; - if (context->getSettings().allow_experimental_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) builder = InterpreterSelectQueryAnalyzer(input_query, context, {}).buildQueryPipeline(); else builder = InterpreterSelectWithUnionQuery(input_query, context, {}).buildQueryPipeline(); @@ -225,7 +226,7 @@ void registerStorageExecutable(StorageFactory & factory) { size_t max_command_execution_time = 10; - size_t max_execution_time_seconds = static_cast(args.getContext()->getSettings().max_execution_time.totalSeconds()); + size_t max_execution_time_seconds = static_cast(args.getContext()->getSettingsRef().max_execution_time.totalSeconds()); if (max_execution_time_seconds != 0 && max_command_execution_time > max_execution_time_seconds) max_command_execution_time = max_execution_time_seconds; diff --git a/src/Storages/StorageExternalDistributed.cpp b/src/Storages/StorageExternalDistributed.cpp index d712bd10da4..951c87807bb 100644 --- a/src/Storages/StorageExternalDistributed.cpp +++ b/src/Storages/StorageExternalDistributed.cpp @@ -1,6 +1,6 @@ #include "StorageExternalDistributed.h" - +#include #include #include #include diff --git a/src/Storages/StorageFactory.cpp b/src/Storages/StorageFactory.cpp index 9d12a1569d8..060b271d8f4 100644 --- a/src/Storages/StorageFactory.cpp +++ b/src/Storages/StorageFactory.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 42e27a13ca9..e2b010bf48f 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -55,6 +55,8 @@ #include "Formats/FormatSettings.h" #include +#include + #include #include diff --git a/src/Storages/StorageFromMergeTreeDataPart.cpp b/src/Storages/StorageFromMergeTreeDataPart.cpp new file mode 100644 index 00000000000..481d9e66901 --- /dev/null +++ b/src/Storages/StorageFromMergeTreeDataPart.cpp @@ -0,0 +1,59 @@ +#include +#include +#include +#include + +namespace DB +{ + 
+namespace ErrorCodes +{ +extern const int LOGICAL_ERROR; +} + +bool StorageFromMergeTreeDataPart::materializeTTLRecalculateOnly() const +{ + if (parts.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "parts must not be empty for materializeTTLRecalculateOnly"); + return parts.front()->storage.getSettings()->materialize_ttl_recalculate_only; +} + +void StorageFromMergeTreeDataPart::read( + QueryPlan & query_plan, + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + QueryProcessingStage::Enum /*processed_stage*/, + size_t max_block_size, + size_t num_streams) +{ + query_plan.addStep(MergeTreeDataSelectExecutor(storage).readFromParts( + parts, + alter_conversions, + column_names, + storage_snapshot, + query_info, + context, + max_block_size, + num_streams, + nullptr, + analysis_result_ptr)); +} + +StorageSnapshotPtr +StorageFromMergeTreeDataPart::getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot, ContextPtr /*query_context*/) const +{ + const auto & storage_columns = metadata_snapshot->getColumns(); + if (!hasDynamicSubcolumns(storage_columns)) + return std::make_shared(*this, metadata_snapshot); + + auto data_parts = storage.getDataPartsVectorForInternalUsage(); + + auto object_columns = getConcreteObjectColumns( + data_parts.begin(), data_parts.end(), storage_columns, [](const auto & part) -> const auto & { return part->getColumns(); }); + + return std::make_shared(*this, metadata_snapshot, std::move(object_columns)); +} + +} diff --git a/src/Storages/StorageGenerateRandom.cpp b/src/Storages/StorageGenerateRandom.cpp index 754bc096958..f3eb88f3207 100644 --- a/src/Storages/StorageGenerateRandom.cpp +++ b/src/Storages/StorageGenerateRandom.cpp @@ -30,6 +30,7 @@ #include #include +#include #include #include diff --git a/src/Storages/StorageInMemoryMetadata.cpp b/src/Storages/StorageInMemoryMetadata.cpp index a5bae0acce5..2226de3e64f 100644 --- a/src/Storages/StorageInMemoryMetadata.cpp +++ b/src/Storages/StorageInMemoryMetadata.cpp @@ -3,6 +3,8 @@ #include #include +#include + #include #include #include diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp index 47e41cccc96..a0d6cf11b64 100644 --- a/src/Storages/StorageJoin.cpp +++ b/src/Storages/StorageJoin.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include diff --git a/src/Storages/StorageKeeperMap.cpp b/src/Storages/StorageKeeperMap.cpp index 20f99070000..ce760a8d63f 100644 --- a/src/Storages/StorageKeeperMap.cpp +++ b/src/Storages/StorageKeeperMap.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include diff --git a/src/Storages/StorageLog.cpp b/src/Storages/StorageLog.cpp index de0324d7998..ee0a37b50e3 100644 --- a/src/Storages/StorageLog.cpp +++ b/src/Storages/StorageLog.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index ec1559b71a4..57d95a98f11 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/StorageMemory.cpp b/src/Storages/StorageMemory.cpp index 57da72d06ed..cd892488b86 100644 --- a/src/Storages/StorageMemory.cpp +++ b/src/Storages/StorageMemory.cpp @@ -1,5 +1,5 @@ -#include #include +#include #include #include @@ -37,7 +37,6 @@ #include #include - namespace DB { diff --git a/src/Storages/StorageMerge.cpp 
b/src/Storages/StorageMerge.cpp index 2dbe82c92d8..f5bc183931f 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 611289ffd78..40b3a12297b 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -32,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -656,12 +658,13 @@ void StorageMergeTree::mutate(const MutationCommands & commands, ContextPtr quer { /// It's important to serialize order of mutations with alter queries because /// they can depend on each other. - if (auto alter_lock = tryLockForAlter(query_context->getSettings().lock_acquire_timeout); alter_lock == std::nullopt) + if (auto alter_lock = tryLockForAlter(query_context->getSettingsRef().lock_acquire_timeout); alter_lock == std::nullopt) { - throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, - "Cannot start mutation in {}ms because some metadata-changing ALTER (MODIFY|RENAME|ADD|DROP) is currently executing. " - "You can change this timeout with `lock_acquire_timeout` setting", - query_context->getSettings().lock_acquire_timeout.totalMilliseconds()); + throw Exception( + ErrorCodes::TIMEOUT_EXCEEDED, + "Cannot start mutation in {}ms because some metadata-changing ALTER (MODIFY|RENAME|ADD|DROP) is currently executing. " + "You can change this timeout with `lock_acquire_timeout` setting", + query_context->getSettingsRef().lock_acquire_timeout.totalMilliseconds()); } version = startMutation(commands, query_context); } diff --git a/src/Storages/StorageMySQL.cpp b/src/Storages/StorageMySQL.cpp index da391909dff..f2e19ad4c80 100644 --- a/src/Storages/StorageMySQL.cpp +++ b/src/Storages/StorageMySQL.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 9fb0f082c6c..72f725965e0 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -21,7 +21,9 @@ #include #include +#include #include +#include #include @@ -40,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -7360,7 +7363,7 @@ void StorageReplicatedMergeTree::fetchPartition( if (try_no) LOG_INFO(log, "Some of parts ({}) are missing. 
Will try to fetch covering parts.", missing_parts.size()); - if (try_no >= query_context->getSettings().max_fetch_partition_retries_count) + if (try_no >= query_context->getSettingsRef().max_fetch_partition_retries_count) throw Exception(ErrorCodes::TOO_MANY_RETRIES_TO_FETCH_PARTS, "Too many retries to fetch parts from {}:{}", from_zookeeper_name, best_replica_path); diff --git a/src/Storages/StorageSet.cpp b/src/Storages/StorageSet.cpp index 5b7f9fc0ac2..22bd01e0071 100644 --- a/src/Storages/StorageSet.cpp +++ b/src/Storages/StorageSet.cpp @@ -1,3 +1,4 @@ +#include #include #include #include diff --git a/src/Storages/StorageSet.h b/src/Storages/StorageSet.h index 67a9528ff5e..5b6d899b48d 100644 --- a/src/Storages/StorageSet.h +++ b/src/Storages/StorageSet.h @@ -2,7 +2,6 @@ #include #include -#include namespace DB diff --git a/src/Storages/StorageStripeLog.cpp b/src/Storages/StorageStripeLog.cpp index 43560a6d95d..68eaa75b416 100644 --- a/src/Storages/StorageStripeLog.cpp +++ b/src/Storages/StorageStripeLog.cpp @@ -7,6 +7,8 @@ #include #include +#include + #include #include #include @@ -289,7 +291,7 @@ StorageStripeLog::StorageStripeLog( , data_file_path(table_path + "data.bin") , index_file_path(table_path + "index.mrk") , file_checker(disk, table_path + "sizes.json") - , max_compress_block_size(context_->getSettings().max_compress_block_size) + , max_compress_block_size(context_->getSettingsRef().max_compress_block_size) , log(getLogger("StorageStripeLog")) { StorageInMemoryMetadata storage_metadata; diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index f7560fa7910..87911230819 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -36,7 +36,10 @@ #include #include #include + #include +#include +#include #include #include diff --git a/src/Storages/StorageURLCluster.cpp b/src/Storages/StorageURLCluster.cpp index 592bd71f546..664f170c17e 100644 --- a/src/Storages/StorageURLCluster.cpp +++ b/src/Storages/StorageURLCluster.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include #include diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index 016de94c17c..5f768bce978 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -19,6 +19,8 @@ #include +#include + #include #include #include diff --git a/src/Storages/StorageXDBC.cpp b/src/Storages/StorageXDBC.cpp index fb8fa2d6da4..fa23a574ade 100644 --- a/src/Storages/StorageXDBC.cpp +++ b/src/Storages/StorageXDBC.cpp @@ -4,6 +4,8 @@ #include #include +#include +#include #include #include #include diff --git a/src/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp index 8dd8d3b6154..6a7810b97f9 100644 --- a/src/Storages/System/StorageSystemColumns.cpp +++ b/src/Storages/System/StorageSystemColumns.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/System/StorageSystemDDLWorkerQueue.cpp b/src/Storages/System/StorageSystemDDLWorkerQueue.cpp index f96b839a322..ae1a233ea81 100644 --- a/src/Storages/System/StorageSystemDDLWorkerQueue.cpp +++ b/src/Storages/System/StorageSystemDDLWorkerQueue.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/System/StorageSystemDetachedParts.cpp b/src/Storages/System/StorageSystemDetachedParts.cpp index fbc99ab865e..7e4c1de1c65 100644 --- a/src/Storages/System/StorageSystemDetachedParts.cpp +++ b/src/Storages/System/StorageSystemDetachedParts.cpp @@ -1,5 +1,6 @@ 
#include +#include #include #include #include diff --git a/src/Storages/System/StorageSystemErrors.cpp b/src/Storages/System/StorageSystemErrors.cpp index 2da268305f8..2955aebeb7e 100644 --- a/src/Storages/System/StorageSystemErrors.cpp +++ b/src/Storages/System/StorageSystemErrors.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB diff --git a/src/Storages/System/StorageSystemEvents.cpp b/src/Storages/System/StorageSystemEvents.cpp index 822d5c77788..f74c316e3fc 100644 --- a/src/Storages/System/StorageSystemEvents.cpp +++ b/src/Storages/System/StorageSystemEvents.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include diff --git a/src/Storages/System/StorageSystemMergeTreeSettings.cpp b/src/Storages/System/StorageSystemMergeTreeSettings.cpp index 7781e3789a4..18730010e53 100644 --- a/src/Storages/System/StorageSystemMergeTreeSettings.cpp +++ b/src/Storages/System/StorageSystemMergeTreeSettings.cpp @@ -1,7 +1,9 @@ +#include #include #include #include #include +#include #include #include diff --git a/src/Storages/System/StorageSystemMergeTreeSettings.h b/src/Storages/System/StorageSystemMergeTreeSettings.h index e2913a7e55b..4d4150dcec2 100644 --- a/src/Storages/System/StorageSystemMergeTreeSettings.h +++ b/src/Storages/System/StorageSystemMergeTreeSettings.h @@ -1,6 +1,5 @@ #pragma once -#include #include diff --git a/src/Storages/System/StorageSystemPartsBase.h b/src/Storages/System/StorageSystemPartsBase.h index be945162b39..8671fd850f8 100644 --- a/src/Storages/System/StorageSystemPartsBase.h +++ b/src/Storages/System/StorageSystemPartsBase.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include diff --git a/src/Storages/System/StorageSystemRemoteDataPaths.cpp b/src/Storages/System/StorageSystemRemoteDataPaths.cpp index a5c496db7e7..00b4824cc3d 100644 --- a/src/Storages/System/StorageSystemRemoteDataPaths.cpp +++ b/src/Storages/System/StorageSystemRemoteDataPaths.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/System/StorageSystemSettings.cpp b/src/Storages/System/StorageSystemSettings.cpp index b437108b00e..f1e9d542fec 100644 --- a/src/Storages/System/StorageSystemSettings.cpp +++ b/src/Storages/System/StorageSystemSettings.cpp @@ -1,4 +1,6 @@ #include + +#include #include #include #include diff --git a/src/Storages/System/StorageSystemSettingsProfileElements.cpp b/src/Storages/System/StorageSystemSettingsProfileElements.cpp index 2af3e6dfd05..40a669351b1 100644 --- a/src/Storages/System/StorageSystemSettingsProfileElements.cpp +++ b/src/Storages/System/StorageSystemSettingsProfileElements.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/System/StorageSystemStackTrace.cpp b/src/Storages/System/StorageSystemStackTrace.cpp index ba7433fb9ae..cc221bbea69 100644 --- a/src/Storages/System/StorageSystemStackTrace.cpp +++ b/src/Storages/System/StorageSystemStackTrace.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/System/StorageSystemUsers.cpp b/src/Storages/System/StorageSystemUsers.cpp index 541bd852140..b4d83058c82 100644 --- a/src/Storages/System/StorageSystemUsers.cpp +++ b/src/Storages/System/StorageSystemUsers.cpp @@ -18,8 +18,12 @@ #include #include #include -#include "base/types.h" + #include + +#include +#include + #include diff --git a/src/Storages/System/StorageSystemZooKeeper.cpp b/src/Storages/System/StorageSystemZooKeeper.cpp index 
cb46cd19517..ba9fac6d289 100644 --- a/src/Storages/System/StorageSystemZooKeeper.cpp +++ b/src/Storages/System/StorageSystemZooKeeper.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/TTLDescription.cpp b/src/Storages/TTLDescription.cpp index f831465277d..ac091e7cf3c 100644 --- a/src/Storages/TTLDescription.cpp +++ b/src/Storages/TTLDescription.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index 77e6ee9cb24..b842cdda022 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -48,6 +48,7 @@ #include #include #include +#include #include #include diff --git a/src/Storages/buildQueryTreeForShard.cpp b/src/Storages/buildQueryTreeForShard.cpp index 84ba92bba00..69e8f0a7880 100644 --- a/src/Storages/buildQueryTreeForShard.cpp +++ b/src/Storages/buildQueryTreeForShard.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/getStructureOfRemoteTable.cpp b/src/Storages/getStructureOfRemoteTable.cpp index 6ea7bdc312d..56071abaa95 100644 --- a/src/Storages/getStructureOfRemoteTable.cpp +++ b/src/Storages/getStructureOfRemoteTable.cpp @@ -1,4 +1,5 @@ #include "getStructureOfRemoteTable.h" +#include #include #include #include diff --git a/src/Storages/transformQueryForExternalDatabase.cpp b/src/Storages/transformQueryForExternalDatabase.cpp index fc85bde11d9..f1b73e939b8 100644 --- a/src/Storages/transformQueryForExternalDatabase.cpp +++ b/src/Storages/transformQueryForExternalDatabase.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/src/TableFunctions/ITableFunctionXDBC.cpp b/src/TableFunctions/ITableFunctionXDBC.cpp index a5c16b3a5aa..83d0ac4ef41 100644 --- a/src/TableFunctions/ITableFunctionXDBC.cpp +++ b/src/TableFunctions/ITableFunctionXDBC.cpp @@ -1,8 +1,10 @@ +#include +#include #include -#include -#include -#include #include +#include +#include +#include #include #include #include diff --git a/src/TableFunctions/TableFunctionExplain.cpp b/src/TableFunctions/TableFunctionExplain.cpp index df2835dd630..552b9fde986 100644 --- a/src/TableFunctions/TableFunctionExplain.cpp +++ b/src/TableFunctions/TableFunctionExplain.cpp @@ -1,3 +1,4 @@ +#include #include #include #include diff --git a/src/TableFunctions/TableFunctionFactory.cpp b/src/TableFunctions/TableFunctionFactory.cpp index ce3daff0785..6ecdeb47779 100644 --- a/src/TableFunctions/TableFunctionFactory.cpp +++ b/src/TableFunctions/TableFunctionFactory.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include diff --git a/src/TableFunctions/TableFunctionFile.cpp b/src/TableFunctions/TableFunctionFile.cpp index 28bf72e07fb..1b6d86f8fa5 100644 --- a/src/TableFunctions/TableFunctionFile.cpp +++ b/src/TableFunctions/TableFunctionFile.cpp @@ -4,6 +4,7 @@ #include "registerTableFunctions.h" #include +#include #include #include #include diff --git a/src/TableFunctions/TableFunctionFileCluster.cpp b/src/TableFunctions/TableFunctionFileCluster.cpp index 3e53349b022..514fa3143d0 100644 --- a/src/TableFunctions/TableFunctionFileCluster.cpp +++ b/src/TableFunctions/TableFunctionFileCluster.cpp @@ -1,3 +1,4 @@ +#include #include #include #include diff --git a/src/TableFunctions/TableFunctionFormat.cpp b/src/TableFunctions/TableFunctionFormat.cpp index ad2a142a140..5a0f47653d5 100644 --- 
a/src/TableFunctions/TableFunctionFormat.cpp +++ b/src/TableFunctions/TableFunctionFormat.cpp @@ -1,5 +1,7 @@ #include +#include + #include #include diff --git a/src/TableFunctions/TableFunctionMySQL.cpp b/src/TableFunctions/TableFunctionMySQL.cpp index b027cf42880..ffd318f36f7 100644 --- a/src/TableFunctions/TableFunctionMySQL.cpp +++ b/src/TableFunctions/TableFunctionMySQL.cpp @@ -3,6 +3,8 @@ #if USE_MYSQL #include + +#include #include #include #include diff --git a/src/TableFunctions/TableFunctionPostgreSQL.cpp b/src/TableFunctions/TableFunctionPostgreSQL.cpp index 508f85df6a3..d690da9949a 100644 --- a/src/TableFunctions/TableFunctionPostgreSQL.cpp +++ b/src/TableFunctions/TableFunctionPostgreSQL.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include #include diff --git a/src/TableFunctions/TableFunctionRemote.cpp b/src/TableFunctions/TableFunctionRemote.cpp index d7774bb6478..e60c31b2d77 100644 --- a/src/TableFunctions/TableFunctionRemote.cpp +++ b/src/TableFunctions/TableFunctionRemote.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "registerTableFunctions.h" diff --git a/src/TableFunctions/TableFunctionView.cpp b/src/TableFunctions/TableFunctionView.cpp index 2a50fb2d006..57501df6d4d 100644 --- a/src/TableFunctions/TableFunctionView.cpp +++ b/src/TableFunctions/TableFunctionView.cpp @@ -1,3 +1,4 @@ +#include #include #include #include diff --git a/src/TableFunctions/TableFunctionViewIfPermitted.cpp b/src/TableFunctions/TableFunctionViewIfPermitted.cpp index b1691803988..935be6c1987 100644 --- a/src/TableFunctions/TableFunctionViewIfPermitted.cpp +++ b/src/TableFunctions/TableFunctionViewIfPermitted.cpp @@ -1,3 +1,4 @@ +#include #include #include #include diff --git a/tests/ci/artifactory.py b/tests/ci/artifactory.py new file mode 100644 index 00000000000..1a062d05a23 --- /dev/null +++ b/tests/ci/artifactory.py @@ -0,0 +1,355 @@ +import argparse +import time +from pathlib import Path +from typing import Optional +from shutil import copy2 +from create_release import PackageDownloader, ReleaseInfo, ShellRunner +from ci_utils import WithIter + + +class MountPointApp(metaclass=WithIter): + RCLONE = "rclone" + S3FS = "s3fs" + + +class R2MountPoint: + _TEST_BUCKET_NAME = "repo-test" + _PROD_BUCKET_NAME = "packages" + _CACHE_MAX_SIZE_GB = 20 + MOUNT_POINT = "/home/ubuntu/mountpoint" + API_ENDPOINT = "https://d4fd593eebab2e3a58a599400c4cd64d.r2.cloudflarestorage.com" + LOG_FILE = "/home/ubuntu/fuse_mount.log" + # mod time is not required by reprepro and createrepo - disable to simplify bucket's mount sync (applicable for rclone) + NOMODTIME = True + # enable debug messages in mount log + DEBUG = True + # enable cache for mountpoint + CACHE_ENABLED = False + # TODO: which mode is better: minimal/writes/full/off + _RCLONE_CACHE_MODE = "minimal" + UMASK = "0000" + + def __init__(self, app: str, dry_run: bool) -> None: + assert app in MountPointApp + self.app = app + if dry_run: + self.bucket_name = self._TEST_BUCKET_NAME + else: + self.bucket_name = self._PROD_BUCKET_NAME + + self.aux_mount_options = "" + self.async_mount = False + if self.app == MountPointApp.S3FS: + self.cache_dir = "/home/ubuntu/s3fs_cache" + # self.aux_mount_options += "-o nomodtime " if self.NOMODTIME else "" not for s3fs + self.aux_mount_options += "--debug " if self.DEBUG else "" + self.aux_mount_options += ( + f"-o use_cache={self.cache_dir} -o cache_size_mb={self._CACHE_MAX_SIZE_GB * 1024} " + if self.CACHE_ENABLED + else "" + ) + # without -o nomultipart there are errors like 
"Error 5 writing to /home/ubuntu/***.deb: Input/output error" + self.mount_cmd = f"s3fs {self.bucket_name} {self.MOUNT_POINT} -o url={self.API_ENDPOINT} -o use_path_request_style -o umask=0000 -o nomultipart -o logfile={self.LOG_FILE} {self.aux_mount_options}" + elif self.app == MountPointApp.RCLONE: + # run rclone mount process asynchronously, otherwise subprocess.run(daemonized command) will not return + self.async_mount = True + self.cache_dir = "/home/ubuntu/rclone_cache" + self.aux_mount_options += "--no-modtime " if self.NOMODTIME else "" + self.aux_mount_options += "-v " if self.DEBUG else "" # -vv too verbose + self.aux_mount_options += ( + f"--vfs-cache-mode {self._RCLONE_CACHE_MODE} --vfs-cache-max-size {self._CACHE_MAX_SIZE_GB}G" + if self.CACHE_ENABLED + else "--vfs-cache-mode off" + ) + # Use --no-modtime to try to avoid: ERROR : rpm/lts/clickhouse-client-24.3.6.5.x86_64.rpm: Failed to apply pending mod time + self.mount_cmd = f"rclone mount remote:{self.bucket_name} {self.MOUNT_POINT} --daemon --cache-dir {self.cache_dir} --umask 0000 --log-file {self.LOG_FILE} {self.aux_mount_options}" + else: + assert False + + def init(self): + print(f"Mount bucket [{self.bucket_name}] to [{self.MOUNT_POINT}]") + _CLEAN_LOG_FILE_CMD = f"tail -n 1000 {self.LOG_FILE} > {self.LOG_FILE}_tmp && mv {self.LOG_FILE}_tmp {self.LOG_FILE} ||:" + _MKDIR_CMD = f"mkdir -p {self.MOUNT_POINT}" + _MKDIR_FOR_CACHE = f"mkdir -p {self.cache_dir}" + _UNMOUNT_CMD = ( + f"mount | grep -q {self.MOUNT_POINT} && umount {self.MOUNT_POINT} ||:" + ) + + _TEST_MOUNT_CMD = f"mount | grep -q {self.MOUNT_POINT}" + ShellRunner.run(_CLEAN_LOG_FILE_CMD) + ShellRunner.run(_UNMOUNT_CMD) + ShellRunner.run(_MKDIR_CMD) + ShellRunner.run(_MKDIR_FOR_CACHE) + ShellRunner.run(self.mount_cmd, async_=self.async_mount) + if self.async_mount: + time.sleep(3) + ShellRunner.run(_TEST_MOUNT_CMD) + + @classmethod + def teardown(cls): + print(f"Unmount [{cls.MOUNT_POINT}]") + ShellRunner.run(f"umount {cls.MOUNT_POINT}") + + +class RepoCodenames(metaclass=WithIter): + LTS = "lts" + STABLE = "stable" + + +class DebianArtifactory: + _TEST_REPO_URL = "https://pub-73dd1910f4284a81a02a67018967e028.r2.dev/deb" + _PROD_REPO_URL = "https://packages.clickhouse.com/deb" + + def __init__(self, release_info: ReleaseInfo, dry_run: bool): + self.codename = release_info.codename + self.version = release_info.version + if dry_run: + self.repo_url = self._TEST_REPO_URL + else: + self.repo_url = self._PROD_REPO_URL + assert self.codename in RepoCodenames + self.pd = PackageDownloader( + release=release_info.release_branch, + commit_sha=release_info.commit_sha, + version=release_info.version, + ) + + def export_packages(self): + assert self.pd.local_deb_packages_ready(), "BUG: Packages are not downloaded" + print("Start adding packages") + paths = [ + self.pd.LOCAL_DIR + "/" + file for file in self.pd.get_deb_packages_files() + ] + REPREPRO_CMD_PREFIX = f"reprepro --basedir {R2MountPoint.MOUNT_POINT}/configs/deb --outdir {R2MountPoint.MOUNT_POINT}/deb --verbose" + cmd = f"{REPREPRO_CMD_PREFIX} includedeb {self.codename} {' '.join(paths)}" + print("Running export command:") + print(f" {cmd}") + ShellRunner.run(cmd) + ShellRunner.run("sync") + + if self.codename == RepoCodenames.LTS: + packages_with_version = [ + package + "=" + self.version for package in self.pd.get_packages_names() + ] + print( + f"Copy packages from {RepoCodenames.LTS} to {RepoCodenames.STABLE} repository" + ) + cmd = f"{REPREPRO_CMD_PREFIX} copy {RepoCodenames.STABLE} {RepoCodenames.LTS} {' 
'.join(packages_with_version)}" + print("Running copy command:") + print(f" {cmd}") + ShellRunner.run(cmd) + ShellRunner.run("sync") + + def test_packages(self): + ShellRunner.run("docker pull ubuntu:latest") + print(f"Test packages installation, version [{self.version}]") + cmd = f"docker run --rm ubuntu:latest bash -c \"apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; echo 'deb {self.repo_url} stable main' | tee /etc/apt/sources.list.d/clickhouse.list; apt update -y; apt-get install -y clickhouse-client={self.version}\"" + print("Running test command:") + print(f" {cmd}") + ShellRunner.run(cmd) + + +def _copy_if_not_exists(src: Path, dst: Path) -> Path: + if dst.is_dir(): + dst = dst / src.name + if not dst.exists(): + return copy2(src, dst) # type: ignore + if src.stat().st_size == dst.stat().st_size: + return dst + return copy2(src, dst) # type: ignore + + +class RpmArtifactory: + _TEST_REPO_URL = ( + "https://pub-73dd1910f4284a81a02a67018967e028.r2.dev/rpm/clickhouse.repo" + ) + _PROD_REPO_URL = "https://packages.clickhouse.com/rpm/clickhouse.repo" + _SIGN_KEY = "885E2BDCF96B0B45ABF058453E4AD4719DDE9A38" + + def __init__(self, release_info: ReleaseInfo, dry_run: bool): + self.codename = release_info.codename + self.version = release_info.version + if dry_run: + self.repo_url = self._TEST_REPO_URL + else: + self.repo_url = self._PROD_REPO_URL + assert self.codename in RepoCodenames + self.pd = PackageDownloader( + release=release_info.release_branch, + commit_sha=release_info.commit_sha, + version=release_info.version, + ) + + def export_packages(self, codename: Optional[str] = None) -> None: + assert self.pd.local_rpm_packages_ready(), "BUG: Packages are not downloaded" + codename = codename or self.codename + print(f"Start adding packages to [{codename}]") + paths = [ + self.pd.LOCAL_DIR + "/" + file for file in self.pd.get_rpm_packages_files() + ] + + dest_dir = Path(R2MountPoint.MOUNT_POINT) / "rpm" / codename + + for package in paths: + _copy_if_not_exists(Path(package), dest_dir) + + commands = ( + f"createrepo_c --local-sqlite --workers=2 --update --verbose {dest_dir}", + f"gpg --sign-with {self._SIGN_KEY} --detach-sign --batch --yes --armor {dest_dir / 'repodata' / 'repomd.xml'}", + ) + print(f"Exporting RPM packages into [{codename}]") + + for command in commands: + print("Running command:") + print(f" {command}") + ShellRunner.run(command) + + update_public_key = f"gpg --armor --export {self._SIGN_KEY}" + pub_key_path = dest_dir / "repodata" / "repomd.xml.key" + print("Updating repomd.xml.key") + pub_key_path.write_text(ShellRunner.run(update_public_key)[1]) + if codename == RepoCodenames.LTS: + self.export_packages(RepoCodenames.STABLE) + ShellRunner.run("sync") + + def test_packages(self): + ShellRunner.run("docker pull fedora:latest") + print(f"Test package installation, version [{self.version}]") + cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client-{self.version}-1"' + print("Running test command:") + print(f" {cmd}") + ShellRunner.run(cmd) + + +class TgzArtifactory: + _TEST_REPO_URL = "https://pub-73dd1910f4284a81a02a67018967e028.r2.dev/tgz" + _PROD_REPO_URL = "https://packages.clickhouse.com/tgz" + + def __init__(self, release_info: ReleaseInfo, dry_run: bool): + self.codename = release_info.codename + self.version = release_info.version + if dry_run: 
+ self.repo_url = self._TEST_REPO_URL + else: + self.repo_url = self._PROD_REPO_URL + assert self.codename in RepoCodenames + self.pd = PackageDownloader( + release=release_info.release_branch, + commit_sha=release_info.commit_sha, + version=release_info.version, + ) + + def export_packages(self, codename: Optional[str] = None) -> None: + assert self.pd.local_tgz_packages_ready(), "BUG: Packages are not downloaded" + codename = codename or self.codename + + paths = [ + self.pd.LOCAL_DIR + "/" + file for file in self.pd.get_tgz_packages_files() + ] + + dest_dir = Path(R2MountPoint.MOUNT_POINT) / "tgz" / codename + + print(f"Exporting TGZ packages into [{codename}]") + + for package in paths: + _copy_if_not_exists(Path(package), dest_dir) + + if codename == RepoCodenames.LTS: + self.export_packages(RepoCodenames.STABLE) + ShellRunner.run("sync") + + def test_packages(self): + tgz_file = "/tmp/tmp.tgz" + tgz_sha_file = "/tmp/tmp.tgz.sha512" + ShellRunner.run( + f"curl -o {tgz_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz" + ) + ShellRunner.run( + f"curl -o {tgz_sha_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz.sha512" + ) + # ShellRunner.run returns (retcode, stdout) - compare the checksum strings, not the tuples + _, expected_checksum = ShellRunner.run(f"cut -d ' ' -f 1 {tgz_sha_file}") + _, actual_checksum = ShellRunner.run(f"sha512sum {tgz_file} | cut -d ' ' -f 1") + assert ( + expected_checksum == actual_checksum + ), f"[{actual_checksum} != {expected_checksum}]" + ShellRunner.run("rm /tmp/tmp.tgz*") + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Adds release packages to the repository", + ) + parser.add_argument( + "--infile", + type=str, + required=True, + help="input file with release info", + ) + parser.add_argument( + "--export-debian", + action="store_true", + help="Export debian packages to repository", + ) + parser.add_argument( + "--export-rpm", + action="store_true", + help="Export rpm packages to repository", + ) + parser.add_argument( + "--export-tgz", + action="store_true", + help="Export tgz packages to repository", + ) + parser.add_argument( + "--test-debian", + action="store_true", + help="Test debian packages installation", + ) + parser.add_argument( + "--test-rpm", + action="store_true", + help="Test rpm packages installation", + ) + parser.add_argument( + "--test-tgz", + action="store_true", + help="Test tgz packages installation", + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Dry run mode", + ) + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + + release_info = ReleaseInfo.from_file(args.infile) + """ + Use S3FS. 
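S3FS mounts the R2 bucket as a local filesystem, so packages can be copied and the repo metadata rebuilt in place.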
RCLONE has some errors with r2 remote which I didn't figure out how to resolve: + ERROR : IO error: NotImplemented: versionId not implemented + Failed to copy: NotImplemented: versionId not implemented + """ + mp = R2MountPoint(MountPointApp.S3FS, dry_run=args.dry_run) + if args.export_debian: + mp.init() + DebianArtifactory(release_info, dry_run=args.dry_run).export_packages() + mp.teardown() + if args.export_rpm: + mp.init() + RpmArtifactory(release_info, dry_run=args.dry_run).export_packages() + mp.teardown() + if args.export_tgz: + mp.init() + TgzArtifactory(release_info, dry_run=args.dry_run).export_packages() + mp.teardown() + if args.test_debian: + DebianArtifactory(release_info, dry_run=args.dry_run).test_packages() + if args.test_tgz: + TgzArtifactory(release_info, dry_run=args.dry_run).test_packages() + if args.test_rpm: + RpmArtifactory(release_info, dry_run=args.dry_run).test_packages() diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 32b87698395..cf285f4b97d 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -6,6 +6,7 @@ import os import re import subprocess import sys +import time from dataclasses import dataclass from pathlib import Path from typing import Any, Dict, List, Optional @@ -15,7 +16,7 @@ import upload_result_helper from build_check import get_release_or_pr from ci_config import CI from ci_metadata import CiMetadata -from ci_utils import GHActions, normalize_string, Shell +from ci_utils import GHActions, normalize_string, Utils from clickhouse_helper import ( CiLogsCredentials, ClickHouseHelper, @@ -264,7 +265,7 @@ def check_missing_images_on_dockerhub( def _pre_action(s3, indata, pr_info): print("Clear dmesg") - Shell.run("sudo dmesg --clear ||:") + Utils.clear_dmesg() CommitStatusData.cleanup() JobReport.cleanup() BuildResult.cleanup() @@ -325,8 +326,8 @@ def _mark_success_action( # do nothing, exit without failure print(f"ERROR: no status file for job [{job}]") - if job_config.run_always or job_config.run_by_label: - print(f"Job [{job}] runs always or by label in CI - do not cache") + if job_config.run_by_label or not job_config.has_digest(): + print(f"Job [{job}] runs by label or has no digest in CI - do not cache") + else: + if pr_info.is_master: + pass @@ -550,7 +551,17 @@ def _update_gh_statuses_action(indata: Dict, s3: S3Helper) -> None: except Exception as e: raise e print("Going to update overall CI report") - set_status_comment(commit, pr_info) + for retry in range(2): + try: + set_status_comment(commit, pr_info) + break + except Exception as e: + print( + f"WARNING: Failed to update CI Running status, attempt [{retry + 1}], exception [{e}]" + ) + time.sleep(1) + else: + print("ERROR: All retry attempts failed.") print("... 
CI report update - done") @@ -996,10 +1007,10 @@ def main() -> int: args.skip_jobs, ) - if IS_CI and pr_info.is_pr: - ci_cache.filter_out_not_affected_jobs() - ci_cache.print_status() + if IS_CI and pr_info.is_pr and not ci_settings.no_ci_cache: + ci_cache.filter_out_not_affected_jobs() + ci_cache.print_status() if IS_CI and not pr_info.is_merge_queue: # wait for pending jobs to be finished, await_jobs is a long blocking call @@ -1035,6 +1046,7 @@ def main() -> int: elif args.pre: assert indata, "Run config must be provided via --infile" _pre_action(s3, indata, pr_info) + JobReport.create_pre_report().dump() ### RUN action: start elif args.run: @@ -1056,6 +1068,15 @@ def main() -> int: if build_result: if build_result.status == SUCCESS: previous_status = build_result.status + JobReport( + status=SUCCESS, + description="", + test_results=[], + start_time="", + duration=0.0, + additional_files=[], + job_skipped=True, + ).dump() else: # FIXME: Consider reusing failures for build jobs. # Just remove this if/else - that makes build job starting and failing immediately @@ -1086,6 +1107,16 @@ def main() -> int: print(status) print("::endgroup::") previous_status = status.state + print("Create dummy job report with job_skipped flag") + JobReport( + status=status.state, + description="", + test_results=[], + start_time="", + duration=0.0, + additional_files=[], + job_skipped=True, + ).dump() # ci cache check if not previous_status and not ci_settings.no_ci_cache: @@ -1121,20 +1152,22 @@ def main() -> int: exit_code = 1 else: exit_code = _run_test(check_name, args.run_command) + job_report = JobReport.load() if JobReport.exist() else None + assert ( + job_report + ), "BUG. There must be a job report: either a real one, or a pre-report if the job was killed" + job_report.exit_code = exit_code + job_report.dump() ### RUN action: end ### POST action: start elif args.post: - if Shell.check( - "sudo dmesg -T | grep -q -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE'" - ): - print("WARNING: OOM while job execution") - CIBuddy(dry_run=not pr_info.is_release).post_error( - "Out Of Memory", job_name=_get_ext_check_name(args.job_name) - ) - job_report = JobReport.load() if JobReport.exist() else None - if job_report: 
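+ # a report must exist at this point: _pre_action dumps a pre-report before the job body runs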
+ job_report = JobReport.load() if JobReport.exist() else None + assert ( + job_report + ), "BUG. There must be a job report: either a real one, or a pre-report if the job was killed" + if not job_report.job_skipped and not job_report.pre_report: + # it's a real job report ch_helper = ClickHouseHelper() check_url = "" @@ -1234,9 +1267,37 @@ def main() -> int: indata["build"], ch_helper, ) - else: - # no job report - print(f"No job report for {[args.job_name]} - do nothing") + elif job_report.job_skipped: + print(f"Job was skipped {[args.job_name]} - do nothing") + elif job_report.pre_report: + print("ERROR: Job was killed - generate evidence") + job_report.update_duration() + ret_code = os.getenv("JOB_EXIT_CODE", "") + if ret_code: + try: + job_report.exit_code = int(ret_code) + except ValueError: + pass + if Utils.is_killed_with_oom(): + print("WARNING: OOM while job execution") + error = f"Out Of Memory, exit_code {job_report.exit_code}, after {int(job_report.duration)}s" + else: + error = f"Unknown, exit_code {job_report.exit_code}, after {int(job_report.duration)}s" + CIBuddy().post_error(error, job_name=_get_ext_check_name(args.job_name)) + if CI.is_test_job(args.job_name): + gh = GitHub(get_best_robot_token(), per_page=100) + commit = get_commit(gh, pr_info.sha) + post_commit_status( + commit, + ERROR, + "", + "Error: " + error, + _get_ext_check_name(args.job_name), + pr_info, + dump_to_file=True, + ) ### POST action: end ### MARK SUCCESS action: start diff --git a/tests/ci/ci_buddy.py b/tests/ci/ci_buddy.py index ea690bb602c..c650b876610 100644 --- a/tests/ci/ci_buddy.py +++ b/tests/ci/ci_buddy.py @@ -26,6 +26,7 @@ class CIBuddy: self.pr_number = pr_info.number self.head_ref = pr_info.head_ref self.commit_url = pr_info.commit_html_url + self.sha = pr_info.sha[:10] @staticmethod def _get_webhooks(): @@ -69,8 +70,10 @@ class CIBuddy: line_err = f":red_circle: *Error: {error_description}*\n\n" line_ghr = f" *Runner:* `{instance_type}`, `{instance_id}`\n" line_job = f" *Job:* `{job_name}`\n" - line_pr_ = f" *PR:* \n" - line_br_ = f" *Branch:* `{self.head_ref}`, <{self.commit_url}|commit>\n" + line_pr_ = f" *PR:* , <{self.commit_url}|{self.sha}>\n" + line_br_ = ( + f" *Branch:* `{self.head_ref}`, <{self.commit_url}|{self.sha}>\n" + ) message = line_err message += line_job if with_instance_info: message += line_ghr message += line_pr_ message += line_br_ @@ -85,4 +88,4 @@ if __name__ == "__main__": # test buddy = CIBuddy(dry_run=True) - buddy.post_error("Out of memory") + buddy.post_error("Test") diff --git a/tests/ci/ci_cache.py b/tests/ci/ci_cache.py index 291ed56aeea..9486a286a8d 100644 --- a/tests/ci/ci_cache.py +++ b/tests/ci/ci_cache.py @@ -520,6 +520,35 @@ class CiCache: self.RecordType.SUCCESSFUL, job, batch, num_batches, release_branch ) + def has_evidence(self, job: str, job_config: CI.JobConfig) -> bool: + """ + Checks whether the job has already been seen in master/release CI. + Used to decide whether a change affected the job. + :param job_config: + :param job: + :return: + """ + return ( + self.is_successful( + job=job, + batch=0, + num_batches=job_config.num_batches, + release_branch=True, + ) + or self.is_pending( + job=job, + batch=0, + num_batches=job_config.num_batches, + release_branch=True, + ) + or self.is_failed( + job=job, + batch=0, + num_batches=job_config.num_batches, + release_branch=True, + ) + ) + def is_failed( self, job: str, batch: int, num_batches: int, release_branch: bool ) -> bool: @@ -609,7 +638,7 @@ class CiCache: pushes pending records for all jobs that supposed to be run """ for job, job_config 
in self.jobs_to_do.items(): - if job_config.run_always: + if not job_config.has_digest(): continue pending_state = PendingState(time.time(), run_url=GITHUB_RUN_URL) assert job_config.batches @@ -677,74 +706,48 @@ class CiCache: def filter_out_not_affected_jobs(self): """ Filter is to be applied in PRs to remove jobs that are not affected by the change - It removes jobs from @jobs_to_do if it is a: - 1. test job and it is in @jobs_to_wait (no need to wait not affected jobs in PRs) - 2. test job and it has finished on release branch (even if failed) - 2. build job which is not required by any test job that is left in @jobs_to_do - :return: """ - # 1. - remove_from_await_list = [] - for job_name, job_config in self.jobs_to_wait.items(): - if CI.is_test_job(job_name) and job_name != CI.JobNames.BUILD_CHECK: - remove_from_await_list.append(job_name) - for job in remove_from_await_list: - print(f"Filter job [{job}] - test job and not affected by the change") - del self.jobs_to_wait[job] - del self.jobs_to_do[job] - - # 2. remove_from_to_do = [] + required_builds = [] + has_test_jobs_to_skip = False for job_name, job_config in self.jobs_to_do.items(): if CI.is_test_job(job_name) and job_name != CI.JobNames.BUILD_CHECK: - batches_to_remove = [] - assert job_config.batches is not None - for batch in job_config.batches: - if self.is_failed( - job_name, batch, job_config.num_batches, release_branch=True - ): - print( - f"Filter [{job_name}/{batch}] - not affected by the change (failed on release branch)" - ) - batches_to_remove.append(batch) - for batch in batches_to_remove: - job_config.batches.remove(batch) - if not job_config.batches: - print( - f"Filter [{job_name}] - not affected by the change (failed on release branch)" - ) - remove_from_to_do.append(job_name) - for job in remove_from_to_do: - del self.jobs_to_do[job] - - # 3. - required_builds = [] # type: List[str] - for job_name, job_config in self.jobs_to_do.items(): - if CI.is_test_job(job_name) and job_config.required_builds: - required_builds += job_config.required_builds - required_builds = list(set(required_builds)) - - remove_builds = [] # type: List[str] - has_builds_to_do = False - for job_name, job_config in self.jobs_to_do.items(): - if CI.is_build_job(job_name): - if job_name not in required_builds: - remove_builds.append(job_name) + if job_config.reference_job_name: + reference_name = job_config.reference_job_name + reference_config = CI.JOB_CONFIGS[reference_name] else: - has_builds_to_do = True + reference_name = job_name + reference_config = job_config + if self.has_evidence( + job=reference_name, + job_config=reference_config, + ): + remove_from_to_do.append(job_name) + has_test_jobs_to_skip = True + else: + required_builds += ( + job_config.required_builds if job_config.required_builds else [] + ) + if has_test_jobs_to_skip: + # If there are tests to skip, it means build digest has not been changed. + # No need to test builds. 
Let's keep all builds required for test jobs and skip the others + for job_name, job_config in self.jobs_to_do.items(): + if CI.is_build_job(job_name): + if job_name not in required_builds: + remove_from_to_do.append(job_name) - for build_job in remove_builds: - print( - f"Filter build job [{build_job}] - not affected and not required by test jobs" - ) - del self.jobs_to_do[build_job] - if build_job in self.jobs_to_wait: - del self.jobs_to_wait[build_job] + if not required_builds: + remove_from_to_do.append(CI.JobNames.BUILD_CHECK) - if not has_builds_to_do and CI.JobNames.BUILD_CHECK in self.jobs_to_do: - print(f"Filter job [{CI.JobNames.BUILD_CHECK}] - no builds to do") - del self.jobs_to_do[CI.JobNames.BUILD_CHECK] + for job in remove_from_to_do: + print(f"Filter job [{job}] - not affected by the change") + if job in self.jobs_to_do: + del self.jobs_to_do[job] + if job in self.jobs_to_wait: + del self.jobs_to_wait[job] + if job in self.jobs_to_skip: + self.jobs_to_skip.remove(job) def await_pending_jobs(self, is_release: bool, dry_run: bool = False) -> None: """ @@ -760,17 +763,13 @@ class CiCache: # TIMEOUT * MAX_ROUNDS_TO_WAIT must be less than 6h (GH job timeout) with a room for rest RunConfig work TIMEOUT = 3000 # 50 min MAX_ROUNDS_TO_WAIT = 6 - MAX_JOB_NUM_TO_WAIT = 3 round_cnt = 0 - # FIXME: temporary experiment: lets enable await for PR' workflows but for a shorter time if not is_release: - MAX_ROUNDS_TO_WAIT = 3 + # in PRs we can wait only for builds, TIMEOUT*MAX_ROUNDS_TO_WAIT=100min is enough + MAX_ROUNDS_TO_WAIT = 2 - while ( - len(self.jobs_to_wait) > MAX_JOB_NUM_TO_WAIT - and round_cnt < MAX_ROUNDS_TO_WAIT - ): + while round_cnt < MAX_ROUNDS_TO_WAIT: round_cnt += 1 GHActions.print_in_group( f"Wait pending jobs, round [{round_cnt}/{MAX_ROUNDS_TO_WAIT}]:", @@ -812,6 +811,10 @@ class CiCache: f"Job [{job_name}_[{batch}/{num_batches}]] is not pending anymore" ) job_config.batches.remove(batch) + if not job_config.batches: + print(f"Remove job [{job_name}] from jobs_to_do") + self.jobs_to_skip.append(job_name) + del self.jobs_to_do[job_name] else: print( f"NOTE: Job [{job_name}:{batch}] finished failed - do not add to ready" @@ -822,9 +825,7 @@ class CiCache: await_finished.add(job_name) for job in await_finished: - self.jobs_to_skip.append(job) del self.jobs_to_wait[job] - del self.jobs_to_do[job] if not dry_run: expired_sec = int(time.time()) - start_at diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 8eda6e6b96f..9b9ddee5326 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -13,6 +13,9 @@ class CI: each config item in the below dicts should be an instance of JobConfig class or inherited from it """ + MAX_TOTAL_FAILURES_BEFORE_BLOCKING_CI = 5 + MAX_TOTAL_FAILURES_PER_JOB_BEFORE_BLOCKING_CI = 2 + # reimport types to CI class so that they visible as CI.* and mypy is happy # pylint:disable=useless-import-alias,reimported,import-outside-toplevel from ci_definitions import BuildConfig as BuildConfig @@ -410,7 +413,9 @@ class CI: release_only=True, ), JobNames.INTEGRATION_TEST_FLAKY: CommonJobConfigs.INTEGRATION_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_ASAN], pr_only=True + required_builds=[BuildNames.PACKAGE_ASAN], + pr_only=True, + reference_job_name=JobNames.INTEGRATION_TEST_TSAN, ), JobNames.COMPATIBILITY_TEST: CommonJobConfigs.COMPATIBILITY_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE], @@ -452,7 +457,10 @@ class CI: required_builds=[BuildNames.PACKAGE_UBSAN], ), JobNames.STATELESS_TEST_FLAKY_ASAN: 
CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_ASAN], pr_only=True, timeout=3600 + required_builds=[BuildNames.PACKAGE_ASAN], + pr_only=True, + timeout=3600, + reference_job_name=JobNames.STATELESS_TEST_RELEASE, ), JobNames.JEPSEN_KEEPER: JobConfig( required_builds=[BuildNames.BINARY_RELEASE], @@ -637,7 +645,7 @@ class CI: @classmethod def is_test_job(cls, job: str) -> bool: - return not cls.is_build_job(job) and job != cls.JobNames.STYLE_CHECK + return not cls.is_build_job(job) @classmethod def is_docs_job(cls, job: str) -> bool: diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 4ae252560e9..acd9b7fa904 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -284,8 +284,12 @@ class JobConfig: # GH Runner type (tag from @Runners) runner_type: str - # used for config validation in ci unittests + # used in ci unittests for config validation job_name_keyword: str = "" + # name of another job that (if provided) is used to check whether this job was affected by the change (via the CiCache.has_evidence(job=@reference_job_name) call) + # for example: "Stateless flaky check" can use reference_job_name="Stateless tests (release)". "Stateless flaky check" does not run on master, + # so there can be no evidence for it; the "Stateless tests (release)" job name is used to check for evidence instead + reference_job_name: str = "" # builds required for the job (applicable for test jobs) required_builds: Optional[List[str]] = None # build config for the build job (applicable for builds) @@ -327,6 +331,9 @@ class JobConfig: assert self.required_builds return self.required_builds[0] + + def has_digest(self) -> bool: + return self.digest != DigestConfig() + class CommonJobConfigs: """ @@ -440,7 +447,12 @@ class CommonJobConfigs: ) ASTFUZZER_TEST = JobConfig( job_name_keyword="ast", - digest=DigestConfig(), + digest=DigestConfig( + include_paths=[ + "./tests/ci/ast_fuzzer_check.py", + ], + docker=["clickhouse/fuzzer"], + ), run_command="ast_fuzzer_check.py", run_always=True, runner_type=Runners.FUZZER_UNIT_TESTER, diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index 629f37289a9..44bd37fe260 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -1,8 +1,9 @@ import os +import re import subprocess from contextlib import contextmanager from pathlib import Path -from typing import Any, Iterator, List, Union +from typing import Any, Iterator, List, Union, Optional class WithIter(type): @@ -83,3 +84,27 @@ class Shell: check=False, ) return result.returncode == 0 + + +class Utils: + @staticmethod + def get_failed_tests_number(description: str) -> Optional[int]: + description = description.lower() + + pattern = r"fail:\s*(\d+)\s*(?=,|$)" + match = re.search(pattern, description) + if match: + return int(match.group(1)) + return None + + @staticmethod + def is_killed_with_oom(): + return Shell.check( + "sudo dmesg -T | grep -q -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE'" + ) + + @staticmethod + def clear_dmesg(): + Shell.run("sudo dmesg --clear ||:") diff --git a/tests/ci/create_release.py b/tests/ci/create_release.py new file mode 100755 index 00000000000..7f4cf8c787a --- /dev/null +++ b/tests/ci/create_release.py @@ -0,0 +1,710 @@ +import argparse +import dataclasses +import json +import os +import subprocess + +from contextlib import contextmanager +from copy import copy +from pathlib import Path +from typing import 
Iterator, List + +from git_helper import Git, GIT_PREFIX +from ssh import SSHAgent +from env_helper import GITHUB_REPOSITORY, S3_BUILDS_BUCKET +from s3_helper import S3Helper +from autoscale_runners_lambda.lambda_shared.pr import Labels +from version_helper import ( + FILE_WITH_VERSION_PATH, + GENERATED_CONTRIBUTORS, + get_abs_path, + get_version_from_repo, + update_cmake_version, + update_contributors, + VersionType, +) +from ci_config import CI + +CMAKE_PATH = get_abs_path(FILE_WITH_VERSION_PATH) +CONTRIBUTORS_PATH = get_abs_path(GENERATED_CONTRIBUTORS) + + +class ShellRunner: + + @classmethod + def run( + cls, command, check_retcode=True, print_output=True, async_=False, dry_run=False + ): + if dry_run: + print(f"Dry-run: Would run shell command: [{command}]") + return 0, "" + print(f"Running shell command: [{command}]") + if async_: + subprocess.Popen(command.split(" ")) # pylint:disable=consider-using-with + return 0, "" + result = subprocess.run( + command + " 2>&1", + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + check=False, # the return code is asserted below only when check_retcode is set + ) + if print_output: + print(result.stdout) + if check_retcode: + assert result.returncode == 0, f"Return code [{result.returncode}]" + # strip stdout so that callers can use it in shell commands and comparisons directly + return result.returncode, result.stdout.strip() + + +@dataclasses.dataclass +class ReleaseInfo: + version: str + release_tag: str + release_branch: str + commit_sha: str + # lts or stable + codename: str + + @staticmethod + def from_file(file_path: str) -> "ReleaseInfo": + with open(file_path, "r", encoding="utf-8") as json_file: + res = json.load(json_file) + return ReleaseInfo(**res) + + @staticmethod + def prepare(commit_ref: str, release_type: str, outfile: str) -> None: + Path(outfile).parent.mkdir(parents=True, exist_ok=True) + Path(outfile).unlink(missing_ok=True) + version = None + release_branch = None + release_tag = None + codename = None + assert release_type in ("patch", "new") + if release_type == "new": + # check that commit_ref is correct and on the right branch + ShellRunner.run( + f"git merge-base --is-ancestor origin/{commit_ref} origin/master" + ) + with checkout(commit_ref): + _, commit_sha = ShellRunner.run(f"git rev-parse {commit_ref}") + # Git() must be inside "with checkout" contextmanager + git = Git() + version = get_version_from_repo(git=git) + release_branch = "master" + expected_prev_tag = f"v{version.major}.{version.minor}.1.1-new" + version.bump().with_description(VersionType.NEW) + assert ( + git.latest_tag == expected_prev_tag + ), f"BUG: latest tag [{git.latest_tag}], expected [{expected_prev_tag}]" + release_tag = version.describe + codename = ( + VersionType.STABLE + ) # dummy value (artifactory won't be updated for new release) + if release_type == "patch": + with checkout(commit_ref): + _, commit_sha = ShellRunner.run(f"git rev-parse {commit_ref}") + # Git() must be inside "with checkout" contextmanager + git = Git() + version = get_version_from_repo(git=git) + codename = version.get_stable_release_type() + version.with_description(codename) + release_branch = f"{version.major}.{version.minor}" + release_tag = version.describe + ShellRunner.run(f"{GIT_PREFIX} fetch origin {release_branch} --tags") + # check that the commit is correct and on the right branch + ShellRunner.run( + f"git merge-base --is-ancestor {commit_ref} origin/{release_branch}" + ) + if version.patch == 1: + expected_version = copy(version) + expected_version.bump() + expected_tag_prefix = ( + f"v{expected_version.major}.{expected_version.minor}-" + ) + expected_tag_suffix = "-new" + else: + # for later patches the previous tag must be the preceding patch release of the same series + expected_tag_prefix = ( + f"v{version.major}.{version.minor}.{version.patch-1}." + ) + expected_tag_suffix = f"-{version.get_stable_release_type()}" + assert git.latest_tag.startswith( + expected_tag_prefix + ) and git.latest_tag.endswith( + expected_tag_suffix + ), f"BUG: Unexpected latest tag [{git.latest_tag}] expected [{expected_tag_prefix}*{expected_tag_suffix}]" + + assert ( + release_branch + and commit_sha + and release_tag + and version + and codename in ("lts", "stable") + ) + res = ReleaseInfo( + release_branch=release_branch, + commit_sha=commit_sha, + release_tag=release_tag, + version=version.string, + codename=codename, + ) + with open(outfile, "w", encoding="utf-8") as f: + print(json.dumps(dataclasses.asdict(res), indent=2), file=f) + + def push_release_tag(self, dry_run: bool) -> None: + if dry_run: + # remove locally created tag from prev run + ShellRunner.run( + f"{GIT_PREFIX} tag -l | grep -q {self.release_tag} && git tag -d {self.release_tag} ||:" + ) + # Create release tag + print( + f"Create and push release tag [{self.release_tag}], commit [{self.commit_sha}]" + ) + tag_message = f"Release {self.release_tag}" + ShellRunner.run( + f"{GIT_PREFIX} tag -a -m '{tag_message}' {self.release_tag} {self.commit_sha}" + ) + cmd_push_tag = f"{GIT_PREFIX} push origin {self.release_tag}:{self.release_tag}" + ShellRunner.run(cmd_push_tag, dry_run=dry_run) + + @staticmethod + def _create_gh_label(label: str, color_hex: str, dry_run: bool) -> None: + cmd = f"gh api repos/{GITHUB_REPOSITORY}/labels -f name={label} -f color={color_hex}" + ShellRunner.run(cmd, dry_run=dry_run) + + def push_new_release_branch(self, dry_run: bool) -> None: + assert ( + self.release_branch == "master" + ), "New release branch can be created only for release type [new]" + git = Git() + version = get_version_from_repo(git=git) + new_release_branch = f"{version.major}.{version.minor}" + stable_release_type = version.get_stable_release_type() + version_after_release = copy(version) + version_after_release.bump() + assert ( + version_after_release.string == self.version + ), f"Unexpected current version in git, must precede [{self.version}] by one step, actual [{version.string}]" + if dry_run: + # remove locally created branch from prev run + ShellRunner.run( + f"{GIT_PREFIX} branch -l | grep -q {new_release_branch} && git branch -d {new_release_branch} ||:" + ) + print( + f"Create and push new release branch [{new_release_branch}], commit [{self.commit_sha}]" + ) + with checkout(self.release_branch): + with checkout_new(new_release_branch): + pr_labels = f"--label {Labels.RELEASE}" + if stable_release_type == VersionType.LTS: + pr_labels += f" --label {Labels.RELEASE_LTS}" + cmd_push_branch = ( + f"{GIT_PREFIX} push --set-upstream origin {new_release_branch}" + ) + ShellRunner.run(cmd_push_branch, dry_run=dry_run) + + print("Create backport labels for the new release branch") + ReleaseInfo._create_gh_label( + f"v{new_release_branch}-must-backport", "10dbed", dry_run=dry_run + ) + ReleaseInfo._create_gh_label( + f"v{new_release_branch}-affected", "c2bfff", dry_run=dry_run + ) + # the trailing backslashes keep the multi-line f-string a single shell command + ShellRunner.run( + f"""gh pr create --repo {GITHUB_REPOSITORY} --title 'Release pull request for branch {new_release_branch}' \ + --head {new_release_branch} {pr_labels} \ + --body 'This PullRequest is a part of ClickHouse release cycle. It is used by CI system only. Do not perform any changes with it.' \ 
+ """, + dry_run=dry_run, + ) + + def update_version_and_contributors_list(self, dry_run: bool) -> None: + # Bump version, update contributors list, create PR + branch_upd_version_contributors = f"bump_version_{self.version}" + with checkout(self.commit_sha): + git = Git() + version = get_version_from_repo(git=git) + if self.release_branch == "master": + version.bump() + version.with_description(VersionType.TESTING) + else: + version.with_description(version.get_stable_release_type()) + assert ( + version.string == self.version + ), f"BUG: version in release info does not match version in git commit, expected [{self.version}], got [{version.string}]" + with checkout(self.release_branch): + with checkout_new(branch_upd_version_contributors): + update_cmake_version(version) + update_contributors(raise_error=True) + cmd_commit_version_upd = f"{GIT_PREFIX} commit '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}' -m 'Update autogenerated version to {self.version} and contributors'" + cmd_push_branch = f"{GIT_PREFIX} push --set-upstream origin {branch_upd_version_contributors}" + body_file = get_abs_path(".github/PULL_REQUEST_TEMPLATE.md") + actor = os.getenv("GITHUB_ACTOR", "") or "me" + cmd_create_pr = f"gh pr create --repo {GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body-file '{body_file}' --label 'do not test' --assignee @{actor}" + ShellRunner.run(cmd_commit_version_upd, dry_run=dry_run) + ShellRunner.run(cmd_push_branch, dry_run=dry_run) + ShellRunner.run(cmd_create_pr, dry_run=dry_run) + if dry_run: + ShellRunner.run( + f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'" + ) + ShellRunner.run( + f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'" + ) + + def create_gh_release(self, packages_files: List[str], dry_run: bool) -> None: + repo = os.getenv("GITHUB_REPOSITORY") + assert repo + cmds = [] + cmds.append( + f"gh release create --repo {repo} --title 'Release {self.release_tag}' {self.release_tag}" + ) + for file in packages_files: + cmds.append(f"gh release upload {self.release_tag} {file}") + if not dry_run: + for cmd in cmds: + ShellRunner.run(cmd) + else: + print("Dry-run, would run commands:") + print("\n * ".join(cmds)) + + +class RepoTypes: + RPM = "rpm" + DEBIAN = "deb" + TGZ = "tgz" + + +class PackageDownloader: + PACKAGES = ( + "clickhouse-client", + "clickhouse-common-static", + "clickhouse-common-static-dbg", + "clickhouse-keeper", + "clickhouse-keeper-dbg", + "clickhouse-server", + ) + + EXTRA_PACKAGES = ( + "clickhouse-library-bridge", + "clickhouse-odbc-bridge", + ) + PACKAGE_TYPES = (CI.BuildNames.PACKAGE_RELEASE, CI.BuildNames.PACKAGE_AARCH64) + MACOS_PACKAGE_TO_BIN_SUFFIX = { + CI.BuildNames.BINARY_DARWIN: "macos", + CI.BuildNames.BINARY_DARWIN_AARCH64: "macos-aarch64", + } + LOCAL_DIR = "/tmp/packages" + + @classmethod + def _get_arch_suffix(cls, package_arch, repo_type): + if package_arch == CI.BuildNames.PACKAGE_RELEASE: + return ( + "amd64" if repo_type in (RepoTypes.DEBIAN, RepoTypes.TGZ) else "x86_64" + ) + elif package_arch == CI.BuildNames.PACKAGE_AARCH64: + return ( + "arm64" if repo_type in (RepoTypes.DEBIAN, RepoTypes.TGZ) else "aarch64" + ) + else: + assert False, "BUG" + + def __init__(self, release, commit_sha, version): + assert version.startswith(release), "Invalid release branch or version" + major, minor = map(int, release.split(".")) + self.package_names = list(self.PACKAGES) + if major > 24 or (major == 24 and minor > 3):
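+ # releases after 24.3 also ship the separately packaged bridge binaries + self.package_names += 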
list(self.EXTRA_PACKAGES) + self.release = release + self.commit_sha = commit_sha + self.version = version + self.s3 = S3Helper() + self.deb_package_files = [] + self.rpm_package_files = [] + self.tgz_package_files = [] + # just binaries for macos + self.macos_package_files = ["clickhouse-macos", "clickhouse-macos-aarch64"] + self.file_to_type = {} + + ShellRunner.run(f"mkdir -p {self.LOCAL_DIR}") + + for package_type in self.PACKAGE_TYPES: + for package in self.package_names: + deb_package_file_name = f"{package}_{self.version}_{self._get_arch_suffix(package_type, RepoTypes.DEBIAN)}.deb" + self.deb_package_files.append(deb_package_file_name) + self.file_to_type[deb_package_file_name] = package_type + + rpm_package_file_name = f"{package}-{self.version}.{self._get_arch_suffix(package_type, RepoTypes.RPM)}.rpm" + self.rpm_package_files.append(rpm_package_file_name) + self.file_to_type[rpm_package_file_name] = package_type + + tgz_package_file_name = f"{package}-{self.version}-{self._get_arch_suffix(package_type, RepoTypes.TGZ)}.tgz" + self.tgz_package_files.append(tgz_package_file_name) + self.file_to_type[tgz_package_file_name] = package_type + tgz_package_file_name += ".sha512" + self.tgz_package_files.append(tgz_package_file_name) + self.file_to_type[tgz_package_file_name] = package_type + + def get_deb_packages_files(self): + return self.deb_package_files + + def get_rpm_packages_files(self): + return self.rpm_package_files + + def get_tgz_packages_files(self): + return self.tgz_package_files + + def get_macos_packages_files(self): + return self.macos_package_files + + def get_packages_names(self): + return self.package_names + + def get_all_packages_files(self): + assert self.local_tgz_packages_ready() + assert self.local_deb_packages_ready() + assert self.local_rpm_packages_ready() + assert self.local_macos_packages_ready() + res = [] + for package_file in ( + self.deb_package_files + + self.rpm_package_files + + self.tgz_package_files + + self.macos_package_files + ): + res.append(self.LOCAL_DIR + "/" + package_file) + return res + + def run(self): + ShellRunner.run(f"rm -rf {self.LOCAL_DIR}/*") + for package_file in ( + self.deb_package_files + self.rpm_package_files + self.tgz_package_files + ): + print(f"Downloading: [{package_file}]") + s3_path = "/".join( + [ + self.release, + self.commit_sha, + self.file_to_type[package_file], + package_file, + ] + ) + self.s3.download_file( + bucket=S3_BUILDS_BUCKET, + s3_path=s3_path, + local_file_path="/".join([self.LOCAL_DIR, package_file]), + ) + + for macos_package, bin_suffix in self.MACOS_PACKAGE_TO_BIN_SUFFIX.items(): + binary_name = "clickhouse" + destination_binary_name = f"{binary_name}-{bin_suffix}" + assert destination_binary_name in self.macos_package_files + print( + f"Downloading: [{macos_package}] binary to [{destination_binary_name}]" + ) + s3_path = "/".join( + [ + self.release, + self.commit_sha, + macos_package, + binary_name, + ] + ) + self.s3.download_file( + bucket=S3_BUILDS_BUCKET, + s3_path=s3_path, + local_file_path="/".join([self.LOCAL_DIR, destination_binary_name]), + ) + + def local_deb_packages_ready(self) -> bool: + assert self.deb_package_files + for package_file in self.deb_package_files: + print(f"Check package is downloaded [{package_file}]") + if not Path(self.LOCAL_DIR + "/" + package_file).is_file(): + return False + return True + + def local_rpm_packages_ready(self) -> bool: + assert self.rpm_package_files + for package_file in self.rpm_package_files: + print(f"Check package is downloaded 
[{package_file}]") + if not Path(self.LOCAL_DIR + "/" + package_file).is_file(): + return False + return True + + def local_tgz_packages_ready(self) -> bool: + assert self.tgz_package_files + for package_file in self.tgz_package_files: + print(f"Check package is downloaded [{package_file}]") + if not Path(self.LOCAL_DIR + "/" + package_file).is_file(): + return False + return True + + def local_macos_packages_ready(self) -> bool: + assert self.macos_package_files + for package_file in self.macos_package_files: + print(f"Check package is downloaded [{package_file}]") + if not Path(self.LOCAL_DIR + "/" + package_file).is_file(): + return False + return True + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Creates release", + ) + parser.add_argument( + "--prepare-release-info", + action="store_true", + help="Initial step to prepare info like release branch, release tag, etc.", + ) + parser.add_argument( + "--push-release-tag", + action="store_true", + help="Creates and pushes git tag", + ) + parser.add_argument( + "--push-new-release-branch", + action="store_true", + help="Creates and pushes new release branch and corresponding service gh labels for backports", + ) + parser.add_argument( + "--create-bump-version-pr", + action="store_true", + help="Updates version, contributors' list and creates PR", + ) + parser.add_argument( + "--download-packages", + action="store_true", + help="Downloads all required packages from s3", + ) + parser.add_argument( + "--create-gh-release", + action="store_true", + help="Create GH Release object and attach all packages", + ) + parser.add_argument( + "--ref", + type=str, + help="the commit hash or branch", + ) + parser.add_argument( + "--release-type", + choices=("new", "patch"), + # dest="release_type", + help="a release type to bump the major.minor.patch version part, " + "new branch is created only for the value 'new'", + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="do not make any actual changes in the repo, just show what will be done", + ) + parser.add_argument( + "--outfile", + default="", + type=str, + help="output file to write json result to, if not set - stdout", + ) + parser.add_argument( + "--infile", + default="", + type=str, + help="input file with release info", + ) + + return parser.parse_args() + + +@contextmanager +def checkout(ref: str) -> Iterator[None]: + _, orig_ref = ShellRunner.run(f"{GIT_PREFIX} symbolic-ref --short HEAD") + rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}" + assert orig_ref + if ref not in (orig_ref,): + ShellRunner.run(f"{GIT_PREFIX} checkout {ref}") + try: + yield + except (Exception, KeyboardInterrupt) as e: + print(f"ERROR: Exception [{e}]") + ShellRunner.run(rollback_cmd) + raise + ShellRunner.run(rollback_cmd) + + +@contextmanager +def checkout_new(ref: str) -> Iterator[None]: + _, orig_ref = ShellRunner.run(f"{GIT_PREFIX} symbolic-ref --short HEAD") + rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}" + assert orig_ref + ShellRunner.run(f"{GIT_PREFIX} checkout -b {ref}") + try: + yield + except (Exception, KeyboardInterrupt) as e: + print(f"ERROR: Exception [{e}]") + ShellRunner.run(rollback_cmd) + raise + ShellRunner.run(rollback_cmd) + + +if __name__ == "__main__": + args = parse_args() + + # prepare ssh for git if needed + _ssh_agent = None + _key_pub = None + if os.getenv("ROBOT_CLICKHOUSE_SSH_KEY", ""): + _key = os.getenv("ROBOT_CLICKHOUSE_SSH_KEY") + 
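# hand the key to a fresh ssh-agent so the git pushes below authenticate as the robot + 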
_ssh_agent = SSHAgent() + _key_pub = _ssh_agent.add(_key) + _ssh_agent.print_keys() + + if args.prepare_release_info: + assert ( + args.ref and args.release_type and args.outfile + ), "--ref, --release-type and --outfile must be provided with --prepare-release-info" + ReleaseInfo.prepare( + commit_ref=args.ref, release_type=args.release_type, outfile=args.outfile + ) + if args.push_release_tag: + assert args.infile, "--infile must be provided" + release_info = ReleaseInfo.from_file(args.infile) + release_info.push_release_tag(dry_run=args.dry_run) + if args.push_new_release_branch: + assert args.infile, "--infile must be provided" + release_info = ReleaseInfo.from_file(args.infile) + release_info.push_new_release_branch(dry_run=args.dry_run) + if args.create_bump_version_pr: + # TODO: store link to PR in release info + assert args.infile, "--infile must be provided" + release_info = ReleaseInfo.from_file(args.infile) + release_info.update_version_and_contributors_list(dry_run=args.dry_run) + if args.download_packages: + assert args.infile, "--infile must be provided" + release_info = ReleaseInfo.from_file(args.infile) + p = PackageDownloader( + release=release_info.release_branch, + commit_sha=release_info.commit_sha, + version=release_info.version, + ) + p.run() + if args.create_gh_release: + assert args.infile, "--infile must be provided" + release_info = ReleaseInfo.from_file(args.infile) + p = PackageDownloader( + release=release_info.release_branch, + commit_sha=release_info.commit_sha, + version=release_info.version, + ) + release_info.create_gh_release(p.get_all_packages_files(), args.dry_run) + + # tear down ssh + if _ssh_agent and _key_pub: + _ssh_agent.remove(_key_pub) + + +""" +Prepare release machine: + +### INSTALL PACKAGES +sudo apt update +sudo apt install --yes --no-install-recommends python3-dev python3-pip gh unzip +sudo apt install --yes python3-boto3 +sudo apt install --yes python3-github +sudo apt install --yes python3-unidiff +sudo apt install --yes s3fs + +### INSTALL AWS CLI +cd /tmp +curl "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip" -o "awscliv2.zip" +unzip awscliv2.zip +sudo ./aws/install +rm -rf aws* +cd - + +### INSTALL GH ACTIONS RUNNER: +# Create a folder +RUNNER_VERSION=2.317.0 +cd ~ +mkdir actions-runner && cd actions-runner +# Download the latest runner package +runner_arch() { + case $(uname -m) in + x86_64 ) + echo x64;; + aarch64 ) + echo arm64;; + esac +} +curl -O -L https://github.com/actions/runner/releases/download/v$RUNNER_VERSION/actions-runner-linux-$(runner_arch)-$RUNNER_VERSION.tar.gz +# Extract the installer +tar xzf ./actions-runner-linux-$(runner_arch)-$RUNNER_VERSION.tar.gz +rm ./actions-runner-linux-$(runner_arch)-$RUNNER_VERSION.tar.gz + +### Install reprepro: +cd ~ +sudo apt install dpkg-dev libgpgme-dev libdb-dev libbz2-dev liblzma-dev libarchive-dev shunit2 db-util debhelper +git clone https://salsa.debian.org/debian/reprepro.git +cd reprepro +dpkg-buildpackage -b --no-sign && sudo dpkg -i ../reprepro_$(dpkg-parsechangelog --show-field Version)_$(dpkg-architecture -q DEB_HOST_ARCH).deb + +### Install createrepo-c: +sudo apt install createrepo-c +createrepo_c --version +#Version: 0.17.3 (Features: DeltaRPM LegacyWeakdeps ) + +### Import gpg sign key +gpg --import key.pgp +gpg --list-secret-keys + +### Install docker +sudo su; cd ~ + +deb_arch() { + case $(uname -m) in + x86_64 ) + echo amd64;; + aarch64 ) + echo arm64;; + esac +} +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o 
/usr/share/keyrings/docker-archive-keyring.gpg + +echo "deb [arch=$(deb_arch) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + +sudo apt-get update +sudo apt-get install --yes --no-install-recommends docker-ce docker-buildx-plugin docker-ce-cli containerd.io + +sudo usermod -aG docker ubuntu + +# enable ipv6 in containers (fixed-cidr-v6 is some random network mask) +cat <<EOT > /etc/docker/daemon.json +{ + "ipv6": true, + "fixed-cidr-v6": "2001:db8:1::/64", + "log-driver": "json-file", + "log-opts": { + "max-file": "5", + "max-size": "1000m" + }, + "insecure-registries" : ["dockerhub-proxy.dockerhub-proxy-zone:5000"], + "registry-mirrors" : ["http://dockerhub-proxy.dockerhub-proxy-zone:5000"] +} +EOT + +# if docker build does not work: + sudo systemctl restart docker + docker buildx rm mybuilder + docker buildx create --name mybuilder --driver docker-container --use + docker buildx inspect mybuilder --bootstrap + +### Install tailscale + +### Configure GH runner +""" diff --git a/tests/ci/docker_server.py b/tests/ci/docker_server.py index 151cc5a4c02..21fc02ce02a 100644 --- a/tests/ci/docker_server.py +++ b/tests/ci/docker_server.py @@ -11,7 +11,6 @@ from os import path as p from pathlib import Path from typing import Dict, List -from build_check import get_release_or_pr from build_download_helper import read_build_urls from docker_images_helper import DockerImageData, docker_login from env_helper import ( @@ -22,7 +21,7 @@ from env_helper import ( TEMP_PATH, ) from git_helper import Git -from pr_info import PRInfo +from pr_info import PRInfo, EventType from report import FAILURE, SUCCESS, JobReport, TestResult, TestResults from stopwatch import Stopwatch from tee_popen import TeePopen @@ -63,6 +62,12 @@ def parse_args() -> argparse.Namespace: help="a version to build, automaticaly got from version_helper, accepts either " "tag ('refs/tags/' is removed automatically) or a normal 22.2.2.2 format", ) + parser.add_argument( + "--sha", + type=str, + default="", + help="sha of the commit to use packages from", + ) parser.add_argument( "--release-type", type=str, @@ -122,7 +127,7 @@ def parse_args() -> argparse.Namespace: def retry_popen(cmd: str, log_file: Path) -> int: - max_retries = 5 + max_retries = 2 for retry in range(max_retries): # From time to time docker build may failed. 
Curl issues, or even push # It will sleep progressively 5, 15, 30 and 50 seconds between retries @@ -370,13 +375,22 @@ def main(): tags = gen_tags(args.version, args.release_type) repo_urls = {} direct_urls: Dict[str, List[str]] = {} - release_or_pr, _ = get_release_or_pr(pr_info, args.version) + if pr_info.event_type == EventType.PULL_REQUEST: + release_or_pr = str(pr_info.number) + sha = pr_info.sha + elif pr_info.event_type == EventType.PUSH and pr_info.is_master: + release_or_pr = str(0) + sha = pr_info.sha + else: + release_or_pr = f"{args.version.major}.{args.version.minor}" + sha = args.sha + assert sha for arch, build_name in zip(ARCH, ("package_release", "package_aarch64")): if not args.bucket_prefix: repo_urls[arch] = ( f"{S3_DOWNLOAD}/{S3_BUILDS_BUCKET}/" - f"{release_or_pr}/{pr_info.sha}/{build_name}" + f"{release_or_pr}/{sha}/{build_name}" ) else: repo_urls[arch] = f"{args.bucket_prefix}/{build_name}" diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index ef9f4dc016e..41c7ed963c9 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -24,6 +24,18 @@ from tee_popen import TeePopen NO_CHANGES_MSG = "Nothing to run" +class SensitiveFormatter(logging.Formatter): + @staticmethod + def _filter(s): + return re.sub( + r"(.*)(AZURE_CONNECTION_STRING.*\')(.*)", r"\1AZURE_CONNECTION_STRING\3", s + ) + + def format(self, record): + original = logging.Formatter.format(self, record) + return self._filter(original) + + def get_additional_envs( check_name: str, run_by_hash_num: int, run_by_hash_total: int ) -> List[str]: @@ -213,6 +225,9 @@ def parse_args(): def main(): logging.basicConfig(level=logging.INFO) + for handler in logging.root.handlers: + # pylint: disable=protected-access + handler.setFormatter(SensitiveFormatter(handler.formatter._fmt)) # type: ignore stopwatch = Stopwatch() diff --git a/tests/ci/merge_pr.py b/tests/ci/merge_pr.py index 37c08fc4efe..94456506879 100644 --- a/tests/ci/merge_pr.py +++ b/tests/ci/merge_pr.py @@ -26,6 +26,8 @@ from pr_info import PRInfo from report import SUCCESS, FAILURE from env_helper import GITHUB_UPSTREAM_REPOSITORY, GITHUB_REPOSITORY from synchronizer_utils import SYNC_BRANCH_PREFIX +from ci_config import CI +from ci_utils import Utils # The team name for accepted approvals TEAM_NAME = getenv("GITHUB_TEAM_NAME", "core") @@ -251,23 +253,88 @@ def main(): # set mergeable check status and exit commit = get_commit(gh, args.pr_info.sha) statuses = get_commit_filtered_statuses(commit) - state = trigger_mergeable_check( - commit, - statuses, - workflow_failed=(args.wf_status != "success"), - ) - # Process upstream StatusNames.SYNC - pr_info = PRInfo() - if ( - pr_info.head_ref.startswith(f"{SYNC_BRANCH_PREFIX}/pr/") - and GITHUB_REPOSITORY != GITHUB_UPSTREAM_REPOSITORY - ): - print("Updating upstream statuses") - update_upstream_sync_status(pr_info, state) + max_failed_tests_per_job = 0 + job_name_with_max_failures = None + total_failed_tests = 0 + failed_to_get_info = False + has_failed_statuses = False + for status in statuses: + if not CI.is_required(status.context) or status.context in ( + CI.StatusNames.SYNC, + CI.StatusNames.PR_CHECK, + ): + # CI.StatusNames.SYNC or CI.StatusNames.PR_CHECK should not be checked + continue + print(f"Check status [{status.context}], [{status.state}]") + if status.state == FAILURE: + has_failed_statuses = True + failed_cnt = Utils.get_failed_tests_number(status.description) + if failed_cnt is None: + failed_to_get_info = True + print( + f"WARNING: 
failed to get number of failed tests from [{status.description}]" + ) + else: + if failed_cnt > max_failed_tests_per_job: + job_name_with_max_failures = status.context + max_failed_tests_per_job = failed_cnt + total_failed_tests += failed_cnt + print( + f"Failed test cases in [{status.context}]: [{failed_cnt}], total failures [{total_failed_tests}]" + ) + elif status.state != SUCCESS and status.context not in ( + CI.StatusNames.SYNC, + CI.StatusNames.PR_CHECK, + ): + # do not block CI on failures in (CI.StatusNames.SYNC, CI.StatusNames.PR_CHECK) + has_failed_statuses = True + print( + f"Unexpected status for [{status.context}]: [{status.state}] - block further testing" + ) + failed_to_get_info = True - if args.wf_status != "success": - # exit with 1 to rerun on workflow failed job restart + can_continue = True + if total_failed_tests > CI.MAX_TOTAL_FAILURES_BEFORE_BLOCKING_CI: + print( + f"Required check has [{total_failed_tests}] failed - block further testing" + ) + can_continue = False + if max_failed_tests_per_job > CI.MAX_TOTAL_FAILURES_PER_JOB_BEFORE_BLOCKING_CI: + print( + f"Job [{job_name_with_max_failures}] has [{max_failed_tests_per_job}] failures - block further testing" + ) + can_continue = False + if failed_to_get_info: + print("Unexpected commit status state - block further testing") + can_continue = False + if args.wf_status != SUCCESS and not has_failed_statuses: + # workflow failed but reason is unknown as no failed statuses present + can_continue = False + print( + "WARNING: Either the runner is faulty or the operating status is unknown. The first is self-healing, the second requires investigation." + ) + + if args.wf_status == SUCCESS or has_failed_statuses: + # do not set the mergeable check status if args.wf_status == failure; apparently the workflow had dead runners and is to be restarted + state = trigger_mergeable_check( + commit, + statuses, + ) + # Process upstream StatusNames.SYNC + pr_info = PRInfo() + if ( + pr_info.head_ref.startswith(f"{SYNC_BRANCH_PREFIX}/pr/") + and GITHUB_REPOSITORY != GITHUB_UPSTREAM_REPOSITORY + ): + print("Updating upstream statuses") + update_upstream_sync_status(pr_info, state) + else: + print( + "Workflow failed but no failed statuses found (dead runner?) - cannot set Mergeable Check status" + ) + 
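+ # exit with 1 so that a restart of the failed workflow re-runs this check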
+ if not can_continue: sys.exit(1) sys.exit(0) diff --git a/tests/ci/report.py b/tests/ci/report.py index bdaa2e15130..4be7b438f4f 100644 --- a/tests/ci/report.py +++ b/tests/ci/report.py @@ -23,7 +23,7 @@ from typing import ( from build_download_helper import get_gh_api from ci_config import CI from ci_utils import normalize_string -from env_helper import REPORT_PATH, TEMP_PATH +from env_helper import REPORT_PATH, GITHUB_WORKSPACE logger = logging.getLogger(__name__) @@ -244,7 +244,8 @@ HTML_TEST_PART = """ """ BASE_HEADERS = ["Test name", "Test status"] -JOB_REPORT_FILE = Path(TEMP_PATH) / "job_report.json" +# must not live in the TEMP directory, or in any other directory that may be cleaned during job execution +JOB_REPORT_FILE = Path(GITHUB_WORKSPACE) / "job_report.json" @dataclass @@ -296,6 +297,33 @@ class JobReport: build_dir_for_upload: Union[Path, str] = "" # if False no GH commit status will be created by CI need_commit_status: bool = True + # indicates that this is not a real job report, but a report for a job that was skipped by the rerun check + job_skipped: bool = False + # indicates that the report was generated by the CI script itself, so that it can later be detected whether the job was killed before producing a real report + pre_report: bool = False + exit_code: int = -1 + + @staticmethod + def create_pre_report() -> "JobReport": + return JobReport( + status=ERROR, + description="", + test_results=[], + start_time=datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"), + duration=0.0, + additional_files=[], + pre_report=True, + ) + + def update_duration(self): + if not self.start_time: + self.duration = 0.0 + else: + start_time = datetime.datetime.strptime( + self.start_time, "%Y-%m-%d %H:%M:%S" + ) + current_time = datetime.datetime.utcnow() + self.duration = (current_time - start_time).total_seconds() def __post_init__(self): assert self.status in (SUCCESS, ERROR, FAILURE, PENDING) diff --git a/tests/ci/ssh.py b/tests/ci/ssh.py index 321826fcf44..89d90d724d2 100644 --- a/tests/ci/ssh.py +++ b/tests/ci/ssh.py @@ -37,9 +37,9 @@ class SSHAgent: ssh_options = ( "," + os.environ["SSH_OPTIONS"] if os.environ.get("SSH_OPTIONS") else "" ) - os.environ[ - "SSH_OPTIONS" - ] = f"{ssh_options}UserKnownHostsFile=/dev/null,StrictHostKeyChecking=no" + os.environ["SSH_OPTIONS"] = ( + f"{ssh_options}UserKnownHostsFile=/dev/null,StrictHostKeyChecking=no" + ) def add(self, key): key_pub = self._key_pub(key) diff --git a/tests/ci/stress_check.py b/tests/ci/stress_check.py index 486bfc25e22..85da601e379 100644 --- a/tests/ci/stress_check.py +++ b/tests/ci/stress_check.py @@ -3,6 +3,7 @@ import csv import logging import os +import re import subprocess import sys from pathlib import Path @@ -19,6 +20,18 @@ from stopwatch import Stopwatch from tee_popen import TeePopen +class SensitiveFormatter(logging.Formatter): + @staticmethod + def _filter(s): + return re.sub( + r"(.*)(AZURE_CONNECTION_STRING.*\')(.*)", r"\1AZURE_CONNECTION_STRING\3", s + ) + + def format(self, record): + original = logging.Formatter.format(self, record) + return self._filter(original) + + def get_additional_envs(check_name: str) -> List[str]: result = [] azure_connection_string = get_parameter_from_ssm("azure_connection_string") @@ -117,6 +130,9 @@ def process_results( def run_stress_test(docker_image_name: str) -> None: logging.basicConfig(level=logging.INFO) + for handler in logging.root.handlers: + # pylint: disable=protected-access + handler.setFormatter(SensitiveFormatter(handler.formatter._fmt)) # type: ignore stopwatch = 
Stopwatch() temp_path = Path(TEMP_PATH) diff --git a/tests/ci/style_check.py b/tests/ci/style_check.py index 9deae06d9f4..36620d44a2d 100644 --- a/tests/ci/style_check.py +++ b/tests/ci/style_check.py @@ -16,7 +16,15 @@ from docker_images_helper import get_docker_image, pull_image from env_helper import IS_CI, REPO_COPY, TEMP_PATH, GITHUB_EVENT_PATH from git_helper import GIT_PREFIX, git_runner from pr_info import PRInfo -from report import ERROR, FAILURE, SUCCESS, JobReport, TestResults, read_test_results +from report import ( + ERROR, + FAILURE, + SUCCESS, + JobReport, + TestResults, + read_test_results, + FAIL, +) from ssh import SSHKey from stopwatch import Stopwatch @@ -192,15 +200,6 @@ def main(): future = executor.submit(subprocess.run, cmd_shell, shell=True) _ = future.result() - autofix_description = "" - if args.push: - try: - commit_push_staged(pr_info) - except subprocess.SubprocessError: - # do not fail the whole script if the autofix didn't work out - logging.error("Unable to push the autofix. Continue.") - autofix_description = "Failed to push autofix to the PR. " - subprocess.check_call( f"python3 ../../utils/check-style/process_style_check_result.py --in-results-dir {temp_path} " f"--out-results-file {temp_path}/test_results.tsv --out-status-file {temp_path}/check_status.tsv || " @@ -210,6 +209,21 @@ def main(): state, description, test_results, additional_files = process_result(temp_path) + autofix_description = "" + fail_cnt = 0 + # the autofix is pushed only when exactly one check failed (i.e. only black) + for result in test_results: + if result.status in (FAILURE, FAIL): + fail_cnt += 1 + + if args.push and fail_cnt == 1: + try: + commit_push_staged(pr_info) + except subprocess.SubprocessError: + # do not fail the whole script if the autofix didn't work out + logging.error("Unable to push the autofix. Continue.") + autofix_description = "Failed to push autofix to the PR. " + JobReport( + description=f"{autofix_description}{description}", + test_results=test_results, diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py index 558faca915e..4336783e0d5 100644 --- a/tests/ci/test_ci_config.py +++ b/tests/ci/test_ci_config.py @@ -1,6 +1,8 @@ #!/usr/bin/env python3 - +import copy import unittest +import random + from ci_config import CI import ci as CIPY from ci_settings import CiSettings @@ -57,6 +59,18 @@ class TestCIConfig(unittest.TestCase): f"Job [{job}] apparently uses wrong common config with job keyword [{CI.JOB_CONFIGS[job].job_name_keyword}]", ) + def test_job_config_has_proper_values(self): + for job in CI.JobNames: + if CI.JOB_CONFIGS[job].reference_job_name: + reference_job_config = CI.JOB_CONFIGS[ + CI.JOB_CONFIGS[job].reference_job_name + ] + # the reference job must run in all workflows and have a digest + self.assertFalse(reference_job_config.pr_only) + self.assertFalse(reference_job_config.release_only) + self.assertFalse(reference_job_config.run_always) + self.assertNotEqual(reference_job_config.digest, CI.DigestConfig()) + def test_required_checks(self): for job in CI.REQUIRED_CHECKS: if job in (CI.StatusNames.PR_CHECK, CI.StatusNames.SYNC): @@ -269,6 +283,7 @@ class TestCIConfig(unittest.TestCase): ci_cache = CIPY._configure_jobs( S3Helper(), pr_info, settings, skip_jobs=False, dry_run=True ) + ci_cache.filter_out_not_affected_jobs() actual_jobs_to_do = list(ci_cache.jobs_to_do) expected_jobs_to_do = [] for set_ in settings.ci_sets: @@ -401,6 +416,30 @@ class TestCIConfig(unittest.TestCase): """ checks ci.py job configuration """ + + def _reset_ci_cache_to_wait_all_jobs(ci_cache): + # pretend there are pending jobs that we need to wait + ci_cache.jobs_to_wait = dict(ci_cache.jobs_to_do) + for job, config in ci_cache.jobs_to_wait.items(): + assert config.batches + config.pending_batches = list(config.batches) + + for batch in range(config.num_batches): + record = CiCache.Record( + record_type=CiCache.RecordType.PENDING, + job_name=job, + job_digest=ci_cache.job_digests[job], + batch=batch, + num_batches=config.num_batches, + release_branch=True, + ) + for record_t_, records_ in ci_cache.records.items(): + if record_t_.value == CiCache.RecordType.PENDING.value: + records_[record.to_str_key()] = record + assert not ci_cache.jobs_to_skip + assert ci_cache.jobs_to_wait + ci_cache.jobs_to_skip = [] + settings = CiSettings() settings.no_ci_cache = True pr_info = PRInfo(github_event=_TEST_EVENT_JSON) @@ -417,26 +456,6 @@ class TestCIConfig(unittest.TestCase): assert not ci_cache.jobs_to_skip assert not ci_cache.jobs_to_wait - # pretend there are pending jobs that we need to wait - ci_cache.jobs_to_wait = dict(ci_cache.jobs_to_do) - for job, config in ci_cache.jobs_to_wait.items(): - assert not config.pending_batches - assert config.batches - config.pending_batches = list(config.batches) - for job, config in ci_cache.jobs_to_wait.items(): - for batch in range(config.num_batches): - record = CiCache.Record( - record_type=CiCache.RecordType.PENDING, - job_name=job, - job_digest=ci_cache.job_digests[job], - batch=batch, - num_batches=config.num_batches, - release_branch=True, - ) - for record_t_, records_ in ci_cache.records.items(): - if record_t_.value == CiCache.RecordType.PENDING.value: - records_[record.to_str_key()] = record - def _test_await_for_batch( ci_cache: CiCache, record_type: CiCache.RecordType, batch: int ) -> None: @@ -462,32 +481,76 @@ class TestCIConfig(unittest.TestCase): and batch < config_.num_batches ): 
assert batch not in config_.pending_batches - else: - assert batch in config_.pending_batches for _, config_ in ci_cache.jobs_to_do.items(): # jobs to do must have batches to run before/after await # if it's an empty list after await - apparently job has not been removed after await assert config_.batches - _test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 0) - # check all one-batch jobs are in jobs_to_skip - for job in all_jobs_in_wf: - config = CI.JOB_CONFIGS[job] - if config.num_batches == 1: - self.assertTrue(job in ci_cache.jobs_to_skip) - self.assertTrue(job not in ci_cache.jobs_to_do) - else: - self.assertTrue(job not in ci_cache.jobs_to_skip) - self.assertTrue(job in ci_cache.jobs_to_do) - - _test_await_for_batch(ci_cache, CiCache.RecordType.FAILED, 1) - _test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 2) - - self.assertTrue(len(ci_cache.jobs_to_skip) > 0) - self.assertTrue(len(ci_cache.jobs_to_do) > 0) + _reset_ci_cache_to_wait_all_jobs(ci_cache) + _test_await_for_batch(ci_cache, CiCache.RecordType.FAILED, 0) + tested = False + for job, config in ci_cache.jobs_to_do.items(): + if config.batches == [0]: + tested = True + self.assertTrue( + job not in ci_cache.jobs_to_wait, + "Job must be removed from @jobs_to_wait, because its only batch has FAILED cache record", + ) self.assertCountEqual( - list(ci_cache.jobs_to_do) + ci_cache.jobs_to_skip, all_jobs_in_wf + ci_cache.jobs_to_skip, + [], + "No jobs must be skipped, since all cache records are of type FAILED", + ) + assert tested + + # reset jobs_to_wait after previous test + _reset_ci_cache_to_wait_all_jobs(ci_cache) + assert not ci_cache.jobs_to_skip + + # set batch 0 as SUCCESSFUL in ci cache + jobs_to_do_prev = list(ci_cache.jobs_to_do) + jobs_to_skip_prev = [] + jobs_to_wait_prev = list(ci_cache.jobs_to_wait) + _test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 0) + self.assertTrue(len(jobs_to_skip_prev) != len(ci_cache.jobs_to_skip)) + self.assertTrue(len(jobs_to_wait_prev) > len(ci_cache.jobs_to_wait)) + self.assertCountEqual( + list(ci_cache.jobs_to_do) + ci_cache.jobs_to_skip, + jobs_to_do_prev + jobs_to_skip_prev, + ) + + # set batch 1 as SUCCESSFUL in ci cache + jobs_to_do_prev = list(ci_cache.jobs_to_do) + jobs_to_skip_prev = list(ci_cache.jobs_to_skip) + jobs_to_wait_prev = list(ci_cache.jobs_to_wait) + _test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 1) + self.assertTrue(len(jobs_to_skip_prev) != len(ci_cache.jobs_to_skip)) + self.assertTrue(len(jobs_to_wait_prev) > len(ci_cache.jobs_to_wait)) + self.assertCountEqual( + list(ci_cache.jobs_to_do) + ci_cache.jobs_to_skip, + jobs_to_do_prev + jobs_to_skip_prev, + ) + + # set batch 3, 4, 5, 6 as SUCCESSFUL in ci cache + jobs_to_do_prev = list(ci_cache.jobs_to_do) + jobs_to_skip_prev = list(ci_cache.jobs_to_skip) + jobs_to_wait_prev = list(ci_cache.jobs_to_wait) + _test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 2) + self.assertTrue(ci_cache.jobs_to_do) + _test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 3) + self.assertTrue(ci_cache.jobs_to_do) + _test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 4) + self.assertTrue(ci_cache.jobs_to_do) + _test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 5) + self.assertTrue( + not ci_cache.jobs_to_do + ) # by this moment there must be no jobs left as batch 5 is currently the maximum + self.assertTrue(len(jobs_to_skip_prev) != len(ci_cache.jobs_to_skip)) + self.assertTrue(len(jobs_to_wait_prev) > len(ci_cache.jobs_to_wait)) + 
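
Every `assertCountEqual(list(ci_cache.jobs_to_do) + ci_cache.jobs_to_skip, ...)` check in this test enforces the same conservation law: an await round may move a job from to-do to skipped (once all its batches have SUCCESSFUL records) and may shrink `jobs_to_wait`, but the union of to-do and skipped jobs never changes, and FAILED records leave batches runnable. A toy model of that rule, with plain dicts standing in for the real `CiCache`:

# Toy model of one await round: a SUCCESSFUL batch is removed from the job's
# batch list; a job with no batches left moves to the skip list. A FAILED
# batch stays runnable, so nothing is skipped -- matching the assertion above
# that the skip list is empty when all cache records are FAILED.
def await_batch(jobs_to_do, jobs_to_skip, batch, successful):
    for job in list(jobs_to_do):
        batches = jobs_to_do[job]
        if successful and batch in batches:
            batches.remove(batch)
        if not batches:  # fully covered by the cache
            del jobs_to_do[job]
            jobs_to_skip.append(job)


jobs_to_do = {"job_a": [0], "job_b": [0, 1]}
jobs_to_skip = []
before = sorted(list(jobs_to_do) + jobs_to_skip)

await_batch(jobs_to_do, jobs_to_skip, batch=0, successful=True)

# job_a is skipped, job_b still has batch 1 to run; the union is conserved.
assert sorted(list(jobs_to_do) + jobs_to_skip) == before
assert jobs_to_skip == ["job_a"] and jobs_to_do == {"job_b": [1]}
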
self.assertCountEqual( + list(ci_cache.jobs_to_do) + ci_cache.jobs_to_skip, + jobs_to_do_prev + jobs_to_skip_prev, ) def test_ci_py_filters_not_affected_jobs_in_prs(self): @@ -497,79 +560,68 @@ class TestCIConfig(unittest.TestCase): settings = CiSettings() settings.no_ci_cache = True pr_info = PRInfo(github_event=_TEST_EVENT_JSON) - pr_info.event_type = EventType.PUSH - pr_info.number = 0 - assert pr_info.is_release and not pr_info.is_merge_queue + pr_info.event_type = EventType.PULL_REQUEST + pr_info.number = 123 + assert pr_info.is_pr ci_cache = CIPY._configure_jobs( S3Helper(), pr_info, settings, skip_jobs=False, dry_run=True ) self.assertTrue(not ci_cache.jobs_to_skip, "Must be no jobs in skip list") - all_jobs_in_wf = list(ci_cache.jobs_to_do) assert not ci_cache.jobs_to_wait assert not ci_cache.jobs_to_skip + MOCK_AFFECTED_JOBS = [ + CI.JobNames.STATELESS_TEST_S3_DEBUG, + CI.JobNames.STRESS_TEST_TSAN, + ] + MOCK_REQUIRED_BUILDS = [] + # pretend there are pending jobs that we need to wait for job, job_config in ci_cache.jobs_to_do.items(): - ci_cache.jobs_to_wait[job] = job_config + if job in MOCK_AFFECTED_JOBS: + MOCK_REQUIRED_BUILDS += job_config.required_builds + elif job not in MOCK_AFFECTED_JOBS: + ci_cache.jobs_to_wait[job] = job_config - # remove couple tests from to_wait and - # expect they are preserved in @jobs_to_to along with required package_asan - del ci_cache.jobs_to_wait[CI.JobNames.STATELESS_TEST_ASAN] - del ci_cache.jobs_to_wait[CI.JobNames.INTEGRATION_TEST_TSAN] - del ci_cache.jobs_to_wait[CI.JobNames.STATELESS_TEST_MSAN] - - # pretend we have some batches failed for one of the job from the to_do list - failed_job = CI.JobNames.INTEGRATION_TEST_TSAN - failed_job_config = ci_cache.jobs_to_do[failed_job] - FAILED_BATCHES = [0, 3] - for batch in FAILED_BATCHES: - assert batch < failed_job_config.num_batches - record = CiCache.Record( - record_type=CiCache.RecordType.FAILED, - job_name=failed_job, - job_digest=ci_cache.job_digests[failed_job], - batch=batch, - num_batches=failed_job_config.num_batches, - release_branch=True, - ) - for record_t_, records_ in ci_cache.records.items(): - if record_t_.value == CiCache.RecordType.FAILED.value: - records_[record.to_str_key()] = record - - # pretend we have all batches failed for one of the job from the to_do list - failed_job = CI.JobNames.STATELESS_TEST_MSAN - failed_job_config = ci_cache.jobs_to_do[failed_job] - assert failed_job_config.num_batches > 1 - for batch in range(failed_job_config.num_batches): - record = CiCache.Record( - record_type=CiCache.RecordType.FAILED, - job_name=failed_job, - job_digest=ci_cache.job_digests[failed_job], - batch=batch, - num_batches=failed_job_config.num_batches, - release_branch=True, - ) - for record_t_, records_ in ci_cache.records.items(): - if record_t_.value == CiCache.RecordType.FAILED.value: - records_[record.to_str_key()] = record + for job, job_config in ci_cache.jobs_to_do.items(): + if job_config.reference_job_name: + # jobs with reference_job_name in config are not supposed to have records in the cache - continue + continue + if job in MOCK_AFFECTED_JOBS: + continue + for batch in range(job_config.num_batches): + # add any record into cache + record = CiCache.Record( + record_type=random.choice( + [ + CiCache.RecordType.FAILED, + CiCache.RecordType.PENDING, + CiCache.RecordType.SUCCESSFUL, + ] + ), + job_name=job, + job_digest=ci_cache.job_digests[job], + batch=batch, + num_batches=job_config.num_batches, + release_branch=True, + ) + for record_t_, records_ in 
ci_cache.records.items(): + if record_t_.value == CiCache.RecordType.FAILED.value: + records_[record.to_str_key()] = record ci_cache.filter_out_not_affected_jobs() - expected_to_do = [ - CI.JobNames.STATELESS_TEST_ASAN, - CI.BuildNames.PACKAGE_ASAN, - CI.JobNames.INTEGRATION_TEST_TSAN, - CI.BuildNames.PACKAGE_TSAN, - CI.JobNames.BUILD_CHECK, - ] + expected_to_do = ( + [ + CI.JobNames.BUILD_CHECK, + ] + + MOCK_AFFECTED_JOBS + + MOCK_REQUIRED_BUILDS + ) self.assertCountEqual( list(ci_cache.jobs_to_wait), [ - CI.BuildNames.PACKAGE_ASAN, - CI.BuildNames.PACKAGE_TSAN, CI.JobNames.BUILD_CHECK, - ], + ] + + MOCK_REQUIRED_BUILDS, ) self.assertCountEqual(list(ci_cache.jobs_to_do), expected_to_do) - self.assertTrue(ci_cache.jobs_to_do[CI.JobNames.INTEGRATION_TEST_TSAN].batches) - for batch in ci_cache.jobs_to_do[CI.JobNames.INTEGRATION_TEST_TSAN].batches: - self.assertTrue(batch not in FAILED_BATCHES) diff --git a/tests/ci/test_ci_options.py b/tests/ci/test_ci_options.py index 3f158e79f30..f4d14a17512 100644 --- a/tests/ci/test_ci_options.py +++ b/tests/ci/test_ci_options.py @@ -172,14 +172,10 @@ class TestCIOptions(unittest.TestCase): job: CI.JobConfig(runner_type=CI.Runners.STYLE_CHECKER) for job in _TEST_JOB_LIST } - jobs_configs[ - "fuzzers" - ].run_by_label = ( + jobs_configs["fuzzers"].run_by_label = ( "TEST_LABEL" # check "fuzzers" appears in the result due to the label ) - jobs_configs[ - "Integration tests (asan)" - ].release_only = ( + jobs_configs["Integration tests (asan)"].release_only = ( True # still must be included as it's set with include keywords ) filtered_jobs = list( @@ -311,9 +307,9 @@ class TestCIOptions(unittest.TestCase): job: CI.JobConfig(runner_type=CI.Runners.STYLE_CHECKER) for job in _TEST_JOB_LIST } - jobs_configs[ - "fuzzers" - ].run_by_label = "TEST_LABEL" # check "fuzzers" does not appears in the result + jobs_configs["fuzzers"].run_by_label = ( + "TEST_LABEL" # check "fuzzers" does not appears in the result + ) jobs_configs["Integration tests (asan)"].release_only = True filtered_jobs = list( ci_options.apply( diff --git a/tests/ci/version_helper.py b/tests/ci/version_helper.py index 50263f6ebb6..07a7a9601c0 100755 --- a/tests/ci/version_helper.py +++ b/tests/ci/version_helper.py @@ -72,6 +72,19 @@ class ClickHouseVersion: return self.patch_update() raise KeyError(f"wrong part {part} is used") + def bump(self) -> "ClickHouseVersion": + if self.minor < 12: + self._minor += 1 + self._revision += 1 + self._patch = 1 + self._tweak = 1 + else: + self._major += 1 + self._revision += 1 + self._patch = 1 + self._tweak = 1 + return self + def major_update(self) -> "ClickHouseVersion": if self._git is not None: self._git.update() @@ -148,6 +161,11 @@ class ClickHouseVersion: """our X.3 and X.8 are LTS""" return self.minor % 5 == 3 + def get_stable_release_type(self) -> str: + if self.is_lts: + return VersionType.LTS + return VersionType.STABLE + def as_dict(self) -> VERSIONS: return { "revision": self.revision, @@ -168,6 +186,7 @@ class ClickHouseVersion: raise ValueError(f"version type {version_type} not in {VersionType.VALID}") self._description = version_type self._describe = f"v{self.string}-{version_type}" + return self def copy(self) -> "ClickHouseVersion": copy = ClickHouseVersion( diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 79f6b5d71d3..90fb9611151 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -711,9 +711,9 @@ def get_localzone(): class SettingsRandomizer: settings = { - "max_insert_threads": lambda: 32 - if random.random() < 
0.03 - else random.randint(1, 3), + "max_insert_threads": lambda: ( + 12 if random.random() < 0.03 else random.randint(1, 3) + ), "group_by_two_level_threshold": threshold_generator(0.2, 0.2, 1, 1000000), "group_by_two_level_threshold_bytes": threshold_generator( 0.2, 0.2, 1, 50000000 @@ -729,7 +729,7 @@ class SettingsRandomizer: "prefer_localhost_replica": lambda: random.randint(0, 1), "max_block_size": lambda: random.randint(8000, 100000), "max_joined_block_size_rows": lambda: random.randint(8000, 100000), - "max_threads": lambda: 64 if random.random() < 0.03 else random.randint(1, 3), + "max_threads": lambda: 32 if random.random() < 0.03 else random.randint(1, 3), "optimize_append_index": lambda: random.randint(0, 1), "optimize_if_chain_to_multiif": lambda: random.randint(0, 1), "optimize_if_transform_strings_to_enum": lambda: random.randint(0, 1), diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 34f5c28fef8..548b58a17e8 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -1454,9 +1454,9 @@ class ClickHouseCluster: def setup_azurite_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_azurite = True env_variables["AZURITE_PORT"] = str(self.azurite_port) - env_variables[ - "AZURITE_STORAGE_ACCOUNT_URL" - ] = f"http://azurite1:{env_variables['AZURITE_PORT']}/devstoreaccount1" + env_variables["AZURITE_STORAGE_ACCOUNT_URL"] = ( + f"http://azurite1:{env_variables['AZURITE_PORT']}/devstoreaccount1" + ) env_variables["AZURITE_CONNECTION_STRING"] = ( f"DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;" f"AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;" @@ -1653,9 +1653,9 @@ class ClickHouseCluster: # Code coverage files will be placed in database directory # (affect only WITH_COVERAGE=1 build) - env_variables[ - "LLVM_PROFILE_FILE" - ] = "/var/lib/clickhouse/server_%h_%p_%m.profraw" + env_variables["LLVM_PROFILE_FILE"] = ( + "/var/lib/clickhouse/server_%h_%p_%m.profraw" + ) clickhouse_start_command = CLICKHOUSE_START_COMMAND if clickhouse_log_file: @@ -1668,9 +1668,9 @@ class ClickHouseCluster: cluster=self, base_path=self.base_dir, name=name, - base_config_dir=base_config_dir - if base_config_dir - else self.base_config_dir, + base_config_dir=( + base_config_dir if base_config_dir else self.base_config_dir + ), custom_main_configs=main_configs or [], custom_user_configs=user_configs or [], custom_dictionaries=dictionaries or [], diff --git a/tests/integration/test_disk_types/test.py b/tests/integration/test_disk_types/test.py index 1cc5048eb69..a5e2456ef4f 100644 --- a/tests/integration/test_disk_types/test.py +++ b/tests/integration/test_disk_types/test.py @@ -19,9 +19,9 @@ def cluster(): cluster = ClickHouseCluster(__file__) cluster.add_instance( "node", - main_configs=["configs/storage_arm.xml"] - if is_arm() - else ["configs/storage_amd.xml"], + main_configs=( + ["configs/storage_arm.xml"] if is_arm() else ["configs/storage_amd.xml"] + ), with_minio=True, with_hdfs=not is_arm(), ) diff --git a/tests/integration/test_distributed_default_database/test.py b/tests/integration/test_distributed_default_database/test.py index ef69533416b..7da9a368997 100644 --- a/tests/integration/test_distributed_default_database/test.py +++ b/tests/integration/test_distributed_default_database/test.py @@ -5,6 +5,7 @@ in this test we write into per-node tables and read from the distributed table. 
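
The `SettingsRandomizer` entries reformatted above all follow one pattern: with a small probability pick an extreme value, otherwise a mundane one. A hedged sketch of what a `threshold_generator`-style helper could look like — the real helper lives in `clickhouse-test` and its exact body may differ, so treat this as an illustration of the pattern only:

import random


def threshold_generator(extreme_low_prob, extreme_high_prob, min_val, max_val):
    """Assumed shape: rarely pin a threshold to one of its extremes,
    otherwise sample uniformly in between."""
    def gen():
        r = random.random()
        if r < extreme_low_prob:
            return min_val   # threshold effectively always triggers
        if r < extreme_low_prob + extreme_high_prob:
            return max_val   # threshold effectively never triggers
        return random.randint(min_val, max_val)
    return gen


# Same shape as the max_threads / max_insert_threads lambdas in the diff:
max_threads = lambda: 32 if random.random() < 0.03 else random.randint(1, 3)
print(threshold_generator(0.2, 0.2, 1, 1_000_000)(), max_threads())
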
The default database in the distributed table definition is left empty on purpose to test default database deduction. """ + import pytest from helpers.client import QueryRuntimeException diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/config.xml b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/config.xml deleted file mode 100644 index 42a1f962705..00000000000 --- a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/config.xml +++ /dev/null @@ -1,4 +0,0 @@ - - 1 - 250 - diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/host_regexp.xml b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/host_regexp.xml deleted file mode 100644 index 9329c8dbde2..00000000000 --- a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/host_regexp.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - test1\.example\.com$ - - default - - - diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/listen_host.xml b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/listen_host.xml deleted file mode 100644 index 9c27c612f63..00000000000 --- a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/listen_host.xml +++ /dev/null @@ -1,5 +0,0 @@ - - :: - 0.0.0.0 - 1 - diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/Corefile b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/Corefile deleted file mode 100644 index 3edf37dafa5..00000000000 --- a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/Corefile +++ /dev/null @@ -1,8 +0,0 @@ -. { - hosts /example.com { - reload "20ms" - fallthrough - } - forward . 
127.0.0.11 - log -} diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/example.com b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/example.com deleted file mode 100644 index 9beb415c290..00000000000 --- a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/example.com +++ /dev/null @@ -1 +0,0 @@ -filled in runtime, but needs to exist in order to be volume mapped in docker \ No newline at end of file diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py deleted file mode 100644 index 70419f95dd3..00000000000 --- a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py +++ /dev/null @@ -1,62 +0,0 @@ -import pycurl -import threading -from io import BytesIO -import sys - -client_ip = sys.argv[1] -server_ip = sys.argv[2] - -mutex = threading.Lock() -success_counter = 0 -number_of_threads = 100 -number_of_iterations = 50 - - -def perform_request(): - buffer = BytesIO() - crl = pycurl.Curl() - crl.setopt(pycurl.INTERFACE, client_ip) - crl.setopt(crl.WRITEDATA, buffer) - crl.setopt(crl.URL, f"http://{server_ip}:8123/?query=select+1&user=test_dns") - - crl.perform() - - # End curl session - crl.close() - - str_response = buffer.getvalue().decode("iso-8859-1") - expected_response = "1\n" - - mutex.acquire() - - global success_counter - - if str_response == expected_response: - success_counter += 1 - - mutex.release() - - -def perform_multiple_requests(n): - for request_number in range(n): - perform_request() - - -threads = [] - - -for i in range(number_of_threads): - thread = threading.Thread( - target=perform_multiple_requests, args=(number_of_iterations,) - ) - thread.start() - threads.append(thread) - -for thread in threads: - thread.join() - - -if success_counter == number_of_threads * number_of_iterations: - exit(0) - -exit(1) diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py deleted file mode 100644 index d73e8813e79..00000000000 --- a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py +++ /dev/null @@ -1,88 +0,0 @@ -import pytest -import socket -from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check -from time import sleep -import os - -DOCKER_COMPOSE_PATH = get_docker_compose_path() -SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) - -cluster = ClickHouseCluster(__file__) - -ch_server = cluster.add_instance( - "clickhouse-server", - with_coredns=True, - main_configs=["configs/config.xml", "configs/listen_host.xml"], - user_configs=["configs/host_regexp.xml"], -) - -client = cluster.add_instance( - "clickhouse-client", -) - - -@pytest.fixture(scope="module") -def started_cluster(): - global cluster - try: - cluster.start() - yield cluster - - finally: - cluster.shutdown() - - -def check_ptr_record(ip, hostname): - try: - host, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) - if hostname.lower() == host.lower(): - return True - except socket.herror: - pass - return False - - -def setup_dns_server(ip): - domains_string = "test3.example.com test2.example.com test1.example.com" - example_file_path = f'{ch_server.env_variables["COREDNS_CONFIG_DIR"]}/example.com' - run_and_check(f"echo '{ip} {domains_string}' > {example_file_path}", 
shell=True) - - # DNS server takes time to reload the configuration. - for try_num in range(10): - if all(check_ptr_record(ip, host) for host in domains_string.split()): - break - sleep(1) - - -def setup_ch_server(dns_server_ip): - ch_server.exec_in_container( - (["bash", "-c", f"echo 'nameserver {dns_server_ip}' > /etc/resolv.conf"]) - ) - ch_server.exec_in_container( - (["bash", "-c", "echo 'options ndots:0' >> /etc/resolv.conf"]) - ) - ch_server.query("SYSTEM DROP DNS CACHE") - - -def build_endpoint_v4(ip): - return f"'http://{ip}:8123/?query=SELECT+1&user=test_dns'" - - -def build_endpoint_v6(ip): - return build_endpoint_v4(f"[{ip}]") - - -def test_host_regexp_multiple_ptr_v4(started_cluster): - server_ip = cluster.get_instance_ip("clickhouse-server") - client_ip = cluster.get_instance_ip("clickhouse-client") - dns_server_ip = cluster.get_instance_ip(cluster.coredns_host) - - setup_dns_server(client_ip) - setup_ch_server(dns_server_ip) - - current_dir = os.path.dirname(__file__) - client.copy_file_to_container( - os.path.join(current_dir, "scripts", "stress_test.py"), "stress_test.py" - ) - - client.exec_in_container(["python3", f"stress_test.py", client_ip, server_ip]) diff --git a/tests/integration/test_interserver_dns_retires/test.py b/tests/integration/test_interserver_dns_retires/test.py index f0c581e6450..7c81f278737 100644 --- a/tests/integration/test_interserver_dns_retires/test.py +++ b/tests/integration/test_interserver_dns_retires/test.py @@ -2,6 +2,7 @@ This test makes sure interserver cluster queries handle invalid DNS records for replicas. """ + from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster, ClickHouseInstance diff --git a/tests/integration/test_lost_part/test.py b/tests/integration/test_lost_part/test.py index b8e67551d79..c2469d6c524 100644 --- a/tests/integration/test_lost_part/test.py +++ b/tests/integration/test_lost_part/test.py @@ -266,7 +266,7 @@ def test_lost_last_part(start_cluster): "ALTER TABLE mt3 UPDATE id = 777 WHERE 1", settings={"mutations_sync": "0"} ) - partition_id = node1.query("select partitionId('x')").strip() + partition_id = node1.query("select partitionID('x')").strip() remove_part_from_disk(node1, "mt3", f"{partition_id}_0_0_0") # other way to detect broken parts diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index 9a0cb352088..40cbf4b44a6 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -197,7 +197,9 @@ def test_partition_by_string_column(started_cluster): started_cluster, bucket, "test_foo/bar.csv" ) assert '3,"йцук"\n' == get_s3_file_content(started_cluster, bucket, "test_йцук.csv") - assert '78,"你好"\n' == get_s3_file_content(started_cluster, bucket, "test_你好.csv") + assert '78,"你好"\n' == get_s3_file_content( + started_cluster, bucket, "test_你好.csv" + ) def test_partition_by_const_column(started_cluster): diff --git a/tests/integration/test_tcp_handler_http_responses/test_case.py b/tests/integration/test_tcp_handler_http_responses/test_case.py index 2fc53674ca4..98f4b74223e 100644 --- a/tests/integration/test_tcp_handler_http_responses/test_case.py +++ b/tests/integration/test_tcp_handler_http_responses/test_case.py @@ -1,4 +1,5 @@ """Test HTTP responses given by the TCP Handler.""" + from pathlib import Path import pytest from helpers.cluster import ClickHouseCluster diff --git a/tests/integration/test_tcp_handler_interserver_listen_host/test_case.py 
b/tests/integration/test_tcp_handler_interserver_listen_host/test_case.py index 62581996f3b..b20ab48795b 100644 --- a/tests/integration/test_tcp_handler_interserver_listen_host/test_case.py +++ b/tests/integration/test_tcp_handler_interserver_listen_host/test_case.py @@ -1,4 +1,5 @@ """Test Interserver responses on configured IP.""" + from pathlib import Path import pytest from helpers.cluster import ClickHouseCluster diff --git a/tests/queries/0_stateless/00411_long_accurate_number_comparison.python b/tests/queries/0_stateless/00411_long_accurate_number_comparison.python index 183a2637d36..045de9ee7ee 100644 --- a/tests/queries/0_stateless/00411_long_accurate_number_comparison.python +++ b/tests/queries/0_stateless/00411_long_accurate_number_comparison.python @@ -50,7 +50,7 @@ TYPES = { "UInt32": {"bits": 32, "sign": False, "float": False}, "Int32": {"bits": 32, "sign": True, "float": False}, "UInt64": {"bits": 64, "sign": False, "float": False}, - "Int64": {"bits": 64, "sign": True, "float": False} + "Int64": {"bits": 64, "sign": True, "float": False}, # "Float32" : { "bits" : 32, "sign" : True, "float" : True }, # "Float64" : { "bits" : 64, "sign" : True, "float" : True } } diff --git a/tests/queries/0_stateless/00719_parallel_ddl_db.sh b/tests/queries/0_stateless/00719_parallel_ddl_db.sh index ceba24df7e4..ac4e4eae4f2 100755 --- a/tests/queries/0_stateless/00719_parallel_ddl_db.sh +++ b/tests/queries/0_stateless/00719_parallel_ddl_db.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -DB_SUFFIX=$RANDOM +DB_SUFFIX=${RANDOM}${RANDOM}${RANDOM}${RANDOM} ${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS parallel_ddl_${DB_SUFFIX}" function query() diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh index d45cc3a6871..4887c409844 100755 --- a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh +++ b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh @@ -41,7 +41,7 @@ function thread3() function thread4() { - while true; do $CLICKHOUSE_CLIENT -q "OPTIMIZE TABLE alter_table0 FINAL"; done + while true; do $CLICKHOUSE_CLIENT --receive_timeout=3 -q "OPTIMIZE TABLE alter_table0 FINAL" | grep -Fv "Timeout exceeded while receiving data from server"; done } function thread5() diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh index fff786d6c06..9a26f78a8ee 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh @@ -5,20 +5,23 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -TMP_DIR="/tmp" +TMP_DIR=${CLICKHOUSE_TMP}/tmp +DATA_DIR=${CLICKHOUSE_TMP}/data +mkdir -p $TMP_DIR +mkdir -p $DATA_DIR declare -a SearchTypes=("POLYGON" "POLYGON_SIMPLE" "POLYGON_INDEX_EACH" "POLYGON_INDEX_CELL") -tar -xf "${CURDIR}"/01037_test_data_search.tar.gz -C "${CURDIR}" +tar -xf "${CURDIR}"/01037_test_data_search.tar.gz -C "${DATA_DIR}" $CLICKHOUSE_CLIENT -n --query=" DROP TABLE IF EXISTS points; CREATE TABLE points (x Float64, y Float64) ENGINE = Memory; " -$CLICKHOUSE_CLIENT --query="INSERT INTO points FORMAT TSV" --max_insert_block_size=100000 < "${CURDIR}/01037_point_data" +$CLICKHOUSE_CLIENT --query="INSERT INTO points FORMAT TSV" --max_insert_block_size=100000 < "${DATA_DIR}/01037_point_data" -rm "${CURDIR}"/01037_point_data +rm "${DATA_DIR}"/01037_point_data $CLICKHOUSE_CLIENT -n --query=" DROP TABLE IF EXISTS polygons_array; @@ -32,9 +35,9 @@ CREATE TABLE polygons_array ENGINE = Memory; " -$CLICKHOUSE_CLIENT --query="INSERT INTO polygons_array FORMAT JSONEachRow" --min_chunk_bytes_for_parallel_parsing=10485760 --max_insert_block_size=100000 < "${CURDIR}/01037_polygon_data" +$CLICKHOUSE_CLIENT --query="INSERT INTO polygons_array FORMAT JSONEachRow" --min_chunk_bytes_for_parallel_parsing=10485760 --max_insert_block_size=100000 < "${DATA_DIR}/01037_polygon_data" -rm "${CURDIR}"/01037_polygon_data +rm "${DATA_DIR}"/01037_polygon_data for type in "${SearchTypes[@]}"; do diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh index c9cd151a2d9..47f7a5c1c4f 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh @@ -5,19 +5,22 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -TMP_DIR="/tmp" +TMP_DIR=${CLICKHOUSE_TMP}/tmp +DATA_DIR=${CLICKHOUSE_TMP}/data +mkdir -p $TMP_DIR +mkdir -p $DATA_DIR declare -a SearchTypes=("POLYGON_INDEX_EACH" "POLYGON_INDEX_CELL") -tar -xf "${CURDIR}"/01037_test_data_perf.tar.gz -C "${CURDIR}" +tar -xf "${CURDIR}"/01037_test_data_perf.tar.gz -C "${DATA_DIR}" $CLICKHOUSE_CLIENT -n --query=" CREATE TABLE points (x Float64, y Float64) ENGINE = Memory; " -$CLICKHOUSE_CLIENT --query="INSERT INTO points FORMAT TSV" --min_chunk_bytes_for_parallel_parsing=10485760 --max_insert_block_size=100000 < "${CURDIR}/01037_point_data" +$CLICKHOUSE_CLIENT --query="INSERT INTO points FORMAT TSV" --min_chunk_bytes_for_parallel_parsing=10485760 --max_insert_block_size=100000 < "${DATA_DIR}/01037_point_data" -rm "${CURDIR}"/01037_point_data +rm "${DATA_DIR}"/01037_point_data $CLICKHOUSE_CLIENT -n --query=" DROP TABLE IF EXISTS polygons_array; @@ -31,9 +34,9 @@ CREATE TABLE polygons_array ENGINE = Memory; " -$CLICKHOUSE_CLIENT --query="INSERT INTO polygons_array FORMAT JSONEachRow" --min_chunk_bytes_for_parallel_parsing=10485760 --max_insert_block_size=100000 < "${CURDIR}/01037_polygon_data" +$CLICKHOUSE_CLIENT --query="INSERT INTO polygons_array FORMAT JSONEachRow" --min_chunk_bytes_for_parallel_parsing=10485760 --max_insert_block_size=100000 < "${DATA_DIR}/01037_polygon_data" -rm "${CURDIR}"/01037_polygon_data +rm "${DATA_DIR}"/01037_polygon_data for type in "${SearchTypes[@]}"; do diff --git a/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.ans b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.ans index dfad14fb113..937539643ec 100644 --- a/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.ans +++ b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.ans @@ -1,104 +1,104 @@ -dictGet test_01037.dict_array (-100,-42) qqq 101 -dictGet test_01037.dict_array (-1,0) Click South 423 -dictGet test_01037.dict_array (-0.1,0) Click South 423 -dictGet test_01037.dict_array (0,-2) Click West 424 -dictGet test_01037.dict_array (0,-1.1) Click West 424 -dictGet test_01037.dict_array (0,1.1) Click North 422 -dictGet test_01037.dict_array (0,2) Click North 422 -dictGet test_01037.dict_array (0.1,0) Click East 421 -dictGet test_01037.dict_array (0.99,2.99) Click North 422 -dictGet test_01037.dict_array (1,0) Click East 421 -dictGet test_01037.dict_array (3,3) House 314159 -dictGet test_01037.dict_array (5,6) Click 42 -dictGet test_01037.dict_array (7.01,7.01) qqq 101 -dictGetOrDefault test_01037.dict_array (-100,-42) www 1234 -dictGetOrDefault test_01037.dict_array (-1,0) Click South 423 -dictGetOrDefault test_01037.dict_array (-0.1,0) Click South 423 -dictGetOrDefault test_01037.dict_array (0,-2) Click West 424 -dictGetOrDefault test_01037.dict_array (0,-1.1) Click West 424 -dictGetOrDefault test_01037.dict_array (0,1.1) Click North 422 -dictGetOrDefault test_01037.dict_array (0,2) Click North 422 -dictGetOrDefault test_01037.dict_array (0.1,0) Click East 421 -dictGetOrDefault test_01037.dict_array (0.99,2.99) Click North 422 -dictGetOrDefault test_01037.dict_array (1,0) Click East 421 -dictGetOrDefault test_01037.dict_array (3,3) House 314159 -dictGetOrDefault test_01037.dict_array (5,6) Click 42 -dictGetOrDefault test_01037.dict_array (7.01,7.01) www 1234 -dictGetOrDefault test_01037.dict_array (-100,-42) dd 44 -dictGetOrDefault test_01037.dict_array (-1,0) Click South 423 -dictGetOrDefault test_01037.dict_array (-0.1,0) Click South 423 -dictGetOrDefault test_01037.dict_array (0,-2) Click 
West 424 -dictGetOrDefault test_01037.dict_array (0,-1.1) Click West 424 -dictGetOrDefault test_01037.dict_array (0,1.1) Click North 422 -dictGetOrDefault test_01037.dict_array (0,2) Click North 422 -dictGetOrDefault test_01037.dict_array (0.1,0) Click East 421 -dictGetOrDefault test_01037.dict_array (0.99,2.99) Click North 422 -dictGetOrDefault test_01037.dict_array (1,0) Click East 421 -dictGetOrDefault test_01037.dict_array (3,3) House 314159 -dictGetOrDefault test_01037.dict_array (5,6) Click 42 -dictGetOrDefault test_01037.dict_array (7.01,7.01) ee 55 -dictGet test_01037.dict_tuple (-100,-42) qqq 101 -dictGet test_01037.dict_tuple (-1,0) Click South 423 -dictGet test_01037.dict_tuple (-0.1,0) Click South 423 -dictGet test_01037.dict_tuple (0,-2) Click West 424 -dictGet test_01037.dict_tuple (0,-1.1) Click West 424 -dictGet test_01037.dict_tuple (0,1.1) Click North 422 -dictGet test_01037.dict_tuple (0,2) Click North 422 -dictGet test_01037.dict_tuple (0.1,0) Click East 421 -dictGet test_01037.dict_tuple (0.99,2.99) Click North 422 -dictGet test_01037.dict_tuple (1,0) Click East 421 -dictGet test_01037.dict_tuple (3,3) House 314159 -dictGet test_01037.dict_tuple (5,6) Click 42 -dictGet test_01037.dict_tuple (7.01,7.01) qqq 101 -dictGetOrDefault test_01037.dict_tuple (-100,-42) www 1234 -dictGetOrDefault test_01037.dict_tuple (-1,0) Click South 423 -dictGetOrDefault test_01037.dict_tuple (-0.1,0) Click South 423 -dictGetOrDefault test_01037.dict_tuple (0,-2) Click West 424 -dictGetOrDefault test_01037.dict_tuple (0,-1.1) Click West 424 -dictGetOrDefault test_01037.dict_tuple (0,1.1) Click North 422 -dictGetOrDefault test_01037.dict_tuple (0,2) Click North 422 -dictGetOrDefault test_01037.dict_tuple (0.1,0) Click East 421 -dictGetOrDefault test_01037.dict_tuple (0.99,2.99) Click North 422 -dictGetOrDefault test_01037.dict_tuple (1,0) Click East 421 -dictGetOrDefault test_01037.dict_tuple (3,3) House 314159 -dictGetOrDefault test_01037.dict_tuple (5,6) Click 42 -dictGetOrDefault test_01037.dict_tuple (7.01,7.01) www 1234 -dictGetOrDefault test_01037.dict_tuple (-100,-42) dd 44 -dictGetOrDefault test_01037.dict_tuple (-1,0) Click South 423 -dictGetOrDefault test_01037.dict_tuple (-0.1,0) Click South 423 -dictGetOrDefault test_01037.dict_tuple (0,-2) Click West 424 -dictGetOrDefault test_01037.dict_tuple (0,-1.1) Click West 424 -dictGetOrDefault test_01037.dict_tuple (0,1.1) Click North 422 -dictGetOrDefault test_01037.dict_tuple (0,2) Click North 422 -dictGetOrDefault test_01037.dict_tuple (0.1,0) Click East 421 -dictGetOrDefault test_01037.dict_tuple (0.99,2.99) Click North 422 -dictGetOrDefault test_01037.dict_tuple (1,0) Click East 421 -dictGetOrDefault test_01037.dict_tuple (3,3) House 314159 -dictGetOrDefault test_01037.dict_tuple (5,6) Click 42 -dictGetOrDefault test_01037.dict_tuple (7.01,7.01) ee 55 -dictHas test_01037.dict_array (-100,-42) 0 -dictHas test_01037.dict_array (-1,0) 1 -dictHas test_01037.dict_array (-0.1,0) 1 -dictHas test_01037.dict_array (0,-2) 1 -dictHas test_01037.dict_array (0,-1.1) 1 -dictHas test_01037.dict_array (0,1.1) 1 -dictHas test_01037.dict_array (0,2) 1 -dictHas test_01037.dict_array (0.1,0) 1 -dictHas test_01037.dict_array (0.99,2.99) 1 -dictHas test_01037.dict_array (1,0) 1 -dictHas test_01037.dict_array (3,3) 1 -dictHas test_01037.dict_array (5,6) 1 -dictHas test_01037.dict_array (7.01,7.01) 0 -dictHas test_01037.dict_tuple (-100,-42) 0 -dictHas test_01037.dict_tuple (-1,0) 1 -dictHas test_01037.dict_tuple (-0.1,0) 1 -dictHas test_01037.dict_tuple 
(0,-2) 1 -dictHas test_01037.dict_tuple (0,-1.1) 1 -dictHas test_01037.dict_tuple (0,1.1) 1 -dictHas test_01037.dict_tuple (0,2) 1 -dictHas test_01037.dict_tuple (0.1,0) 1 -dictHas test_01037.dict_tuple (0.99,2.99) 1 -dictHas test_01037.dict_tuple (1,0) 1 -dictHas test_01037.dict_tuple (3,3) 1 -dictHas test_01037.dict_tuple (5,6) 1 -dictHas test_01037.dict_tuple (7.01,7.01) 0 +dictGet dict_array (-100,-42) qqq 101 +dictGet dict_array (-1,0) Click South 423 +dictGet dict_array (-0.1,0) Click South 423 +dictGet dict_array (0,-2) Click West 424 +dictGet dict_array (0,-1.1) Click West 424 +dictGet dict_array (0,1.1) Click North 422 +dictGet dict_array (0,2) Click North 422 +dictGet dict_array (0.1,0) Click East 421 +dictGet dict_array (0.99,2.99) Click North 422 +dictGet dict_array (1,0) Click East 421 +dictGet dict_array (3,3) House 314159 +dictGet dict_array (5,6) Click 42 +dictGet dict_array (7.01,7.01) qqq 101 +dictGetOrDefault dict_array (-100,-42) www 1234 +dictGetOrDefault dict_array (-1,0) Click South 423 +dictGetOrDefault dict_array (-0.1,0) Click South 423 +dictGetOrDefault dict_array (0,-2) Click West 424 +dictGetOrDefault dict_array (0,-1.1) Click West 424 +dictGetOrDefault dict_array (0,1.1) Click North 422 +dictGetOrDefault dict_array (0,2) Click North 422 +dictGetOrDefault dict_array (0.1,0) Click East 421 +dictGetOrDefault dict_array (0.99,2.99) Click North 422 +dictGetOrDefault dict_array (1,0) Click East 421 +dictGetOrDefault dict_array (3,3) House 314159 +dictGetOrDefault dict_array (5,6) Click 42 +dictGetOrDefault dict_array (7.01,7.01) www 1234 +dictGetOrDefault dict_array (-100,-42) dd 44 +dictGetOrDefault dict_array (-1,0) Click South 423 +dictGetOrDefault dict_array (-0.1,0) Click South 423 +dictGetOrDefault dict_array (0,-2) Click West 424 +dictGetOrDefault dict_array (0,-1.1) Click West 424 +dictGetOrDefault dict_array (0,1.1) Click North 422 +dictGetOrDefault dict_array (0,2) Click North 422 +dictGetOrDefault dict_array (0.1,0) Click East 421 +dictGetOrDefault dict_array (0.99,2.99) Click North 422 +dictGetOrDefault dict_array (1,0) Click East 421 +dictGetOrDefault dict_array (3,3) House 314159 +dictGetOrDefault dict_array (5,6) Click 42 +dictGetOrDefault dict_array (7.01,7.01) ee 55 +dictGet dict_tuple (-100,-42) qqq 101 +dictGet dict_tuple (-1,0) Click South 423 +dictGet dict_tuple (-0.1,0) Click South 423 +dictGet dict_tuple (0,-2) Click West 424 +dictGet dict_tuple (0,-1.1) Click West 424 +dictGet dict_tuple (0,1.1) Click North 422 +dictGet dict_tuple (0,2) Click North 422 +dictGet dict_tuple (0.1,0) Click East 421 +dictGet dict_tuple (0.99,2.99) Click North 422 +dictGet dict_tuple (1,0) Click East 421 +dictGet dict_tuple (3,3) House 314159 +dictGet dict_tuple (5,6) Click 42 +dictGet dict_tuple (7.01,7.01) qqq 101 +dictGetOrDefault dict_tuple (-100,-42) www 1234 +dictGetOrDefault dict_tuple (-1,0) Click South 423 +dictGetOrDefault dict_tuple (-0.1,0) Click South 423 +dictGetOrDefault dict_tuple (0,-2) Click West 424 +dictGetOrDefault dict_tuple (0,-1.1) Click West 424 +dictGetOrDefault dict_tuple (0,1.1) Click North 422 +dictGetOrDefault dict_tuple (0,2) Click North 422 +dictGetOrDefault dict_tuple (0.1,0) Click East 421 +dictGetOrDefault dict_tuple (0.99,2.99) Click North 422 +dictGetOrDefault dict_tuple (1,0) Click East 421 +dictGetOrDefault dict_tuple (3,3) House 314159 +dictGetOrDefault dict_tuple (5,6) Click 42 +dictGetOrDefault dict_tuple (7.01,7.01) www 1234 +dictGetOrDefault dict_tuple (-100,-42) dd 44 +dictGetOrDefault dict_tuple (-1,0) Click South 423 
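
The `.ans` listing being rewritten here is consumed by a bare `diff -q` in the test script, which only reports that the files differ. When a run fails locally, a tiny helper like the following (hypothetical, not part of the suite) pinpoints the first diverging line:

import sys


def first_mismatch(expected_path: str, actual_path: str) -> int:
    """Return the 1-based number of the first differing line, or 0 if the
    common prefix matches (extra trailing lines are ignored in this sketch)."""
    with open(expected_path) as exp, open(actual_path) as act:
        for n, (e_line, a_line) in enumerate(zip(exp, act), start=1):
            if e_line != a_line:
                print(f"line {n}:\n  expected: {e_line!r}\n  actual:   {a_line!r}")
                return n
    return 0


if __name__ == "__main__":
    sys.exit(1 if first_mismatch(sys.argv[1], sys.argv[2]) else 0)
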
+dictGetOrDefault dict_tuple (-0.1,0) Click South 423 +dictGetOrDefault dict_tuple (0,-2) Click West 424 +dictGetOrDefault dict_tuple (0,-1.1) Click West 424 +dictGetOrDefault dict_tuple (0,1.1) Click North 422 +dictGetOrDefault dict_tuple (0,2) Click North 422 +dictGetOrDefault dict_tuple (0.1,0) Click East 421 +dictGetOrDefault dict_tuple (0.99,2.99) Click North 422 +dictGetOrDefault dict_tuple (1,0) Click East 421 +dictGetOrDefault dict_tuple (3,3) House 314159 +dictGetOrDefault dict_tuple (5,6) Click 42 +dictGetOrDefault dict_tuple (7.01,7.01) ee 55 +dictHas dict_array (-100,-42) 0 +dictHas dict_array (-1,0) 1 +dictHas dict_array (-0.1,0) 1 +dictHas dict_array (0,-2) 1 +dictHas dict_array (0,-1.1) 1 +dictHas dict_array (0,1.1) 1 +dictHas dict_array (0,2) 1 +dictHas dict_array (0.1,0) 1 +dictHas dict_array (0.99,2.99) 1 +dictHas dict_array (1,0) 1 +dictHas dict_array (3,3) 1 +dictHas dict_array (5,6) 1 +dictHas dict_array (7.01,7.01) 0 +dictHas dict_tuple (-100,-42) 0 +dictHas dict_tuple (-1,0) 1 +dictHas dict_tuple (-0.1,0) 1 +dictHas dict_tuple (0,-2) 1 +dictHas dict_tuple (0,-1.1) 1 +dictHas dict_tuple (0,1.1) 1 +dictHas dict_tuple (0,2) 1 +dictHas dict_tuple (0.1,0) 1 +dictHas dict_tuple (0.99,2.99) 1 +dictHas dict_tuple (1,0) 1 +dictHas dict_tuple (3,3) 1 +dictHas dict_tuple (5,6) 1 +dictHas dict_tuple (7.01,7.01) 0 diff --git a/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh index be983ec1be4..d1ee3f283bc 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh @@ -1,56 +1,52 @@ #!/usr/bin/env bash -# Tags: no-debug, no-parallel +# Tags: no-debug CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -TMP_DIR="/tmp" +TMP_DIR=${CLICKHOUSE_TMP}/tmp +mkdir -p $TMP_DIR $CLICKHOUSE_CLIENT -n --query=" -DROP DATABASE IF EXISTS test_01037; +DROP TABLE IF EXISTS polygons_array; -CREATE DATABASE test_01037; +CREATE TABLE polygons_array (key Array(Array(Array(Array(Float64)))), name String, value UInt64) ENGINE = Memory; +INSERT INTO polygons_array VALUES ([[[[1, 3], [1, 1], [3, 1], [3, -1], [1, -1], [1, -3], [-1, -3], [-1, -1], [-3, -1], [-3, 1], [-1, 1], [-1, 3]]], [[[5, 5], [5, 1], [7, 1], [7, 7], [1, 7], [1, 5]]]], 'Click', 42); +INSERT INTO polygons_array VALUES ([[[[5, 5], [5, -5], [-5, -5], [-5, 5]], [[1, 3], [1, 1], [3, 1], [3, -1], [1, -1], [1, -3], [-1, -3], [-1, -1], [-3, -1], [-3, 1], [-1, 1], [-1, 3]]]], 'House', 314159); +INSERT INTO polygons_array VALUES ([[[[3, 1], [0, 1], [0, -1], [3, -1]]]], 'Click East', 421); +INSERT INTO polygons_array VALUES ([[[[-1, 1], [1, 1], [1, 3], [-1, 3]]]], 'Click North', 422); +INSERT INTO polygons_array VALUES ([[[[-3, 1], [-3, -1], [0, -1], [0, 1]]]], 'Click South', 423); +INSERT INTO polygons_array VALUES ([[[[-1, -1], [1, -1], [1, -3], [-1, -3]]]], 'Click West', 424); -DROP TABLE IF EXISTS test_01037.polygons_array; +DROP TABLE IF EXISTS polygons_tuple; -CREATE TABLE test_01037.polygons_array (key Array(Array(Array(Array(Float64)))), name String, value UInt64) ENGINE = Memory; -INSERT INTO test_01037.polygons_array VALUES ([[[[1, 3], [1, 1], [3, 1], [3, -1], [1, -1], [1, -3], [-1, -3], [-1, -1], [-3, -1], [-3, 1], [-1, 1], [-1, 3]]], [[[5, 5], [5, 1], [7, 1], [7, 7], [1, 7], [1, 5]]]], 'Click', 42); -INSERT INTO test_01037.polygons_array VALUES ([[[[5, 5], [5, -5], [-5, -5], [-5, 5]], [[1, 3], [1, 1], [3, 1], [3, -1], [1, -1], [1, -3], [-1, -3], [-1, -1], [-3, -1], [-3, 1], [-1, 1], [-1, 3]]]], 'House', 314159); -INSERT INTO test_01037.polygons_array VALUES ([[[[3, 1], [0, 1], [0, -1], [3, -1]]]], 'Click East', 421); -INSERT INTO test_01037.polygons_array VALUES ([[[[-1, 1], [1, 1], [1, 3], [-1, 3]]]], 'Click North', 422); -INSERT INTO test_01037.polygons_array VALUES ([[[[-3, 1], [-3, -1], [0, -1], [0, 1]]]], 'Click South', 423); -INSERT INTO test_01037.polygons_array VALUES ([[[[-1, -1], [1, -1], [1, -3], [-1, -3]]]], 'Click West', 424); +CREATE TABLE polygons_tuple (key Array(Array(Array(Tuple(Float64, Float64)))), name String, value UInt64) ENGINE = Memory; +INSERT INTO polygons_tuple VALUES ([[[(1, 3), (1, 1), (3, 1), (3, -1), (1, -1), (1, -3), (-1, -3), (-1, -1), (-3, -1), (-3, 1), (-1, 1), (-1, 3)]], [[(5, 5), (5, 1), (7, 1), (7, 7), (1, 7), (1, 5)]]], 'Click', 42); +INSERT INTO polygons_tuple VALUES ([[[(5, 5), (5, -5), (-5, -5), (-5, 5)], [(1, 3), (1, 1), (3, 1), (3, -1), (1, -1), (1, -3), (-1, -3), (-1, -1), (-3, -1), (-3, 1), (-1, 1), (-1, 3)]]], 'House', 314159); +INSERT INTO polygons_tuple VALUES ([[[(3, 1), (0, 1), (0, -1), (3, -1)]]], 'Click East', 421); +INSERT INTO polygons_tuple VALUES ([[[(-1, 1), (1, 1), (1, 3), (-1, 3)]]], 'Click North', 422); +INSERT INTO polygons_tuple VALUES ([[[(-3, 1), (-3, -1), (0, -1), (0, 1)]]], 'Click South', 423); +INSERT INTO polygons_tuple VALUES ([[[(-1, -1), (1, -1), (1, -3), (-1, -3)]]], 'Click West', 424); -DROP TABLE IF EXISTS test_01037.polygons_tuple; +DROP TABLE IF EXISTS points; -CREATE TABLE test_01037.polygons_tuple (key Array(Array(Array(Tuple(Float64, Float64)))), name String, value UInt64) ENGINE = Memory; -INSERT INTO test_01037.polygons_tuple VALUES ([[[(1, 3), (1, 1), (3, 1), (3, -1), (1, -1), (1, -3), (-1, -3), (-1, -1), (-3, -1), (-3, 1), (-1, 1), 
(-1, 3)]], [[(5, 5), (5, 1), (7, 1), (7, 7), (1, 7), (1, 5)]]], 'Click', 42); -INSERT INTO test_01037.polygons_tuple VALUES ([[[(5, 5), (5, -5), (-5, -5), (-5, 5)], [(1, 3), (1, 1), (3, 1), (3, -1), (1, -1), (1, -3), (-1, -3), (-1, -1), (-3, -1), (-3, 1), (-1, 1), (-1, 3)]]], 'House', 314159); -INSERT INTO test_01037.polygons_tuple VALUES ([[[(3, 1), (0, 1), (0, -1), (3, -1)]]], 'Click East', 421); -INSERT INTO test_01037.polygons_tuple VALUES ([[[(-1, 1), (1, 1), (1, 3), (-1, 3)]]], 'Click North', 422); -INSERT INTO test_01037.polygons_tuple VALUES ([[[(-3, 1), (-3, -1), (0, -1), (0, 1)]]], 'Click South', 423); -INSERT INTO test_01037.polygons_tuple VALUES ([[[(-1, -1), (1, -1), (1, -3), (-1, -3)]]], 'Click West', 424); - -DROP TABLE IF EXISTS test_01037.points; - -CREATE TABLE test_01037.points (x Float64, y Float64, def_i UInt64, def_s String) ENGINE = Memory; -INSERT INTO test_01037.points VALUES (0.1, 0.0, 112, 'aax'); -INSERT INTO test_01037.points VALUES (-0.1, 0.0, 113, 'aay'); -INSERT INTO test_01037.points VALUES (0.0, 1.1, 114, 'aaz'); -INSERT INTO test_01037.points VALUES (0.0, -1.1, 115, 'aat'); -INSERT INTO test_01037.points VALUES (3.0, 3.0, 22, 'bb'); -INSERT INTO test_01037.points VALUES (5.0, 6.0, 33, 'cc'); -INSERT INTO test_01037.points VALUES (-100.0, -42.0, 44, 'dd'); -INSERT INTO test_01037.points VALUES (7.01, 7.01, 55, 'ee'); -INSERT INTO test_01037.points VALUES (0.99, 2.99, 66, 'ee'); -INSERT INTO test_01037.points VALUES (1.0, 0.0, 771, 'ffa'); -INSERT INTO test_01037.points VALUES (-1.0, 0.0, 772, 'ffb'); -INSERT INTO test_01037.points VALUES (0.0, 2.0, 773, 'ffc'); -INSERT INTO test_01037.points VALUES (0.0, -2.0, 774, 'ffd'); +CREATE TABLE points (x Float64, y Float64, def_i UInt64, def_s String) ENGINE = Memory; +INSERT INTO points VALUES (0.1, 0.0, 112, 'aax'); +INSERT INTO points VALUES (-0.1, 0.0, 113, 'aay'); +INSERT INTO points VALUES (0.0, 1.1, 114, 'aaz'); +INSERT INTO points VALUES (0.0, -1.1, 115, 'aat'); +INSERT INTO points VALUES (3.0, 3.0, 22, 'bb'); +INSERT INTO points VALUES (5.0, 6.0, 33, 'cc'); +INSERT INTO points VALUES (-100.0, -42.0, 44, 'dd'); +INSERT INTO points VALUES (7.01, 7.01, 55, 'ee'); +INSERT INTO points VALUES (0.99, 2.99, 66, 'ee'); +INSERT INTO points VALUES (1.0, 0.0, 771, 'ffa'); +INSERT INTO points VALUES (-1.0, 0.0, 772, 'ffb'); +INSERT INTO points VALUES (0.0, 2.0, 773, 'ffc'); +INSERT INTO points VALUES (0.0, -2.0, 774, 'ffd'); " - declare -a SearchTypes=("POLYGON" "POLYGON_SIMPLE" "POLYGON_INDEX_EACH" "POLYGON_INDEX_CELL") for type in "${SearchTypes[@]}"; @@ -58,63 +54,62 @@ do outputFile="${TMP_DIR}/results${type}.out" $CLICKHOUSE_CLIENT -n --query=" - DROP DICTIONARY IF EXISTS test_01037.dict_array; - CREATE DICTIONARY test_01037.dict_array + DROP DICTIONARY IF EXISTS dict_array; + CREATE DICTIONARY dict_array ( key Array(Array(Array(Array(Float64)))), name String DEFAULT 'qqq', value UInt64 DEFAULT 101 ) PRIMARY KEY key - SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'polygons_array' PASSWORD '' DB 'test_01037')) + SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'polygons_array' PASSWORD '' DB currentDatabase())) LIFETIME(0) LAYOUT($type()); - DROP DICTIONARY IF EXISTS test_01037.dict_tuple; + DROP DICTIONARY IF EXISTS dict_tuple; - CREATE DICTIONARY test_01037.dict_tuple + CREATE DICTIONARY dict_tuple ( key Array(Array(Array(Tuple(Float64, Float64)))), name String DEFAULT 'qqq', value UInt64 DEFAULT 101 ) PRIMARY KEY key - SOURCE(CLICKHOUSE(HOST 'localhost' PORT 
tcpPort() USER 'default' TABLE 'polygons_tuple' PASSWORD '' DB 'test_01037')) + SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'polygons_tuple' PASSWORD '' DB currentDatabase())) LIFETIME(0) LAYOUT($type()); - select 'dictGet', 'test_01037.dict_array' as dict_name, tuple(x, y) as key, + select 'dictGet', 'dict_array' as dict_name, tuple(x, y) as key, dictGet(dict_name, 'name', key), - dictGet(dict_name, 'value', key) from test_01037.points order by x, y; - select 'dictGetOrDefault', 'test_01037.dict_array' as dict_name, tuple(x, y) as key, + dictGet(dict_name, 'value', key) from points order by x, y; + select 'dictGetOrDefault', 'dict_array' as dict_name, tuple(x, y) as key, dictGetOrDefault(dict_name, 'name', key, 'www'), - dictGetOrDefault(dict_name, 'value', key, toUInt64(1234)) from test_01037.points order by x, y; - select 'dictGetOrDefault', 'test_01037.dict_array' as dict_name, tuple(x, y) as key, + dictGetOrDefault(dict_name, 'value', key, toUInt64(1234)) from points order by x, y; + select 'dictGetOrDefault', 'dict_array' as dict_name, tuple(x, y) as key, dictGetOrDefault(dict_name, 'name', key, def_s), - dictGetOrDefault(dict_name, 'value', key, def_i) from test_01037.points order by x, y; - select 'dictGet', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key, + dictGetOrDefault(dict_name, 'value', key, def_i) from points order by x, y; + select 'dictGet', 'dict_tuple' as dict_name, tuple(x, y) as key, dictGet(dict_name, 'name', key), - dictGet(dict_name, 'value', key) from test_01037.points order by x, y; - select 'dictGetOrDefault', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key, + dictGet(dict_name, 'value', key) from points order by x, y; + select 'dictGetOrDefault', 'dict_tuple' as dict_name, tuple(x, y) as key, dictGetOrDefault(dict_name, 'name', key, 'www'), - dictGetOrDefault(dict_name, 'value', key, toUInt64(1234)) from test_01037.points order by x, y; - select 'dictGetOrDefault', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key, + dictGetOrDefault(dict_name, 'value', key, toUInt64(1234)) from points order by x, y; + select 'dictGetOrDefault', 'dict_tuple' as dict_name, tuple(x, y) as key, dictGetOrDefault(dict_name, 'name', key, def_s), - dictGetOrDefault(dict_name, 'value', key, def_i) from test_01037.points order by x, y; - select 'dictHas', 'test_01037.dict_array' as dict_name, tuple(x, y) as key, - dictHas(dict_name, key) from test_01037.points order by x, y; - select 'dictHas', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key, - dictHas(dict_name, key) from test_01037.points order by x, y; + dictGetOrDefault(dict_name, 'value', key, def_i) from points order by x, y; + select 'dictHas', 'dict_array' as dict_name, tuple(x, y) as key, + dictHas(dict_name, key) from points order by x, y; + select 'dictHas', 'dict_tuple' as dict_name, tuple(x, y) as key, + dictHas(dict_name, key) from points order by x, y; " > "$outputFile" diff -q "${CURDIR}/01037_polygon_dicts_simple_functions.ans" "$outputFile" done $CLICKHOUSE_CLIENT -n --query=" -DROP DICTIONARY test_01037.dict_array; -DROP DICTIONARY test_01037.dict_tuple; -DROP TABLE test_01037.polygons_array; -DROP TABLE test_01037.polygons_tuple; -DROP TABLE test_01037.points; -DROP DATABASE test_01037; +DROP DICTIONARY dict_array; +DROP DICTIONARY dict_tuple; +DROP TABLE polygons_array; +DROP TABLE polygons_tuple; +DROP TABLE points; " diff --git a/tests/queries/0_stateless/01487_distributed_in_not_default_db.reference 
b/tests/queries/0_stateless/01487_distributed_in_not_default_db.reference deleted file mode 100644 index 0d66ea1aee9..00000000000 --- a/tests/queries/0_stateless/01487_distributed_in_not_default_db.reference +++ /dev/null @@ -1,2 +0,0 @@ -0 -1 diff --git a/tests/queries/0_stateless/01487_distributed_in_not_default_db.sql b/tests/queries/0_stateless/01487_distributed_in_not_default_db.sql deleted file mode 100644 index cd027530ac8..00000000000 --- a/tests/queries/0_stateless/01487_distributed_in_not_default_db.sql +++ /dev/null @@ -1,43 +0,0 @@ --- Tags: distributed, no-parallel - -CREATE DATABASE IF NOT EXISTS shard_0; -CREATE DATABASE IF NOT EXISTS shard_1; -CREATE DATABASE IF NOT EXISTS main_01487; -CREATE DATABASE IF NOT EXISTS test_01487; - -USE main_01487; - -DROP TABLE IF EXISTS shard_0.l; -DROP TABLE IF EXISTS shard_1.l; -DROP TABLE IF EXISTS d; -DROP TABLE IF EXISTS t; - -CREATE TABLE shard_0.l (value UInt8) ENGINE = MergeTree ORDER BY value; -CREATE TABLE shard_1.l (value UInt8) ENGINE = MergeTree ORDER BY value; -CREATE TABLE t (value UInt8) ENGINE = Memory; - -INSERT INTO shard_0.l VALUES (0); -INSERT INTO shard_1.l VALUES (1); -INSERT INTO t VALUES (0), (1), (2); - -CREATE TABLE d AS t ENGINE = Distributed(test_cluster_two_shards_different_databases, currentDatabase(), t); - -USE test_01487; -DROP DATABASE test_01487; - --- After the default database is dropped QueryAnalysisPass cannot process the following SELECT query. --- That query is invalid on the initiator node. -set allow_experimental_analyzer = 0; - -SELECT * FROM main_01487.d WHERE value IN (SELECT l.value FROM l) ORDER BY value; - -USE main_01487; - -DROP TABLE IF EXISTS shard_0.l; -DROP TABLE IF EXISTS shard_1.l; -DROP TABLE IF EXISTS d; -DROP TABLE IF EXISTS t; - -DROP DATABASE shard_0; -DROP DATABASE shard_1; -DROP DATABASE main_01487; diff --git a/tests/queries/0_stateless/01748_partition_id_pruning.sql b/tests/queries/0_stateless/01748_partition_id_pruning.sql index b637528bc6c..f492a4a8871 100644 --- a/tests/queries/0_stateless/01748_partition_id_pruning.sql +++ b/tests/queries/0_stateless/01748_partition_id_pruning.sql @@ -8,17 +8,17 @@ insert into x values (1, 1), (1, 2), (1, 3), (2, 4), (2, 5), (2, 6); set max_rows_to_read = 3; -select * from x where _partition_id = partitionId(1); +select * from x where _partition_id = partitionID(1); set max_rows_to_read = 5; -- one row for subquery + subquery -select * from x where _partition_id in (select partitionId(number + 1) from numbers(1)); +select * from x where _partition_id in (select partitionID(number + 1) from numbers(1)); -- trivial count optimization test set max_rows_to_read = 2; -- one row for subquery + subquery itself -- TODO: Relax the limits because we might build prepared set twice with _minmax_count_projection set max_rows_to_read = 3; -select count() from x where _partition_id in (select partitionId(number + 1) from numbers(1)); +select count() from x where _partition_id in (select partitionID(number + 1) from numbers(1)); drop table x; diff --git a/tests/queries/0_stateless/02378_analyzer_projection_names.reference b/tests/queries/0_stateless/02378_analyzer_projection_names.reference index fd5bc7d4ae8..532414f117c 100644 --- a/tests/queries/0_stateless/02378_analyzer_projection_names.reference +++ b/tests/queries/0_stateless/02378_analyzer_projection_names.reference @@ -443,6 +443,18 @@ SELECT '--'; -- DESCRIBE (WITH test_table_in_cte AS (SELECT id FROM test_table) SELECT id IN test_table_in_cte FROM test_table); in(id, test_table_in_cte) UInt8 
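
The reference lines added just below pin down how the analyzer qualifies projection names when both sides of a JOIN come from CTEs. To reproduce the check outside the harness, something like this should work, assuming a `clickhouse-local` binary on PATH (mirroring `$CLICKHOUSE_LOCAL` in the test scripts) and the analyzer enabled:

import subprocess

QUERY = """
DESCRIBE (WITH test_table_in_cte_1 AS (SELECT 1 AS c1),
               test_table_in_cte_2 AS (SELECT 1 AS c1)
          SELECT *
          FROM test_table_in_cte_1
          INNER JOIN test_table_in_cte_2 AS test_table_in_cte_2
          ON test_table_in_cte_1.c1 = test_table_in_cte_2.c1)
"""

# --allow_experimental_analyzer=1 is assumed here; the CI harness sets it
# through its own configuration.
out = subprocess.run(
    ["clickhouse-local", "--allow_experimental_analyzer=1", "--query", QUERY],
    check=True, capture_output=True, text=True,
).stdout
# Both columns must come back qualified with their CTE names, as in the
# reference output below.
assert "test_table_in_cte_1.c1" in out and "test_table_in_cte_2.c1" in out
print(out)
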
+SELECT '--'; +-- +DESCRIBE (WITH test_table_in_cte_1 AS (SELECT 1 AS c1), test_table_in_cte_2 AS (SELECT 1 AS c1) SELECT * +FROM test_table_in_cte_1 INNER JOIN test_table_in_cte_2 as test_table_in_cte_2 ON test_table_in_cte_1.c1 = test_table_in_cte_2.c1); +test_table_in_cte_1.c1 UInt8 +test_table_in_cte_2.c1 UInt8 +SELECT '--'; +-- +DESCRIBE (WITH test_table_in_cte_1 AS (SELECT 1 AS c1), test_table_in_cte_2 AS (SELECT 1 AS c1 UNION ALL SELECT 1 AS c1) SELECT * +FROM test_table_in_cte_1 INNER JOIN test_table_in_cte_2 as test_table_in_cte_2 ON test_table_in_cte_1.c1 = test_table_in_cte_2.c1); +test_table_in_cte_1.c1 UInt8 +test_table_in_cte_2.c1 UInt8 SELECT 'Joins'; Joins DESCRIBE (SELECT * FROM test_table_join_1, test_table_join_2); diff --git a/tests/queries/0_stateless/02378_analyzer_projection_names.sql b/tests/queries/0_stateless/02378_analyzer_projection_names.sql index f5ac5f7476f..f41afe6a950 100644 --- a/tests/queries/0_stateless/02378_analyzer_projection_names.sql +++ b/tests/queries/0_stateless/02378_analyzer_projection_names.sql @@ -408,6 +408,16 @@ SELECT '--'; DESCRIBE (WITH test_table_in_cte AS (SELECT id FROM test_table) SELECT id IN test_table_in_cte FROM test_table); +SELECT '--'; + +DESCRIBE (WITH test_table_in_cte_1 AS (SELECT 1 AS c1), test_table_in_cte_2 AS (SELECT 1 AS c1) SELECT * +FROM test_table_in_cte_1 INNER JOIN test_table_in_cte_2 as test_table_in_cte_2 ON test_table_in_cte_1.c1 = test_table_in_cte_2.c1); + +SELECT '--'; + +DESCRIBE (WITH test_table_in_cte_1 AS (SELECT 1 AS c1), test_table_in_cte_2 AS (SELECT 1 AS c1 UNION ALL SELECT 1 AS c1) SELECT * +FROM test_table_in_cte_1 INNER JOIN test_table_in_cte_2 as test_table_in_cte_2 ON test_table_in_cte_1.c1 = test_table_in_cte_2.c1); + SELECT 'Joins'; DESCRIBE (SELECT * FROM test_table_join_1, test_table_join_2); diff --git a/tests/queries/0_stateless/02834_apache_arrow_abort.sql b/tests/queries/0_stateless/02834_apache_arrow_abort.sql index bd29e95db9a..47e1c5d3951 100644 --- a/tests/queries/0_stateless/02834_apache_arrow_abort.sql +++ b/tests/queries/0_stateless/02834_apache_arrow_abort.sql @@ -1,4 +1,4 @@ -- Tags: no-fasttest -- This tests depends on internet access, but it does not matter, because it only has to check that there is no abort due to a bug in Apache Arrow library. 
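
The `-- { serverError ... }` annotation in the test below accepts any of several error codes, since the exact failure depends on where the oversized transfer breaks. The same tolerant pattern, sketched as a hypothetical Python helper around `clickhouse-local` (not part of the real harness, which parses these annotations itself):

import subprocess

ALLOWED = {
    "CANNOT_WRITE_TO_OSTREAM",
    "RECEIVED_ERROR_FROM_REMOTE_IO_SERVER",
    "POCO_EXCEPTION",
}


def expect_server_error(query: str, allowed=ALLOWED) -> str:
    """Run a query that must fail and return whichever allowed error it hit."""
    proc = subprocess.run(
        ["clickhouse-local", "--query", query],
        capture_output=True, text=True,
    )
    assert proc.returncode != 0, "query unexpectedly succeeded"
    hits = [code for code in allowed if code in proc.stderr]
    assert hits, f"unexpected error: {proc.stderr[:200]}"
    return hits[0]
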
- +SET optimize_trivial_insert_select=1; INSERT INTO TABLE FUNCTION url('https://clickhouse-public-datasets.s3.amazonaws.com/hits_compatible/athena_partitioned/hits_9.parquet') SELECT * FROM url('https://clickhouse-public-datasets.s3.amazonaws.com/hits_compatible/athena_partitioned/hits_9.parquet'); -- { serverError CANNOT_WRITE_TO_OSTREAM, RECEIVED_ERROR_FROM_REMOTE_IO_SERVER, POCO_EXCEPTION } diff --git a/tests/queries/0_stateless/02895_npy_format.reference b/tests/queries/0_stateless/02895_npy_format.reference index f9e77644a35..52972f0acbd 100644 --- a/tests/queries/0_stateless/02895_npy_format.reference +++ b/tests/queries/0_stateless/02895_npy_format.reference @@ -85,12 +85,12 @@ c [4,5,6] [[1,2],[3,4]] [[5,6],[7,8]] -0 -0 -0 -0 -0 -0 +1 +1 +1 +1 +1 +1 1 [2.199219,1.099609,3.300781] [4.25,3.34961,6.628906] diff --git a/tests/queries/0_stateless/02895_npy_format.sh b/tests/queries/0_stateless/02895_npy_format.sh index 9d05303a091..194b2bc1fe4 100755 --- a/tests/queries/0_stateless/02895_npy_format.sh +++ b/tests/queries/0_stateless/02895_npy_format.sh @@ -52,14 +52,14 @@ $CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/two_dim.npy', Npy, 'v $CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/three_dim.npy', Npy, 'value Array(Array(Int8))')" $CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value Array(Float32)')" 2>&1 | grep -c "BAD_ARGUMENTS" -$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value UUID')" 2>&1 | grep -c "BAD_ARGUMENTS" -$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value Tuple(UInt8)')" 2>&1 | grep -c "BAD_ARGUMENTS" +$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value UUID')" 2>&1 | grep -c "UNKNOWN_TYPE" +$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value Tuple(UInt8)')" 2>&1 | grep -c "UNKNOWN_TYPE" -$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value Int8')" 2>&1 | grep -c "BAD_ARGUMENTS" -$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_str.npy', Npy, 'value Int8')" 2>&1 | grep -c "BAD_ARGUMENTS" -$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_unicode.npy', Npy, 'value Float32')" 2>&1 | grep -c "BAD_ARGUMENTS" +$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_float.npy', Npy, 'value Int8')" 2>&1 | grep -c "ILLEGAL_COLUMN" +$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_str.npy', Npy, 'value Int8')" 2>&1 | grep -c "ILLEGAL_COLUMN" +$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_unicode.npy', Npy, 'value Float32')" 2>&1 | grep -c "ILLEGAL_COLUMN" -$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/complex.npy')" 2>&1 | grep -c "BAD_ARGUMENTS" +$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/complex.npy')" 2>&1 | grep -c "CANNOT_EXTRACT_TABLE_STRUCTURE" $CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/float_16.npy')" diff --git a/tests/queries/0_stateless/02944_variant_as_common_type.sql b/tests/queries/0_stateless/02944_variant_as_common_type.sql index 49ea5f2769c..b3b86427b06 100644 --- a/tests/queries/0_stateless/02944_variant_as_common_type.sql +++ b/tests/queries/0_stateless/02944_variant_as_common_type.sql @@ -71,4 +71,3 @@ select toTypeName(res), array([1, 2, 3], [[1, 2, 3]]) as res; select toTypeName(res), map('a', 1, 'b', 'str_1') as res; select toTypeName(res), map('a', 1, 'b', map('c', 2, 
'd', 'str_1')) as res; select toTypeName(res), map('a', 1, 'b', [1, 2, 3], 'c', [[4, 5, 6]]) as res; - diff --git a/tests/queries/0_stateless/02982_changeDate.reference b/tests/queries/0_stateless/02982_changeDate.reference new file mode 100644 index 00000000000..4a7f093ca2b --- /dev/null +++ b/tests/queries/0_stateless/02982_changeDate.reference @@ -0,0 +1,169 @@ +Negative tests +changeYear +-- Date +2001-01-01 +1970-01-01 +1970-01-01 +2149-06-06 +-- Date32 +2001-01-01 +1900-01-01 +1900-01-01 +2299-12-31 +-- DateTime +2001-01-01 11:22:33 +1970-01-01 01:00:00 +1970-01-01 01:00:00 +2106-02-07 07:28:15 +-- DateTime64 +2001-01-01 11:22:33.4444 +1900-01-01 00:00:00.0000 +1900-01-01 00:00:00.0000 +2299-12-31 23:59:59.9999 +changeMonth +-- Date +2000-01-01 +2000-02-01 +2000-12-01 +1970-01-01 +1970-01-01 +1970-01-01 +-- Date32 +2000-01-01 +2000-02-01 +2000-12-01 +1900-01-01 +1900-01-01 +1900-01-01 +-- DateTime +2000-01-01 11:22:33 +2000-02-01 11:22:33 +2000-12-01 11:22:33 +1970-01-01 01:00:00 +1970-01-01 01:00:00 +1970-01-01 01:00:00 +-- DateTime64 +2000-01-01 11:22:33.4444 +2000-02-01 11:22:33.4444 +2000-12-01 11:22:33.4444 +1900-01-01 00:00:00.0000 +1900-01-01 00:00:00.0000 +1900-01-01 00:00:00.0000 +changeDay +-- Date +2000-01-01 +2000-01-02 +2000-01-31 +1970-01-01 +1970-01-01 +1970-01-01 +-- Date32 +2000-01-01 +2000-01-02 +2000-01-31 +1900-01-01 +1900-01-01 +1900-01-01 +-- DateTime +2000-01-01 11:22:33 +2000-01-02 11:22:33 +2000-01-31 11:22:33 +1970-01-01 01:00:00 +1970-01-01 01:00:00 +1970-01-01 01:00:00 +-- DateTime64 +2000-01-01 11:22:33.4444 +2000-01-02 11:22:33.4444 +2000-01-31 11:22:33.4444 +1900-01-01 00:00:00.0000 +1900-01-01 00:00:00.0000 +1900-01-01 00:00:00.0000 +-- Special case: change to 29 Feb in a leap year +2000-02-29 +2000-02-29 +2000-02-29 11:22:33 +2000-02-29 11:22:33.4444 +changeHour +-- Date +2000-01-01 00:00:00 +2000-01-01 02:00:00 +2000-01-01 23:00:00 +1970-01-01 01:00:00 +1970-01-01 01:00:00 +-- Date32 +2000-01-01 00:00:00.000 +2000-01-01 02:00:00.000 +2000-01-01 23:00:00.000 +1900-01-01 00:00:00.000 +1900-01-01 00:00:00.000 +-- DateTime +2000-01-01 00:22:33 +2000-01-01 02:22:33 +2000-01-01 23:22:33 +1970-01-01 01:00:00 +1970-01-01 01:00:00 +-- DateTime64 +2000-01-01 00:22:33.4444 +2000-01-01 02:22:33.4444 +2000-01-01 23:22:33.4444 +1900-01-01 00:00:00.0000 +1900-01-01 00:00:00.0000 +-- With different timezone +1970-01-01 07:00:00 +1970-01-01 07:00:00 +changeMinute +-- Date +2000-01-01 00:00:00 +2000-01-01 00:02:00 +2000-01-01 00:59:00 +1970-01-01 01:00:00 +1970-01-01 01:00:00 +-- Date32 +2000-01-01 00:00:00.000 +2000-01-01 00:02:00.000 +2000-01-01 00:59:00.000 +1900-01-01 00:00:00.000 +1900-01-01 00:00:00.000 +-- DateTime +2000-01-01 11:00:33 +2000-01-01 11:02:33 +2000-01-01 11:59:33 +1970-01-01 01:00:00 +1970-01-01 01:00:00 +-- DateTime64 +2000-01-01 11:00:33.4444 +2000-01-01 11:02:33.4444 +2000-01-01 11:59:33.4444 +1900-01-01 00:00:00.0000 +1900-01-01 00:00:00.0000 +-- With different timezone +1970-01-01 07:00:00 +1970-01-01 07:00:00 +changeSecond +-- Date +2000-01-01 00:00:00 +2000-01-01 00:00:02 +2000-01-01 00:00:59 +1970-01-01 01:00:00 +1970-01-01 01:00:00 +-- Date32 +2000-01-01 00:00:00.000 +2000-01-01 00:00:02.000 +2000-01-01 00:00:59.000 +1900-01-01 00:00:00.000 +1900-01-01 00:00:00.000 +-- DateTime +2000-01-01 11:22:00 +2000-01-01 11:22:02 +2000-01-01 11:22:59 +1970-01-01 01:00:00 +1970-01-01 01:00:00 +-- DateTime64 +2000-01-01 11:22:00.4444 +2000-01-01 11:22:02.4444 +2000-01-01 11:22:59.4444 +1900-01-01 00:00:00.0000 +1900-01-01 00:00:00.0000 +-- With 
different timezone +1970-01-01 07:00:00 +1970-01-01 07:00:00 diff --git a/tests/queries/0_stateless/02982_changeDate.sql b/tests/queries/0_stateless/02982_changeDate.sql new file mode 100644 index 00000000000..2bc9aa95569 --- /dev/null +++ b/tests/queries/0_stateless/02982_changeDate.sql @@ -0,0 +1,185 @@ +SELECT 'Negative tests'; +-- changeYear, changeMonth, changeDay, changeMinute, and changeSecond share the same implementation, so only one of them is tested here +SELECT changeYear(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT changeYear(toDate('2000-01-01')); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT changeYear(toDate('2000-01-01'), 2000, 1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT changeYear(1999, 2000); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT changeYear(toDate('2000-01-01'), 'abc'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT changeYear(toDate('2000-01-01'), 1.5); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- Disable timezone randomization +SET session_timezone='CET'; + +SELECT 'changeYear'; +SELECT '-- Date'; +SELECT changeYear(toDate('2000-01-01'), 2001); +SELECT changeYear(toDate('2000-01-01'), 1800); -- out-of-bounds +SELECT changeYear(toDate('2000-01-01'), -5000); -- out-of-bounds +SELECT changeYear(toDate('2000-01-01'), 2500); -- out-of-bounds +SELECT '-- Date32'; +SELECT changeYear(toDate32('2000-01-01'), 2001); +SELECT changeYear(toDate32('2000-01-01'), 1800); -- out-of-bounds +SELECT changeYear(toDate32('2000-01-01'), -5000); -- out-of-bounds +SELECT changeYear(toDate32('2000-01-01'), 2500); -- out-of-bounds +SELECT '-- DateTime'; +SELECT changeYear(toDateTime('2000-01-01 11:22:33'), 2001); +SELECT changeYear(toDateTime('2000-01-01 11:22:33'), 1800); -- out-of-bounds +SELECT changeYear(toDateTime('2000-01-01 11:22:33'), -5000); -- out-of-bounds +SELECT changeYear(toDateTime('2000-01-01 11:22:33'), 2500); -- out-of-bounds +SELECT '-- DateTime64'; +SELECT changeYear(toDateTime64('2000-01-01 11:22:33.4444', 4), 2001); +SELECT changeYear(toDateTime64('2000-01-01 11:22:33.4444', 4), 1800); -- out-of-bounds +SELECT changeYear(toDateTime64('2000-01-01 11:22:33.4444', 4), -5000); -- out-of-bounds +SELECT changeYear(toDateTime64('2000-01-01 11:22:33.4444', 4), 2500); -- out-of-bounds + +SELECT 'changeMonth'; +SELECT '-- Date'; +SELECT changeMonth(toDate('2000-01-01'), 1); +SELECT changeMonth(toDate('2000-01-01'), 2); +SELECT changeMonth(toDate('2000-01-01'), 12); +SELECT changeMonth(toDate('2000-01-01'), 0); -- out-of-bounds +SELECT changeMonth(toDate('2000-01-01'), -1); -- out-of-bounds +SELECT changeMonth(toDate('2000-01-01'), 13); -- out-of-bounds +SELECT '-- Date32'; +SELECT changeMonth(toDate32('2000-01-01'), 1); +SELECT changeMonth(toDate32('2000-01-01'), 2); +SELECT changeMonth(toDate32('2000-01-01'), 12); +SELECT changeMonth(toDate32('2000-01-01'), 0); -- out-of-bounds +SELECT changeMonth(toDate32('2000-01-01'), -1); -- out-of-bounds +SELECT changeMonth(toDate32('2000-01-01'), 13); -- out-of-bounds +SELECT '-- DateTime'; +SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 1); +SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 2); +SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 12); +SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 0); -- out-of-bounds +SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds +SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 13); -- out-of-bounds +SELECT '-- DateTime64'; +SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4),
1); +SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), 2); +SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), 12); +SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), 0); -- out-of-bounds +SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- out-of-bounds +SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), 13); -- out-of-bounds + +SELECT 'changeDay'; +SELECT '-- Date'; +SELECT changeDay(toDate('2000-01-01'), 1); +SELECT changeDay(toDate('2000-01-01'), 2); +SELECT changeDay(toDate('2000-01-01'), 31); +SELECT changeDay(toDate('2000-01-01'), 0); -- out-of-bounds +SELECT changeDay(toDate('2000-01-01'), -1); -- out-of-bounds +SELECT changeDay(toDate('2000-01-01'), 32); -- out-of-bounds +SELECT '-- Date32'; +SELECT changeDay(toDate32('2000-01-01'), 1); +SELECT changeDay(toDate32('2000-01-01'), 2); +SELECT changeDay(toDate32('2000-01-01'), 31); +SELECT changeDay(toDate32('2000-01-01'), 0); -- out-of-bounds +SELECT changeDay(toDate32('2000-01-01'), -1); -- out-of-bounds +SELECT changeDay(toDate32('2000-01-01'), 32); -- out-of-bounds +SELECT '-- DateTime'; +SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 1); +SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 2); +SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 31); +SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 0); -- out-of-bounds +SELECT changeDay(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds +SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 32); -- out-of-bounds +SELECT '-- DateTime64'; +SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 1); +SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 2); +SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 31); +SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 0); -- out-of-bounds +SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- out-of-bounds +SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 32); -- out-of-bounds +SELECT '-- Special case: change to 29 Feb in a leap year'; +SELECT changeDay(toDate('2000-02-28'), 29); +SELECT changeDay(toDate32('2000-02-01'), 29); +SELECT changeDay(toDateTime('2000-02-01 11:22:33'), 29); +SELECT changeDay(toDateTime64('2000-02-01 11:22:33.4444', 4), 29); + +SELECT 'changeHour'; +SELECT '-- Date'; +SELECT changeHour(toDate('2000-01-01'), 0); +SELECT changeHour(toDate('2000-01-01'), 2); +SELECT changeHour(toDate('2000-01-01'), 23); +SELECT changeHour(toDate('2000-01-01'), -1); -- out-of-bounds +SELECT changeHour(toDate('2000-01-01'), 24); -- out-of-bounds +SELECT '-- Date32'; +SELECT changeHour(toDate32('2000-01-01'), 0); +SELECT changeHour(toDate32('2000-01-01'), 2); +SELECT changeHour(toDate32('2000-01-01'), 23); +SELECT changeHour(toDate32('2000-01-01'), -1); -- out-of-bounds +SELECT changeHour(toDate32('2000-01-01'), 24); -- out-of-bounds +SELECT '-- DateTime'; +SELECT changeHour(toDateTime('2000-01-01 11:22:33'), 0); +SELECT changeHour(toDateTime('2000-01-01 11:22:33'), 2); +SELECT changeHour(toDateTime('2000-01-01 11:22:33'), 23); +SELECT changeHour(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds +SELECT changeHour(toDateTime('2000-01-01 11:22:33'), 24); -- out-of-bounds +SELECT '-- DateTime64'; +SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), 0); +SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), 2); +SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), 23); +SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- 
out-of-bounds +SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), 24); -- out-of-bounds +SELECT '-- With different timezone'; +SELECT changeHour(toDate('2000-01-01'), -1) SETTINGS session_timezone = 'Asia/Novosibirsk'; +SELECT changeHour(toDate('2000-01-01'), 24) SETTINGS session_timezone = 'Asia/Novosibirsk'; + +SELECT 'changeMinute'; +SELECT '-- Date'; +SELECT changeMinute(toDate('2000-01-01'), 0); +SELECT changeMinute(toDate('2000-01-01'), 2); +SELECT changeMinute(toDate('2000-01-01'), 59); +SELECT changeMinute(toDate('2000-01-01'), -1); -- out-of-bounds +SELECT changeMinute(toDate('2000-01-01'), 60); -- out-of-bounds +SELECT '-- Date32'; +SELECT changeMinute(toDate32('2000-01-01'), 0); +SELECT changeMinute(toDate32('2000-01-01'), 2); +SELECT changeMinute(toDate32('2000-01-01'), 59); +SELECT changeMinute(toDate32('2000-01-01'), -1); -- out-of-bounds +SELECT changeMinute(toDate32('2000-01-01'), 60); -- out-of-bounds +SELECT '-- DateTime'; +SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), 0); +SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), 2); +SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), 59); +SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds +SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), 60); -- out-of-bounds +SELECT '-- DateTime64'; +SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), 0); +SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), 2); +SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), 59); +SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- out-of-bounds +SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), 60); -- out-of-bounds +SELECT '-- With different timezone'; +SELECT changeMinute(toDate('2000-01-01'), -1) SETTINGS session_timezone = 'Asia/Novosibirsk'; +SELECT changeMinute(toDate('2000-01-01'), 60) SETTINGS session_timezone = 'Asia/Novosibirsk'; + +SELECT 'changeSecond'; +SELECT '-- Date'; +SELECT changeSecond(toDate('2000-01-01'), 0); +SELECT changeSecond(toDate('2000-01-01'), 2); +SELECT changeSecond(toDate('2000-01-01'), 59); +SELECT changeSecond(toDate('2000-01-01'), -1); -- out-of-bounds +SELECT changeSecond(toDate('2000-01-01'), 60); -- out-of-bounds +SELECT '-- Date32'; +SELECT changeSecond(toDate32('2000-01-01'), 0); +SELECT changeSecond(toDate32('2000-01-01'), 2); +SELECT changeSecond(toDate32('2000-01-01'), 59); +SELECT changeSecond(toDate32('2000-01-01'), -1); -- out-of-bounds +SELECT changeSecond(toDate32('2000-01-01'), 60); -- out-of-bounds +SELECT '-- DateTime'; +SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), 0); +SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), 2); +SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), 59); +SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds +SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), 60); -- out-of-bounds +SELECT '-- DateTime64'; +SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), 0); +SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), 2); +SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), 59); +SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- out-of-bounds +SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), 60); -- out-of-bounds +SELECT '-- With different timezone'; +SELECT changeSecond(toDate('2000-01-01'), -1) SETTINGS session_timezone = 'Asia/Novosibirsk'; +SELECT changeSecond(toDate('2000-01-01'), 60) SETTINGS session_timezone = 
'Asia/Novosibirsk'; diff --git a/tests/queries/0_stateless/03030_system_flush_distributed_settings.reference b/tests/queries/0_stateless/03030_system_flush_distributed_settings.reference index 5caff40c4a0..3a05c8b3ee8 100644 --- a/tests/queries/0_stateless/03030_system_flush_distributed_settings.reference +++ b/tests/queries/0_stateless/03030_system_flush_distributed_settings.reference @@ -1 +1 @@ -10000 +30000 diff --git a/tests/queries/0_stateless/03030_system_flush_distributed_settings.sql b/tests/queries/0_stateless/03030_system_flush_distributed_settings.sql index ac64135b593..fac673a4fe4 100644 --- a/tests/queries/0_stateless/03030_system_flush_distributed_settings.sql +++ b/tests/queries/0_stateless/03030_system_flush_distributed_settings.sql @@ -6,15 +6,17 @@ drop table if exists dist_out; create table ephemeral (key Int, value Int) engine=Null(); create table dist_in as ephemeral engine=Distributed(test_shard_localhost, currentDatabase(), ephemeral, key) settings background_insert_batch=1; -create table data (key Int, uniq_values Int) engine=Memory(); -create materialized view mv to data as select key, uniqExact(value) uniq_values from ephemeral group by key; +create table data (key Int, uniq_values Int) engine=TinyLog(); +create materialized view mv to data as select key, uniqExact(value::String) uniq_values from ephemeral group by key; system stop distributed sends dist_in; create table dist_out as data engine=Distributed(test_shard_localhost, currentDatabase(), data); set prefer_localhost_replica=0; SET optimize_trivial_insert_select = 1; -insert into dist_in select number/100, number from system.numbers limit 1e6 settings max_memory_usage='20Mi'; +-- due to pushing to a MV with aggregation, the query needs ~300MiB, +-- but it will be done in the background via "system flush distributed" +insert into dist_in select number/100, number from system.numbers limit 3e6 settings max_block_size=3e6, max_memory_usage='100Mi'; system flush distributed dist_in; -- { serverError MEMORY_LIMIT_EXCEEDED } system flush distributed dist_in settings max_memory_usage=0; select count() from dist_out; diff --git a/tests/queries/0_stateless/03056_analyzer_double_subquery_alias.reference b/tests/queries/0_stateless/03056_analyzer_double_subquery_alias.reference deleted file mode 100644 index 72749c905a3..00000000000 --- a/tests/queries/0_stateless/03056_analyzer_double_subquery_alias.reference +++ /dev/null @@ -1 +0,0 @@ -1 1 1 diff --git a/tests/queries/0_stateless/03056_analyzer_double_subquery_alias.sql b/tests/queries/0_stateless/03056_analyzer_double_subquery_alias.sql deleted file mode 100644 index de471c1a091..00000000000 --- a/tests/queries/0_stateless/03056_analyzer_double_subquery_alias.sql +++ /dev/null @@ -1,17 +0,0 @@ --- https://github.com/ClickHouse/ClickHouse/issues/22627 -SET allow_experimental_analyzer=1; -WITH - x AS - ( - SELECT 1 AS a - ), - xx AS - ( - SELECT * - FROM x - , x AS x1 - , x AS x2 - ) -SELECT * -FROM xx -WHERE a = 1; diff --git a/tests/queries/0_stateless/03201_local_named_collections.reference b/tests/queries/0_stateless/03201_local_named_collections.reference new file mode 100644 index 00000000000..af5626b4a11 --- /dev/null +++ b/tests/queries/0_stateless/03201_local_named_collections.reference @@ -0,0 +1 @@ +Hello, world!
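A note on the 03030_system_flush_distributed_settings change above: settings attached to "system flush distributed" apply to the background flush itself, which is why the test can first fail with MEMORY_LIMIT_EXCEEDED and then succeed once the limit is lifted. A minimal sketch of that pattern, assuming a single-node test_shard_localhost cluster; the names local_data and dist_data are illustrative, not taken from the test:
create table local_data (key Int, value Int) engine=Null();
create table dist_data as local_data engine=Distributed(test_shard_localhost, currentDatabase(), local_data, key);
system stop distributed sends dist_data; -- queue inserts on disk instead of sending them
insert into dist_data select number, number from numbers(1000) settings prefer_localhost_replica=0;
system flush distributed dist_data settings max_memory_usage=0; -- 0 disables the per-query memory limit for the flush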
diff --git a/tests/queries/0_stateless/03201_local_named_collections.sh b/tests/queries/0_stateless/03201_local_named_collections.sh new file mode 100755 index 00000000000..809b4b52f41 --- /dev/null +++ b/tests/queries/0_stateless/03201_local_named_collections.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --multiquery " +DROP TABLE IF EXISTS test; +CREATE TABLE test (s String) ORDER BY (); +INSERT INTO test VALUES ('Hello, world!'); +" + +${CLICKHOUSE_LOCAL} --multiquery " +CREATE NAMED COLLECTION mydb AS host = '${CLICKHOUSE_HOST}', port = ${CLICKHOUSE_PORT_TCP}, user = 'default', password = '', db = '${CLICKHOUSE_DATABASE}'; +SELECT * FROM remote(mydb, table = 'test'); +" 2>&1 | grep --text -F -v "ASan doesn't fully support makecontext/swapcontext functions" + +${CLICKHOUSE_CLIENT} --multiquery " +DROP TABLE test; +" diff --git a/tests/queries/0_stateless/03203_multiif_and_where_2_conditions_old_analyzer_bug.reference b/tests/queries/0_stateless/03203_multiif_and_where_2_conditions_old_analyzer_bug.reference new file mode 100644 index 00000000000..5512e59cc73 --- /dev/null +++ b/tests/queries/0_stateless/03203_multiif_and_where_2_conditions_old_analyzer_bug.reference @@ -0,0 +1,6 @@ +this query used to be broken in the old analyzer: +c1 c2 yes true +this query worked: +yes true +experimental analyzer: +c1 c2 yes true diff --git a/tests/queries/0_stateless/03203_multiif_and_where_2_conditions_old_analyzer_bug.sql b/tests/queries/0_stateless/03203_multiif_and_where_2_conditions_old_analyzer_bug.sql new file mode 100644 index 00000000000..c97333f7df4 --- /dev/null +++ b/tests/queries/0_stateless/03203_multiif_and_where_2_conditions_old_analyzer_bug.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS bugcheck1; + +CREATE TABLE bugcheck1 +ENGINE = MergeTree +ORDER BY tuple() +AS SELECT + 'c1' as column_a, + 'c2' as column_b; + +select 'this query used to be broken in the old analyzer:'; +SELECT *, + multiIf(column_b IN (SELECT 'c2' as someproduct), 'yes', 'no') AS condition_1, + multiIf(column_b = 'c2', 'true', 'false') AS condition_2 +FROM (SELECT column_a, column_b FROM bugcheck1) +WHERE (condition_1 IN ('yes')) AND (condition_2 in ('true')) +settings allow_experimental_analyzer=0; + +select 'this query worked:'; + +SELECT + multiIf(column_b IN (SELECT 'c2' as someproduct), 'yes', 'no') AS condition_1, + multiIf(column_b = 'c2', 'true', 'false') AS condition_2 +FROM (SELECT column_a, column_b FROM bugcheck1) +WHERE (condition_1 IN ('yes')) AND (condition_2 in ('true')) +settings allow_experimental_analyzer=0; + +select 'experimental analyzer:'; +SELECT *, + multiIf(column_b IN (SELECT 'c2' as someproduct), 'yes', 'no') AS condition_1, + multiIf(column_b = 'c2', 'true', 'false') AS condition_2 +FROM (SELECT column_a, column_b FROM bugcheck1) +WHERE (condition_1 IN ('yes')) AND (condition_2 in ('true')) +settings allow_experimental_analyzer=1; + +DROP TABLE bugcheck1; diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/__init__.py b/tests/queries/0_stateless/03204_distributed_with_scalar_subquery.reference similarity index 100% rename from tests/integration/test_host_regexp_multiple_ptr_records_concurrent/__init__.py rename to tests/queries/0_stateless/03204_distributed_with_scalar_subquery.reference diff --git a/tests/queries/0_stateless/03204_distributed_with_scalar_subquery.sql
b/tests/queries/0_stateless/03204_distributed_with_scalar_subquery.sql new file mode 100644 index 00000000000..0a07ce48268 --- /dev/null +++ b/tests/queries/0_stateless/03204_distributed_with_scalar_subquery.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS t_c3oollc8r; +CREATE TABLE t_c3oollc8r (c_k37 Int32, c_y String, c_bou Int32, c_g1 Int32, c_lfntfzg Int32, c_kntw50q Int32) ENGINE = MergeTree ORDER BY (); + +SELECT ( + SELECT c_k37 + FROM t_c3oollc8r + ) > c_lfntfzg +FROM remote('127.0.0.{1,2}', currentDatabase(), t_c3oollc8r); + +DROP TABLE t_c3oollc8r; diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index ca2c4ec4192..7de065cc589 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -467,7 +467,7 @@ LOCALTIME LOCALTIMESTAMP LONGLONG LOONGARCH -Lemir +Lemire Levenshtein Liao LibFuzzer @@ -1319,6 +1319,12 @@ cfg cgroup cgroups chadmin +changeDay +changeHour +changeMinute +changeMonth +changeSecond +changeYear changelog changelogs charset @@ -2194,6 +2200,8 @@ parseReadableSizeOrZero parseTimeDelta parseable parsers +partitionId +partitionID pathFull pclmulqdq pcre diff --git a/utils/keeper-data-dumper/main.cpp b/utils/keeper-data-dumper/main.cpp index 25be8549dd6..db6dac52190 100644 --- a/utils/keeper-data-dumper/main.cpp +++ b/utils/keeper-data-dumper/main.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include #include
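The aspell-dict entries above register the new changeYear/changeMonth/changeDay/changeHour/changeMinute/changeSecond functions introduced by this PR. As the 02982_changeDate reference output earlier in the diff shows, out-of-range arguments do not throw; the result saturates to the bounds of the return type. A brief sketch of that behaviour, with the expected results taken from the reference file:
SELECT changeYear(toDate('2000-01-01'), 2500); -- clamps to 2149-06-06, the maximum of Date
SELECT changeYear(toDate('2000-01-01'), 1800); -- clamps to 1970-01-01, the minimum of Date
SELECT changeDay(toDate('2000-02-28'), 29); -- 2000-02-29, a valid date in a leap year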