diff --git a/.github/actions/common_setup/action.yml b/.github/actions/common_setup/action.yml
index 0d31945087d..b02413adc44 100644
--- a/.github/actions/common_setup/action.yml
+++ b/.github/actions/common_setup/action.yml
@@ -19,6 +19,8 @@ runs:
         cat >> "$GITHUB_ENV" << 'EOF'
         TEMP_PATH=${{runner.temp}}/${{inputs.job_type}}
         REPO_COPY=${{runner.temp}}/${{inputs.job_type}}/git-repo-copy
+        IMAGES_PATH=${{runner.temp}}/images_path
+        REPORTS_PATH=${{runner.temp}}/reports_dir
         EOF
         if [ -z "${{env.GITHUB_JOB_OVERRIDDEN}}" ] && [ "true" == "${{inputs.nested_job}}" ]; then
           echo "The GITHUB_JOB_OVERRIDDEN ENV is unset, and must be set for the nested jobs"
diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml
index 7611c5429c5..f6af4778cf1 100644
--- a/.github/workflows/backport_branches.yml
+++ b/.github/workflows/backport_branches.yml
@@ -105,66 +105,22 @@ jobs:
           path: ${{ runner.temp }}/changed_images.json
   CompatibilityCheckX86:
     needs: [BuilderDebRelease]
-    runs-on: [self-hosted, style-checker]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/compatibility_check
-          REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          EOF
-      - name: Check out repository code
-        uses: ClickHouse/checkout@v1
-        with:
-          clear-repository: true
-      - name: Download json reports
-        uses: actions/download-artifact@v3
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: CompatibilityCheckX86
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
-      - name: Cleanup
-        if: always()
-        run: |
-          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
-          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
-          sudo rm -fr "$TEMP_PATH"
+    uses: ./.github/workflows/reusable_test.yml
+    with:
+      test_name: Compatibility check X86
+      runner_type: style-checker
+      run_command: |
+        cd "$REPO_COPY/tests/ci"
+        python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
   CompatibilityCheckAarch64:
     needs: [BuilderDebAarch64]
-    runs-on: [self-hosted, style-checker]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/compatibility_check
-          REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          EOF
-      - name: Check out repository code
-        uses: ClickHouse/checkout@v1
-        with:
-          clear-repository: true
-      - name: Download json reports
-        uses: actions/download-artifact@v3
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: CompatibilityCheckAarch64
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
-      - name: Cleanup
-        if: always()
-        run: |
-          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
-          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
-          sudo rm -fr "$TEMP_PATH"
+    uses: ./.github/workflows/reusable_test.yml
+    with:
+      test_name: Compatibility check Aarch64
+      runner_type: style-checker
+      run_command: |
+        cd "$REPO_COPY/tests/ci"
+        python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
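All of the jobs converted in this patch delegate their boilerplate (env setup, artifact download, checkout, docker cleanup) to the shared workflow they call via `uses: ./.github/workflows/reusable_test.yml`. That file is not part of this diff, so the sketch below is only an inferred picture of the interface it has to expose, reconstructed from the inputs the callers pass (`test_name`, `runner_type`, `run_command`, `additional_envs`, `batches`); the descriptions, the default for `batches`, the `job_type: test` value, and the way `CHECK_NAME` and the extra envs are written to `$GITHUB_ENV` are assumptions, not code from this PR.

# Hypothetical sketch of .github/workflows/reusable_test.yml (not included in this diff).
name: Testing workflow
on:  # yamllint disable-line rule:truthy
  workflow_call:
    inputs:
      test_name:
        description: the check name shown in the CI report, also exported as CHECK_NAME
        required: true
        type: string
      runner_type:
        description: the label of the self-hosted runner to schedule the job on
        required: true
        type: string
      run_command:
        description: the command that launches the check from tests/ci
        required: true
        type: string
      additional_envs:
        description: extra ENV variables appended to $GITHUB_ENV before the run
        required: false
        type: string
      batches:
        description: how many parallel batches to launch (1 means no splitting)
        default: 1
        type: number

jobs:
  Test:
    runs-on: [self-hosted, '${{ inputs.runner_type }}']
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          CHECK_NAME=${{ inputs.test_name }}
          ${{ inputs.additional_envs }}
          EOF
      - name: Common setup
        uses: ./.github/actions/common_setup
        with:
          job_type: test  # assumed value for inputs.job_type
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Run
        run: ${{ inputs.run_command }}

Seen this way, the two variables added to common_setup at the top of this diff fit the same plan: IMAGES_PATH and REPORTS_PATH presumably give the shared job fixed locations for the changed-images artifact and the build reports, instead of every workflow defining its own copies.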
######################################################################################### #################################### ORDINARY BUILDS #################################### ######################################################################################### @@ -239,303 +195,114 @@ jobs: ##################################### BUILD REPORTER ####################################### ############################################################################################ BuilderReport: + if: ${{ success() || failure() }} needs: - BuilderDebRelease - BuilderDebAarch64 - BuilderDebAsan - BuilderDebTsan - BuilderDebDebug - runs-on: [self-hosted, style-checker] - if: ${{ success() || failure() }} - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - CHECK_NAME=ClickHouse build check - REPORTS_PATH=${{runner.temp}}/reports_dir - TEMP_PATH=${{runner.temp}}/report_check - NEEDS_DATA_PATH=${{runner.temp}}/needs.json - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Report Builder - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cat > "$NEEDS_DATA_PATH" << 'EOF' - ${{ toJSON(needs) }} - EOF - cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: ClickHouse build check + runner_type: style-checker + additional_envs: | + NEEDS_DATA<> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/report_check - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=ClickHouse special build check - NEEDS_DATA_PATH=${{runner.temp}}/needs.json - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Report Builder - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cat > "$NEEDS_DATA_PATH" << 'EOF' - ${{ toJSON(needs) }} - EOF - cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: ClickHouse special build check + runner_type: style-checker + additional_envs: | + NEEDS_DATA<> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/test_install - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Install packages (amd64) - REPO_COPY=${{runner.temp}}/test_install/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Test packages installation - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 install_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - 
sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Install packages (amd64) + runner_type: style-checker + run_command: | + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" InstallPackagesTestAarch64: needs: [BuilderDebAarch64] - runs-on: [self-hosted, style-checker-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/test_install - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Install packages (arm64) - REPO_COPY=${{runner.temp}}/test_install/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Test packages installation - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 install_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Install packages (arm64) + runner_type: style-checker-aarch64 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" ############################################################################################## ########################### FUNCTIONAl STATELESS TESTS ####################################### ############################################################################################## FunctionalStatelessTestAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (asan) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (asan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" ############################################################################################## ############################ FUNCTIONAl STATEFUL TESTS ####################################### ############################################################################################## FunctionalStatefulTestDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (debug) - 
REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (debug) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" ############################################################################################## ######################################### STRESS TESTS ####################################### ############################################################################################## StressTestTsan: needs: [BuilderDebTsan] - # func testers have 16 cores + 128 GB memory - # while stress testers have 36 cores + 72 memory - # It would be better to have something like 32 + 128, - # but such servers almost unavailable as spot instances. - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_thread - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (tsan) - REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (tsan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" ############################################################################################# ############################# INTEGRATION TESTS ############################################# ############################################################################################# IntegrationTestsRelease: needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (release) - REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r 
"$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Integration tests (release) + runner_type: stress-tester + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" FinishCheck: needs: - DockerHubPush diff --git a/.github/workflows/docs_check.yml b/.github/workflows/docs_check.yml index dada9999a68..6d449e74f30 100644 --- a/.github/workflows/docs_check.yml +++ b/.github/workflows/docs_check.yml @@ -96,68 +96,30 @@ jobs: path: ${{ runner.temp }}/changed_images.json StyleCheck: needs: DockerHubPush - runs-on: [self-hosted, style-checker] - if: ${{ success() || failure() }} - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{ runner.temp }}/style_check - ROBOT_CLICKHOUSE_SSH_KEY<> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/docs_check - REPO_COPY=${{runner.temp}}/docs_check/ClickHouse - EOF - - name: Download changed images - uses: actions/download-artifact@v3 - with: - name: changed_images - path: ${{ env.TEMP_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Docs Check - run: | - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 docs_check.py - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Docs check + runner_type: func-tester-aarch64 + additional_envs: | + run_command: | + cd "$REPO_COPY/tests/ci" + python3 docs_check.py FinishCheck: needs: - StyleCheck diff --git a/.github/workflows/jepsen.yml b/.github/workflows/jepsen.yml index 7f1fd16aa89..163de7769af 100644 --- a/.github/workflows/jepsen.yml +++ b/.github/workflows/jepsen.yml @@ -11,60 +11,19 @@ on: # yamllint disable-line rule:truthy workflow_call: jobs: KeeperJepsenRelease: - runs-on: [self-hosted, style-checker] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/keeper_jepsen - REPO_COPY=${{runner.temp}}/keeper_jepsen/ClickHouse - EOF - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - fetch-depth: 0 - filter: tree:0 - - name: Jepsen Test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 jepsen_check.py keeper - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Jepsen keeper check + runner_type: style-checker + run_command: | + cd "$REPO_COPY/tests/ci" + python3 jepsen_check.py keeper # ServerJepsenRelease: # runs-on: [self-hosted, style-checker] - # if: ${{ always() }} - # needs: [KeeperJepsenRelease] - # steps: - # - name: Set envs - # run: | - # cat >> "$GITHUB_ENV" << 'EOF' - # TEMP_PATH=${{runner.temp}}/server_jepsen - # REPO_COPY=${{runner.temp}}/server_jepsen/ClickHouse - # EOF - # - name: Check out repository code - 
# uses: ClickHouse/checkout@v1 - # with: - # clear-repository: true - # fetch-depth: 0 - # filter: tree:0 - # - name: Jepsen Test - # run: | - # sudo rm -fr "$TEMP_PATH" - # mkdir -p "$TEMP_PATH" - # cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - # cd "$REPO_COPY/tests/ci" - # python3 jepsen_check.py server - # - name: Cleanup - # if: always() - # run: | - # docker ps --quiet | xargs --no-run-if-empty docker kill ||: - # docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - # sudo rm -fr "$TEMP_PATH" + # uses: ./.github/workflows/reusable_test.yml + # with: + # test_name: Jepsen server check + # runner_type: style-checker + # run_command: | + # cd "$REPO_COPY/tests/ci" + # python3 jepsen_check.py server diff --git a/.github/workflows/libfuzzer.yml b/.github/workflows/libfuzzer.yml index e8a0396684a..1ca637c0d84 100644 --- a/.github/workflows/libfuzzer.yml +++ b/.github/workflows/libfuzzer.yml @@ -10,86 +10,17 @@ on: # yamllint disable-line rule:truthy workflow_call: jobs: BuilderFuzzers: - runs-on: [self-hosted, builder] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/build_check - IMAGES_PATH=${{runner.temp}}/images_path - REPO_COPY=${{runner.temp}}/build_check/ClickHouse - CACHES_PATH=${{runner.temp}}/../ccaches - BUILD_NAME=fuzzers - EOF - - name: Download changed images - # even if artifact does not exist, e.g. on `do not test` label or failed Docker job - continue-on-error: true - uses: actions/download-artifact@v3 - with: - name: changed_images - path: ${{ env.IMAGES_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - submodules: true - ref: ${{github.ref}} - - name: Build - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" - - name: Upload build URLs to artifacts - if: ${{ success() || failure() }} - uses: actions/upload-artifact@v3 - with: - name: ${{ env.BUILD_URLS }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + uses: ./.github/workflows/reusable_build.yml + with: + build_name: fuzzers libFuzzerTest: needs: [BuilderFuzzers] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/libfuzzer - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=libFuzzer tests - REPO_COPY=${{runner.temp}}/libfuzzer/ClickHouse - KILL_TIMEOUT=10800 - EOF - - name: Download changed images - # even if artifact does not exist, e.g. 
on `do not test` label or failed Docker job - continue-on-error: true - uses: actions/download-artifact@v3 - with: - name: changed_images - path: ${{ env.TEMP_PATH }} - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: libFuzzer test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 libfuzzer_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: libFuzzer tests + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 libfuzzer_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 5a0fc2fabcb..e662a5b6f98 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -89,97 +89,32 @@ jobs: path: ${{ runner.temp }}/changed_images.json StyleCheck: needs: DockerHubPush - runs-on: [self-hosted, style-checker] if: ${{ success() || failure() }} - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{ runner.temp }}/style_check - EOF - - name: Download changed images - # even if artifact does not exist, e.g. on `do not test` label or failed Docker job - continue-on-error: true - uses: actions/download-artifact@v3 - with: - name: changed_images - path: ${{ env.TEMP_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Style Check - run: | - cd "$GITHUB_WORKSPACE/tests/ci" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Style check + runner_type: style-checker + run_command: | + cd "$REPO_COPY/tests/ci" python3 style_check.py --no-push - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" CompatibilityCheckX86: needs: [BuilderDebRelease] - runs-on: [self-hosted, style-checker] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/compatibility_check - REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse - REPORTS_PATH=${{runner.temp}}/reports_dir - EOF - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: CompatibilityCheckX86 - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Compatibility check X86 + runner_type: style-checker + run_command: | + cd "$REPO_COPY/tests/ci" + python3 compatibility_check.py --check-name "Compatibility check 
(amd64)" --check-glibc --check-distributions CompatibilityCheckAarch64: needs: [BuilderDebAarch64] - runs-on: [self-hosted, style-checker] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/compatibility_check - REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse - REPORTS_PATH=${{runner.temp}}/reports_dir - EOF - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: CompatibilityCheckAarch64 - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Compatibility check X86 + runner_type: style-checker + run_command: | + cd "$REPO_COPY/tests/ci" + python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc ######################################################################################### #################################### ORDINARY BUILDS #################################### ######################################################################################### @@ -320,6 +255,7 @@ jobs: ##################################### BUILD REPORTER ####################################### ############################################################################################ BuilderReport: + if: ${{ success() || failure() }} needs: - BuilderBinRelease - BuilderDebAarch64 @@ -329,42 +265,19 @@ jobs: - BuilderDebRelease - BuilderDebTsan - BuilderDebUBsan - runs-on: [self-hosted, style-checker] - if: ${{ success() || failure() }} - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - CHECK_NAME=ClickHouse build check - REPORTS_PATH=${{runner.temp}}/reports_dir - REPORTS_PATH=${{runner.temp}}/reports_dir - TEMP_PATH=${{runner.temp}}/report_check - NEEDS_DATA_PATH=${{runner.temp}}/needs.json - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Report Builder - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cat > "$NEEDS_DATA_PATH" << 'EOF' - ${{ toJSON(needs) }} - EOF - cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: ClickHouse build check + runner_type: style-checker + additional_envs: | + NEEDS_DATA<> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/report_check - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=ClickHouse special build check - NEEDS_DATA_PATH=${{runner.temp}}/needs.json - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Report Builder - run: 
| - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cat > "$NEEDS_DATA_PATH" << 'EOF' - ${{ toJSON(needs) }} - EOF - cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: ClickHouse special build check + runner_type: style-checker + additional_envs: | + NEEDS_DATA<> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/test_install - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Install packages (amd64) - REPO_COPY=${{runner.temp}}/test_install/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Test packages installation - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 install_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Install packages (amd64) + runner_type: style-checker + run_command: | + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" InstallPackagesTestAarch64: needs: [BuilderDebAarch64] - runs-on: [self-hosted, style-checker-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/test_install - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Install packages (arm64) - REPO_COPY=${{runner.temp}}/test_install/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Test packages installation - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 install_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Install packages (arm64) + runner_type: style-checker-aarch64 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" ############################################################################################## ########################### FUNCTIONAl STATELESS TESTS ####################################### ############################################################################################## FunctionalStatelessTestRelease: needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release) - REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse - KILL_TIMEOUT=10800 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - 
name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (release) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatelessTestReleaseDatabaseOrdinary: needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_release_database_ordinary - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, DatabaseOrdinary) - REPO_COPY=${{runner.temp}}/stateless_release_database_ordinary/ClickHouse - KILL_TIMEOUT=10800 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestReleaseDatabaseReplicated0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (release, DatabaseOrdinary) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestReleaseDatabaseReplicated: needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_database_replicated - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, DatabaseReplicated) - REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestReleaseDatabaseReplicated1: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (release, DatabaseReplicated) + runner_type: func-tester + additional_envs: | + 
KILL_TIMEOUT=10800 + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestReleaseS3: needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_database_replicated - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, DatabaseReplicated) - REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestReleaseDatabaseReplicated2: - needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_database_replicated - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, DatabaseReplicated) - REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestReleaseDatabaseReplicated3: - needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_database_replicated - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, DatabaseReplicated) - REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestReleaseS3_0: - needs: [BuilderDebRelease] - runs-on: 
[self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, s3 storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=2 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestReleaseS3_1: - needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, s3 storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=2 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (release, s3 storage) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 2 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatelessTestReleaseAnalyzer: needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_analyzer - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, analyzer) - REPO_COPY=${{runner.temp}}/stateless_analyzer/ClickHouse - KILL_TIMEOUT=10800 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (release, analyzer) + runner_type: func-tester + additional_envs: | + 
KILL_TIMEOUT=10800 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatelessTestAarch64: needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (aarch64) - REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse - KILL_TIMEOUT=10800 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestAsan0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (aarch64) + runner_type: func-tester-aarch64 + additional_envs: | + KILL_TIMEOUT=10800 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (asan) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestAsan1: - needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (asan) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - 
FunctionalStatelessTestAsan2: - needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (asan) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestAsan3: - needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (asan) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestTsan0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (asan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestTsan: needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan) - REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestTsan1: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set 
envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan) - REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestTsan2: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan) - REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestTsan3: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan) - REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestTsan4: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan) - REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=4 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out 
repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestUBsan0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (tsan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 5 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestUBsan: needs: [BuilderDebUBsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (ubsan) - REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=2 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestUBsan1: - needs: [BuilderDebUBsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (ubsan) - REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=2 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (ubsan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 2 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestMsan: needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) - 
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan1: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) - REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan2: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) - REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan3: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) - REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" 
- cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan4: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) - REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=4 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan5: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) - REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=5 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (msan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 6 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py 
"$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug1: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug2: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug3: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug4: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - 
TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=4 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (debug) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 5 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" ############################################################################################## ############################ FUNCTIONAl STATEFUL TESTS ####################################### ############################################################################################## FunctionalStatefulTestRelease: needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (release) - REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (release) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestAarch64: needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (aarch64) - REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: 
always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (aarch64) + runner_type: func-tester-aarch64 + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (asan) - REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (asan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestTsan: needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (tsan) - REPO_COPY=${{runner.temp}}/stateful_tsan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (tsan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestMsan: needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_msan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (msan) - REPO_COPY=${{runner.temp}}/stateful_msan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: 
Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (msan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestUBsan: needs: [BuilderDebUBsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (ubsan) - REPO_COPY=${{runner.temp}}/stateful_ubsan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (ubsan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (debug) - REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (debug) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" ############################################################################################## ######################################### STRESS TESTS ####################################### ############################################################################################## StressTestAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, 
stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_thread - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (asan) - REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (asan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" StressTestTsan: needs: [BuilderDebTsan] - # func testers have 16 cores + 128 GB memory - # while stress testers have 36 cores + 72 memory - # It would be better to have something like 32 + 128, - # but such servers almost unavailable as spot instances. - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_thread - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (tsan) - REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (tsan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" StressTestMsan: needs: [BuilderDebMsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (msan) - REPO_COPY=${{runner.temp}}/stress_memory/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (msan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" StressTestUBsan: needs: 
[BuilderDebUBsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_undefined - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (ubsan) - REPO_COPY=${{runner.temp}}/stress_undefined/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (ubsan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" StressTestDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (debug) - REPO_COPY=${{runner.temp}}/stress_debug/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (debug) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" ############################################################################################# ############################# INTEGRATION TESTS ############################################# ############################################################################################# - IntegrationTestsAsan0: + IntegrationTestsAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr 
"$TEMP_PATH" - IntegrationTestsAsan1: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Integration tests (asan) + runner_type: stress-tester + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + IntegrationTestsAnalyzerAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAsan2: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAsan3: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAsan4: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - 
REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=4 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAsan5: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=5 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAnalyzerAsan0: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan, analyzer) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAnalyzerAsan1: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan, analyzer) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr 
"$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAnalyzerAsan2: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan, analyzer) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAnalyzerAsan3: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan, analyzer) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAnalyzerAsan4: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan, analyzer) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=4 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAnalyzerAsan5: - needs: [BuilderDebAsan] - runs-on: [self-hosted, 
stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan, analyzer) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=5 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Integration tests (asan, analyzer) + runner_type: stress-tester + batches: 6 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + IntegrationTestsTsan: needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan1: - needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan2: - needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - 
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan3: - needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan4: - needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=4 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan5: - needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=5 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 
integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsRelease0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Integration tests (tsan) + runner_type: stress-tester + batches: 6 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + IntegrationTestsRelease: needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (release) - REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsRelease1: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (release) - REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsRelease2: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (release) - REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty 
docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsRelease3: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (release) - REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Integration tests (release) + runner_type: stress-tester + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" ############################################################################################## ##################################### AST FUZZERS ############################################ ############################################################################################## ASTFuzzerTestAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/ast_fuzzer_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=AST fuzzer (asan) - REPO_COPY=${{runner.temp}}/ast_fuzzer_asan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Fuzzer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 ast_fuzzer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: AST fuzzer (asan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py "$CHECK_NAME" ASTFuzzerTestTsan: needs: [BuilderDebTsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/ast_fuzzer_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=AST fuzzer (tsan) - REPO_COPY=${{runner.temp}}/ast_fuzzer_tsan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Fuzzer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 ast_fuzzer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs 
--no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: AST fuzzer (tsan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py "$CHECK_NAME" ASTFuzzerTestUBSan: needs: [BuilderDebUBsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/ast_fuzzer_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=AST fuzzer (ubsan) - REPO_COPY=${{runner.temp}}/ast_fuzzer_ubsan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Fuzzer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 ast_fuzzer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: AST fuzzer (ubsan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py "$CHECK_NAME" ASTFuzzerTestMSan: needs: [BuilderDebMsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/ast_fuzzer_msan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=AST fuzzer (msan) - REPO_COPY=${{runner.temp}}/ast_fuzzer_msan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Fuzzer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 ast_fuzzer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: AST fuzzer (msan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py "$CHECK_NAME" ASTFuzzerTestDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/ast_fuzzer_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=AST fuzzer (debug) - REPO_COPY=${{runner.temp}}/ast_fuzzer_debug/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Fuzzer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 ast_fuzzer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo 
rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: AST fuzzer (debug) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py "$CHECK_NAME" ############################################################################################# #################################### UNIT TESTS ############################################# ############################################################################################# UnitTestsAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (asan) - REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Unit tests (asan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py "$CHECK_NAME" UnitTestsReleaseClang: needs: [BuilderBinRelease] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (release) - REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Unit tests (release) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py "$CHECK_NAME" UnitTestsTsan: needs: [BuilderDebTsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (tsan) - REPO_COPY=${{runner.temp}}/unit_tests_tsan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py "$CHECK_NAME" - - 
name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Unit tests (tsan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py "$CHECK_NAME" UnitTestsMsan: needs: [BuilderDebMsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_msan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (msan) - REPO_COPY=${{runner.temp}}/unit_tests_msan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Unit tests (msan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py "$CHECK_NAME" UnitTestsUBsan: needs: [BuilderDebUBsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (ubsan) - REPO_COPY=${{runner.temp}}/unit_tests_ubsan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Unit tests (ubsan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py "$CHECK_NAME" ############################################################################################# #################################### PERFORMANCE TESTS ###################################### ############################################################################################# - PerformanceComparisonX86-0: + PerformanceComparisonX86: needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository 
code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonX86-1: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonX86-2: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonX86-3: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty 
docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonAarch-0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Performance Comparison + runner_type: stress-tester + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" + PerformanceComparisonAarch: needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison Aarch64 - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonAarch-1: - needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison Aarch64 - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonAarch-2: - needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison Aarch64 - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonAarch-3: - needs: [BuilderDebAarch64] - runs-on: [self-hosted, 
func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison Aarch64 - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Performance Comparison Aarch64 + runner_type: func-tester-aarch64 + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" ############################################################################################## ###################################### SQLANCER FUZZERS ###################################### ############################################################################################## SQLancerTestRelease: needs: [BuilderDebRelease] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/sqlancer_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=SQLancer (release) - REPO_COPY=${{runner.temp}}/sqlancer_release/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: SQLancer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 sqlancer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: SQLancer (release) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 sqlancer_check.py "$CHECK_NAME" SQLancerTestDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/sqlancer_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=SQLancer (debug) - REPO_COPY=${{runner.temp}}/sqlancer_debug/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: SQLancer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 sqlancer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f 
||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: SQLancer (debug) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 sqlancer_check.py "$CHECK_NAME" FinishCheck: needs: - DockerHubPush - BuilderReport - BuilderSpecialReport - MarkReleaseReady - - FunctionalStatelessTestDebug0 - - FunctionalStatelessTestDebug1 - - FunctionalStatelessTestDebug2 - - FunctionalStatelessTestDebug3 - - FunctionalStatelessTestDebug4 + - FunctionalStatelessTestDebug - FunctionalStatelessTestRelease - FunctionalStatelessTestReleaseDatabaseOrdinary - - FunctionalStatelessTestReleaseDatabaseReplicated0 - - FunctionalStatelessTestReleaseDatabaseReplicated1 - - FunctionalStatelessTestReleaseDatabaseReplicated2 - - FunctionalStatelessTestReleaseDatabaseReplicated3 + - FunctionalStatelessTestReleaseDatabaseReplicated + - FunctionalStatelessTestReleaseAnalyzer + - FunctionalStatelessTestReleaseS3 - FunctionalStatelessTestAarch64 - - FunctionalStatelessTestAsan0 - - FunctionalStatelessTestAsan1 - - FunctionalStatelessTestAsan2 - - FunctionalStatelessTestAsan3 - - FunctionalStatelessTestTsan0 - - FunctionalStatelessTestTsan1 - - FunctionalStatelessTestTsan2 - - FunctionalStatelessTestTsan3 - - FunctionalStatelessTestTsan4 - - FunctionalStatelessTestMsan0 - - FunctionalStatelessTestMsan1 - - FunctionalStatelessTestMsan2 - - FunctionalStatelessTestMsan3 - - FunctionalStatelessTestMsan4 - - FunctionalStatelessTestMsan5 - - FunctionalStatelessTestUBsan0 - - FunctionalStatelessTestUBsan1 + - FunctionalStatelessTestAsan + - FunctionalStatelessTestTsan + - FunctionalStatelessTestMsan + - FunctionalStatelessTestUBsan - FunctionalStatefulTestDebug - FunctionalStatefulTestRelease - - FunctionalStatelessTestReleaseS3_0 - - FunctionalStatelessTestReleaseS3_1 - FunctionalStatefulTestAarch64 - FunctionalStatefulTestAsan - FunctionalStatefulTestTsan @@ -3565,32 +808,12 @@ jobs: - StressTestTsan - StressTestMsan - StressTestUBsan - - IntegrationTestsAsan0 - - IntegrationTestsAsan1 - - IntegrationTestsAsan2 - - IntegrationTestsAsan3 - - IntegrationTestsAsan4 - - IntegrationTestsAsan5 - - IntegrationTestsAnalyzerAsan0 - - IntegrationTestsAnalyzerAsan1 - - IntegrationTestsAnalyzerAsan2 - - IntegrationTestsAnalyzerAsan3 - - IntegrationTestsAnalyzerAsan4 - - IntegrationTestsAnalyzerAsan5 - - IntegrationTestsRelease0 - - IntegrationTestsRelease1 - - IntegrationTestsRelease2 - - IntegrationTestsRelease3 - - IntegrationTestsTsan0 - - IntegrationTestsTsan1 - - IntegrationTestsTsan2 - - IntegrationTestsTsan3 - - IntegrationTestsTsan4 - - IntegrationTestsTsan5 - - PerformanceComparisonX86-0 - - PerformanceComparisonX86-1 - - PerformanceComparisonX86-2 - - PerformanceComparisonX86-3 + - IntegrationTestsAsan + - IntegrationTestsAnalyzerAsan + - IntegrationTestsTsan + - IntegrationTestsRelease + - PerformanceComparisonX86 + - PerformanceComparisonAarch - CompatibilityCheckX86 - CompatibilityCheckAarch64 - ASTFuzzerTestDebug diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 6452b83fdd6..1e94f70b9e6 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -74,9 +74,6 @@ jobs: with: name: changed_images path: ${{ runner.temp }}/changed_images.json - Codebrowser: - needs: [DockerHubPush] - uses: ./.github/workflows/woboq.yml SonarCloud: runs-on: [self-hosted, builder] env: diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 66a0b186743..f8f052d9226 100644 --- 
a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -118,131 +118,47 @@ jobs: path: ${{ runner.temp }}/changed_images.json StyleCheck: needs: DockerHubPush - runs-on: [self-hosted, style-checker] # We need additional `&& ! cancelled()` to have the job being able to cancel if: ${{ success() || failure() || ( always() && ! cancelled() ) }} - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{ runner.temp }}/style_check - ROBOT_CLICKHOUSE_SSH_KEY<> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/fasttest - REPO_COPY=${{runner.temp}}/fasttest/ClickHouse - CACHES_PATH=${{runner.temp}}/../ccaches - EOF - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Download changed images - uses: actions/download-artifact@v3 - with: - name: changed_images - path: ${{ env.TEMP_PATH }} - - name: Fast Test - run: | - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 fast_test_check.py - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Fast tests + runner_type: builder + run_command: | + cd "$REPO_COPY/tests/ci" + python3 fast_test_check.py CompatibilityCheckX86: needs: [BuilderDebRelease] - runs-on: [self-hosted, style-checker] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/compatibility_check - REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse - REPORTS_PATH=${{runner.temp}}/reports_dir - EOF - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: CompatibilityCheckX86 - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Compatibility check X86 + runner_type: style-checker + run_command: | + cd "$REPO_COPY/tests/ci" + python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions CompatibilityCheckAarch64: needs: [BuilderDebAarch64] - runs-on: [self-hosted, style-checker] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/compatibility_check - REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse - REPORTS_PATH=${{runner.temp}}/reports_dir - EOF - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: CompatibilityCheckAarch64 - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs 
--no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Compatibility check Aarch64 + runner_type: style-checker + run_command: | + cd "$REPO_COPY/tests/ci" + python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc ######################################################################################### #################################### ORDINARY BUILDS #################################### ######################################################################################### @@ -373,6 +289,7 @@ jobs: ##################################### BUILD REPORTER ####################################### ############################################################################################ BuilderReport: + if: ${{ success() || failure() }} needs: - BuilderBinRelease - BuilderDebAarch64 @@ -382,41 +299,19 @@ jobs: - BuilderDebRelease - BuilderDebTsan - BuilderDebUBsan - runs-on: [self-hosted, style-checker] - if: ${{ success() || failure() }} - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - CHECK_NAME=ClickHouse build check - REPORTS_PATH=${{runner.temp}}/reports_dir - TEMP_PATH=${{runner.temp}}/report_check - NEEDS_DATA_PATH=${{runner.temp}}/needs.json - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Report Builder - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cat > "$NEEDS_DATA_PATH" << 'EOF' - ${{ toJSON(needs) }} - EOF - cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: ClickHouse build check + runner_type: style-checker + additional_envs: | + NEEDS_DATA<> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/report_check - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=ClickHouse special build check - NEEDS_DATA_PATH=${{runner.temp}}/needs.json - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Report Builder - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cat > "$NEEDS_DATA_PATH" << 'EOF' - ${{ toJSON(needs) }} - EOF - cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: ClickHouse special build check + runner_type: style-checker + additional_envs: | + NEEDS_DATA<> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/test_install - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Install packages (amd64) - REPO_COPY=${{runner.temp}}/test_install/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 -
with: - clear-repository: true - - name: Test packages installation - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 install_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Install packages (amd64) + runner_type: style-checker + run_command: | + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" InstallPackagesTestAarch64: needs: [BuilderDebAarch64] - runs-on: [self-hosted, style-checker-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/test_install - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Install packages (arm64) - REPO_COPY=${{runner.temp}}/test_install/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Test packages installation - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 install_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Install packages (arm64) + runner_type: style-checker-aarch64 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" ############################################################################################## ########################### FUNCTIONAl STATELESS TESTS ####################################### ############################################################################################## FunctionalStatelessTestRelease: needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release) - REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse - KILL_TIMEOUT=10800 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestReleaseDatabaseReplicated0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (release) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestReleaseDatabaseReplicated: needs: [BuilderDebRelease] - runs-on: [self-hosted, 
func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_database_replicated - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, DatabaseReplicated) - REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestReleaseDatabaseReplicated1: - needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_database_replicated - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, DatabaseReplicated) - REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestReleaseDatabaseReplicated2: - needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_database_replicated - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, DatabaseReplicated) - REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestReleaseDatabaseReplicated3: - needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_database_replicated - REPORTS_PATH=${{runner.temp}}/reports_dir - 
CHECK_NAME=Stateless tests (release, DatabaseReplicated) - REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (release, DatabaseReplicated) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatelessTestReleaseWideParts: needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_wide_parts - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, wide parts enabled) - REPO_COPY=${{runner.temp}}/stateless_wide_parts/ClickHouse - KILL_TIMEOUT=10800 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (release, wide parts enabled) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatelessTestReleaseAnalyzer: needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_analyzer - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, analyzer) - REPO_COPY=${{runner.temp}}/stateless_analyzer/ClickHouse - KILL_TIMEOUT=10800 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestReleaseS3_0: + uses: 
./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (release, analyzer) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestReleaseS3: needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, s3 storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=2 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestReleaseS3_1: - needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release, s3 storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=2 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestS3Debug0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (release, s3 storage) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 2 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestS3Debug: needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug, s3 storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd 
"$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestS3Debug1: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug, s3 storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestS3Debug2: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug, s3 storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestS3Debug3: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug, s3 storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - 
FunctionalStatelessTestS3Debug4: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug, s3 storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=4 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestS3Debug5: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug, s3 storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=5 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestS3Tsan0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (debug, s3 storage) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 6 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestS3Tsan: needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan, s3 storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr 
"$TEMP_PATH" - FunctionalStatelessTestS3Tsan1: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan, s3 storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestS3Tsan2: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan, s3 storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestS3Tsan3: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan, s3 storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestS3Tsan4: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_s3_storage_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan, s3 
storage) - REPO_COPY=${{runner.temp}}/stateless_s3_storage_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=4 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (tsan, s3 storage) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 5 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatelessTestAarch64: needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (aarch64) - REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse - KILL_TIMEOUT=10800 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestAsan0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (aarch64) + runner_type: func-tester-aarch64 + additional_envs: | + KILL_TIMEOUT=10800 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (asan) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestAsan1: - needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set 
envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (asan) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestAsan2: - needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (asan) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestAsan3: - needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (asan) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestTsan0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (asan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestTsan: needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_tsan - 
REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan) - REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestTsan1: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan) - REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestTsan2: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan) - REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestTsan3: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan) - REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional 
test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestTsan4: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan) - REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=4 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestUBsan0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (tsan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 5 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestMsan: + needs: [BuilderDebMsan] + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (msan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 6 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestUBsan: needs: [BuilderDebUBsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (ubsan) - REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=2 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestUBsan1: - needs: [BuilderDebUBsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (ubsan) - REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=2 - EOF - 
- name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan0: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) - REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan1: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) - REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan2: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) - REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" 
"$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan3: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) - REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan4: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) - REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=4 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan5: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) - REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=5 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (ubsan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 
2 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug1: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug2: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug3: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug) - 
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug4: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=4 - RUN_BY_HASH_TOTAL=5 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (debug) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 5 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatelessTestFlakyCheck: needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_flaky_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests flaky check (asan) - REPO_COPY=${{runner.temp}}/stateless_flaky_asan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests flaky check (asan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" TestsBugfixCheck: needs: [CheckLabels, StyleCheck] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - 
TEMP_PATH=${{runner.temp}}/tests_bugfix_check - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=tests bugfix validate check - KILL_TIMEOUT=3600 - REPO_COPY=${{runner.temp}}/tests_bugfix_check/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Bugfix test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: tests bugfix validate check + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" - TEMP_PATH="${TEMP_PATH}/integration" \ - REPORTS_PATH="${REPORTS_PATH}/integration" \ - python3 integration_test_check.py "Integration $CHECK_NAME" \ - --validate-bugfix --post-commit-status=file || echo 'ignore exit code' + TEMP_PATH="${TEMP_PATH}/integration" \ + REPORTS_PATH="${REPORTS_PATH}/integration" \ + python3 integration_test_check.py "Integration $CHECK_NAME" \ + --validate-bugfix --post-commit-status=file || echo 'ignore exit code' - TEMP_PATH="${TEMP_PATH}/stateless" \ - REPORTS_PATH="${REPORTS_PATH}/stateless" \ - python3 functional_test_check.py "Stateless $CHECK_NAME" "$KILL_TIMEOUT" \ - --validate-bugfix --post-commit-status=file || echo 'ignore exit code' + TEMP_PATH="${TEMP_PATH}/stateless" \ + REPORTS_PATH="${REPORTS_PATH}/stateless" \ + python3 functional_test_check.py "Stateless $CHECK_NAME" "$KILL_TIMEOUT" \ + --validate-bugfix --post-commit-status=file || echo 'ignore exit code' - python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/functional_commit_status.tsv" "${TEMP_PATH}/integration/integration_commit_status.tsv" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/functional_commit_status.tsv" "${TEMP_PATH}/integration/integration_commit_status.tsv" ############################################################################################## ############################ FUNCTIONAl STATEFUL TESTS ####################################### ############################################################################################## FunctionalStatefulTestRelease: needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (release) - REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: 
Stateful tests (release) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestAarch64: needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (aarch64) - REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (aarch64) + runner_type: func-tester-aarch64 + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (asan) - REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (asan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestTsan: needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (tsan) - REPO_COPY=${{runner.temp}}/stateful_tsan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup 
- if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (tsan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestMsan: needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_msan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (msan) - REPO_COPY=${{runner.temp}}/stateful_msan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (msan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestUBsan: needs: [BuilderDebUBsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (ubsan) - REPO_COPY=${{runner.temp}}/stateful_ubsan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (ubsan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (debug) - REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: 
Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (debug) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" # Parallel replicas FunctionalStatefulTestDebugParallelReplicas: needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (debug, ParallelReplicas) - REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (debug, ParallelReplicas) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestUBsanParallelReplicas: needs: [BuilderDebUBsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (ubsan, ParallelReplicas) - REPO_COPY=${{runner.temp}}/stateful_ubsan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (ubsan, ParallelReplicas) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestMsanParallelReplicas: needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] 
- steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_msan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (msan, ParallelReplicas) - REPO_COPY=${{runner.temp}}/stateful_msan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (msan, ParallelReplicas) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestTsanParallelReplicas: needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (tsan, ParallelReplicas) - REPO_COPY=${{runner.temp}}/stateful_tsan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (tsan, ParallelReplicas) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestAsanParallelReplicas: needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (asan, ParallelReplicas) - REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() 
- run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (asan, ParallelReplicas) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestReleaseParallelReplicas: needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (release, ParallelReplicas) - REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (release, ParallelReplicas) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" ############################################################################################## ######################################### STRESS TESTS ####################################### ############################################################################################## StressTestAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (asan) - REPO_COPY=${{runner.temp}}/stress_asan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (asan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" StressTestTsan: needs: [BuilderDebTsan] - # func testers have 16 cores + 128 GB memory - # while stress testers have 36 cores + 72 memory - # It would be better to have something like 32 + 128, - # but such servers almost unavailable as spot instances. 
- runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_thread - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (tsan) - REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (tsan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" StressTestMsan: needs: [BuilderDebMsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (msan) - REPO_COPY=${{runner.temp}}/stress_memory/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (msan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" StressTestUBsan: needs: [BuilderDebUBsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_undefined - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (ubsan) - REPO_COPY=${{runner.temp}}/stress_undefined/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (ubsan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" StressTestDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_debug - 
REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (debug) - REPO_COPY=${{runner.temp}}/stress_debug/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - ############################################################################################## - ######################################### UPGRADE CHECK ###################################### - ############################################################################################## + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (debug) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" +############################################################################################## +######################################### UPGRADE CHECK ###################################### +############################################################################################## UpgradeCheckAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/upgrade_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Upgrade check (asan) - REPO_COPY=${{runner.temp}}/upgrade_asan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Upgrade check - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 upgrade_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Upgrade check (asan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 upgrade_check.py "$CHECK_NAME" UpgradeCheckTsan: needs: [BuilderDebTsan] - # same as for stress test with tsan - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/upgrade_thread - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Upgrade check (tsan) - REPO_COPY=${{runner.temp}}/upgrade_thread/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Upgrade check - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 upgrade_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | 
xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Upgrade check (tsan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 upgrade_check.py "$CHECK_NAME" UpgradeCheckMsan: needs: [BuilderDebMsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/upgrade_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Upgrade check (msan) - REPO_COPY=${{runner.temp}}/upgrade_memory/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Upgrade check - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 upgrade_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Upgrade check (msan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 upgrade_check.py "$CHECK_NAME" UpgradeCheckDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/upgrade_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Upgrade check (debug) - REPO_COPY=${{runner.temp}}/upgrade_debug/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Upgrade check - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 upgrade_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Upgrade check (debug) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 upgrade_check.py "$CHECK_NAME" ############################################################################################## ##################################### AST FUZZERS ############################################ ############################################################################################## ASTFuzzerTestAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/ast_fuzzer_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=AST fuzzer (asan) - REPO_COPY=${{runner.temp}}/ast_fuzzer_asan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Fuzzer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 
ast_fuzzer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: AST fuzzer (asan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py "$CHECK_NAME" ASTFuzzerTestTsan: needs: [BuilderDebTsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/ast_fuzzer_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=AST fuzzer (tsan) - REPO_COPY=${{runner.temp}}/ast_fuzzer_tsan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Fuzzer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 ast_fuzzer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: AST fuzzer (tsan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py "$CHECK_NAME" ASTFuzzerTestUBSan: needs: [BuilderDebUBsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/ast_fuzzer_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=AST fuzzer (ubsan) - REPO_COPY=${{runner.temp}}/ast_fuzzer_ubsan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Fuzzer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 ast_fuzzer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: AST fuzzer (ubsan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py "$CHECK_NAME" ASTFuzzerTestMSan: needs: [BuilderDebMsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/ast_fuzzer_msan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=AST fuzzer (msan) - REPO_COPY=${{runner.temp}}/ast_fuzzer_msan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Fuzzer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 ast_fuzzer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs 
--no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: AST fuzzer (msan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py "$CHECK_NAME" ASTFuzzerTestDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/ast_fuzzer_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=AST fuzzer (debug) - REPO_COPY=${{runner.temp}}/ast_fuzzer_debug/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Fuzzer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 ast_fuzzer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: AST fuzzer (debug) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py "$CHECK_NAME" ############################################################################################# ############################# INTEGRATION TESTS ############################################# ############################################################################################# - IntegrationTestsAsan0: + IntegrationTestsAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAsan1: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Integration tests (asan) + runner_type: stress-tester + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + IntegrationTestsAnalyzerAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - 
path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAsan2: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAsan3: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAsan4: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=4 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker 
rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAsan5: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=5 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAnalyzerAsan0: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan, analyzer) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAnalyzerAsan1: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan, analyzer) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAnalyzerAsan2: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan, analyzer) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=6 - EOF - 
- name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAnalyzerAsan3: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan, analyzer) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAnalyzerAsan4: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan, analyzer) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=4 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAnalyzerAsan5: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan, analyzer) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=5 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: 
always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Integration tests (asan, analyzer) + runner_type: stress-tester + batches: 6 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + IntegrationTestsTsan: needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan1: - needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan2: - needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan3: - needs: [BuilderDebTsan] - runs-on: 
[self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan4: - needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=4 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan5: - needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=5 - RUN_BY_HASH_TOTAL=6 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsRelease0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Integration tests (tsan) + runner_type: stress-tester + batches: 6 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + IntegrationTestsRelease: needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (release) 
- REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsRelease1: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (release) - REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsRelease2: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (release) - REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsRelease3: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (release) - REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - 
cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Integration tests (release) + runner_type: stress-tester + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" IntegrationTestsFlakyCheck: needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan_flaky_check - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests flaky check (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan_flaky_check/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Integration tests flaky check (asan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" ############################################################################################# #################################### UNIT TESTS ############################################# ############################################################################################# UnitTestsAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (asan) - REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Unit tests (asan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py "$CHECK_NAME" UnitTestsReleaseClang: needs: [BuilderBinRelease] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (release) - REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse - EOF - - name: Download json reports - 
uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Unit tests (release) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py "$CHECK_NAME" UnitTestsTsan: needs: [BuilderDebTsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (tsan) - REPO_COPY=${{runner.temp}}/unit_tests_tsan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Unit tests (tsan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py "$CHECK_NAME" UnitTestsMsan: needs: [BuilderDebMsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_msan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (msan) - REPO_COPY=${{runner.temp}}/unit_tests_msan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Unit tests (msan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py "$CHECK_NAME" UnitTestsUBsan: needs: [BuilderDebUBsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (ubsan) - REPO_COPY=${{runner.temp}}/unit_tests_ubsan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - 
uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Unit tests (ubsan) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py "$CHECK_NAME" ############################################################################################# #################################### PERFORMANCE TESTS ###################################### ############################################################################################# - PerformanceComparisonX86-0: + PerformanceComparisonX86: needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonX86-1: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonX86-2: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ 
env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonX86-3: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonAarch-0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Performance Comparison + runner_type: stress-tester + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" + PerformanceComparisonAarch: needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison Aarch64 - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonAarch-1: - needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison Aarch64 - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance 
Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonAarch-2: - needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison Aarch64 - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonAarch-3: - needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison Aarch64 - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Performance Comparison Aarch64 + runner_type: func-tester-aarch64 + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" ############################################################################################## ###################################### SQLANCER FUZZERS ###################################### ############################################################################################## SQLancerTestRelease: needs: [BuilderDebRelease] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/sqlancer_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=SQLancer (release) - REPO_COPY=${{runner.temp}}/sqlancer_release/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 
- with: - clear-repository: true - - name: SQLancer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 sqlancer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: SQLancer (release) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 sqlancer_check.py "$CHECK_NAME" SQLancerTestDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/sqlancer_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=SQLancer (debug) - REPO_COPY=${{runner.temp}}/sqlancer_debug/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: SQLancer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 sqlancer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" -############################################################################################# -###################################### JEPSEN TESTS ######################################### -############################################################################################# - Jepsen: - # This is special test NOT INCLUDED in FinishCheck - # When it's skipped, all dependent tasks will be skipped too. 
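The per-shard copies that used to hard-code RUN_BY_HASH_NUM and RUN_BY_HASH_TOTAL (IntegrationTestsAsan0 through 5, PerformanceComparisonX86-0 through 3, and so on) collapse into a single call that only states "batches: N". How reusable_test.yml expands that number back into per-batch jobs is not visible in this diff; the fragment below is one plausible sketch using a job matrix, with the batch list, env wiring, and steps assumed rather than quoted.

# Sketch of a possible batch fan-out inside the reusable workflow; values here are illustrative.
jobs:
  test:
    name: ${{ inputs.test_name }} (batch ${{ matrix.batch }})
    runs-on: [self-hosted, '${{ inputs.runner_type }}']
    strategy:
      fail-fast: false
      matrix:
        # A real implementation would derive this list from inputs.batches,
        # e.g. by generating it in a preceding job and reading it via fromJson().
        batch: [0, 1, 2, 3]
    env:
      CHECK_NAME: ${{ inputs.test_name }}
      RUN_BY_HASH_NUM: ${{ matrix.batch }}
      RUN_BY_HASH_TOTAL: 4  # assumption: mirrors inputs.batches
    steps:
      - name: Append additional envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          ${{ inputs.additional_envs }}
          EOF
      - name: Run check
        run: |
          ${{ inputs.run_command }}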
- # DO NOT add it there - if: contains(github.event.pull_request.labels.*.name, 'jepsen-test') - needs: [BuilderBinRelease] - uses: ./.github/workflows/jepsen.yml + uses: ./.github/workflows/reusable_test.yml + with: + test_name: SQLancer (debug) + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 sqlancer_check.py "$CHECK_NAME" FinishCheck: needs: - StyleCheck @@ -4426,36 +978,16 @@ jobs: - BuilderReport - BuilderSpecialReport - FastTest - - FunctionalStatelessTestDebug0 - - FunctionalStatelessTestDebug1 - - FunctionalStatelessTestDebug2 - - FunctionalStatelessTestDebug3 - - FunctionalStatelessTestDebug4 + - FunctionalStatelessTestDebug - FunctionalStatelessTestRelease - - FunctionalStatelessTestReleaseDatabaseReplicated0 - - FunctionalStatelessTestReleaseDatabaseReplicated1 - - FunctionalStatelessTestReleaseDatabaseReplicated2 - - FunctionalStatelessTestReleaseDatabaseReplicated3 + - FunctionalStatelessTestReleaseDatabaseReplicated - FunctionalStatelessTestReleaseWideParts - FunctionalStatelessTestReleaseAnalyzer - FunctionalStatelessTestAarch64 - - FunctionalStatelessTestAsan0 - - FunctionalStatelessTestAsan1 - - FunctionalStatelessTestAsan2 - - FunctionalStatelessTestAsan3 - - FunctionalStatelessTestTsan0 - - FunctionalStatelessTestTsan1 - - FunctionalStatelessTestTsan2 - - FunctionalStatelessTestTsan3 - - FunctionalStatelessTestTsan4 - - FunctionalStatelessTestMsan0 - - FunctionalStatelessTestMsan1 - - FunctionalStatelessTestMsan2 - - FunctionalStatelessTestMsan3 - - FunctionalStatelessTestMsan4 - - FunctionalStatelessTestMsan5 - - FunctionalStatelessTestUBsan0 - - FunctionalStatelessTestUBsan1 + - FunctionalStatelessTestAsan + - FunctionalStatelessTestTsan + - FunctionalStatelessTestMsan + - FunctionalStatelessTestUBsan - FunctionalStatefulTestDebug - FunctionalStatefulTestRelease - FunctionalStatefulTestAarch64 @@ -4463,57 +995,36 @@ jobs: - FunctionalStatefulTestTsan - FunctionalStatefulTestMsan - FunctionalStatefulTestUBsan - - FunctionalStatelessTestReleaseS3_0 - - FunctionalStatelessTestReleaseS3_1 - - FunctionalStatelessTestS3Debug0 - - FunctionalStatelessTestS3Debug1 - - FunctionalStatelessTestS3Debug2 - - FunctionalStatelessTestS3Debug4 - - FunctionalStatelessTestS3Debug5 - - FunctionalStatelessTestS3Tsan0 - - FunctionalStatelessTestS3Tsan1 - - FunctionalStatelessTestS3Tsan2 - - FunctionalStatelessTestS3Tsan4 + - FunctionalStatelessTestReleaseS3 + - FunctionalStatelessTestS3Debug + - FunctionalStatelessTestS3Tsan + - FunctionalStatefulTestReleaseParallelReplicas + - FunctionalStatefulTestAsanParallelReplicas + - FunctionalStatefulTestTsanParallelReplicas + - FunctionalStatefulTestMsanParallelReplicas + - FunctionalStatefulTestUBsanParallelReplicas + - FunctionalStatefulTestDebugParallelReplicas - StressTestDebug - StressTestAsan - StressTestTsan - StressTestMsan - StressTestUBsan + - UpgradeCheckAsan + - UpgradeCheckTsan + - UpgradeCheckMsan + - UpgradeCheckDebug - ASTFuzzerTestDebug - ASTFuzzerTestAsan - ASTFuzzerTestTsan - ASTFuzzerTestMSan - ASTFuzzerTestUBSan - - IntegrationTestsAsan0 - - IntegrationTestsAsan1 - - IntegrationTestsAsan2 - - IntegrationTestsAsan3 - - IntegrationTestsAsan4 - - IntegrationTestsAsan5 - - IntegrationTestsAnalyzerAsan0 - - IntegrationTestsAnalyzerAsan1 - - IntegrationTestsAnalyzerAsan2 - - IntegrationTestsAnalyzerAsan3 - - IntegrationTestsAnalyzerAsan4 - - IntegrationTestsAnalyzerAsan5 - - IntegrationTestsRelease0 - - IntegrationTestsRelease1 - - IntegrationTestsRelease2 - - IntegrationTestsRelease3 - - 
IntegrationTestsTsan0 - - IntegrationTestsTsan1 - - IntegrationTestsTsan2 - - IntegrationTestsTsan3 - - IntegrationTestsTsan4 - - IntegrationTestsTsan5 - - PerformanceComparisonX86-0 - - PerformanceComparisonX86-1 - - PerformanceComparisonX86-2 - - PerformanceComparisonX86-3 - - PerformanceComparisonAarch-0 - - PerformanceComparisonAarch-1 - - PerformanceComparisonAarch-2 - - PerformanceComparisonAarch-3 + - IntegrationTestsAsan + - IntegrationTestsAnalyzerAsan + - IntegrationTestsTsan + - IntegrationTestsRelease + - IntegrationTestsFlakyCheck + - PerformanceComparisonX86 + - PerformanceComparisonAarch - UnitTestsAsan - UnitTestsTsan - UnitTestsMsan @@ -4521,7 +1032,6 @@ jobs: - UnitTestsReleaseClang - CompatibilityCheckX86 - CompatibilityCheckAarch64 - - IntegrationTestsFlakyCheck - SQLancerTestRelease - SQLancerTestDebug runs-on: [self-hosted, style-checker] @@ -4535,6 +1045,44 @@ jobs: cd "$GITHUB_WORKSPACE/tests/ci" python3 finish_check.py python3 merge_pr.py --check-approved +############################################################################################## +############################ SQLLOGIC TEST ################################################### +############################################################################################## + SQLLogicTestRelease: + needs: [BuilderDebRelease] + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Sqllogic test (release) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 sqllogic_test.py "$CHECK_NAME" "$KILL_TIMEOUT" +############################################################################################## +##################################### SQL TEST ############################################### +############################################################################################## + SQLTest: + needs: [BuilderDebRelease] + uses: ./.github/workflows/reusable_test.yml + with: + test_name: SQLTest + runner_type: fuzzer-unit-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 sqltest.py "$CHECK_NAME" +############################################################################################# +###################################### NOT IN FINISH ######################################## +############################################################################################# +###################################### JEPSEN TESTS ######################################### +############################################################################################# + Jepsen: + # This is special test NOT INCLUDED in FinishCheck + # When it's skipped, all dependent tasks will be skipped too. 
+ # DO NOT add it there + if: contains(github.event.pull_request.labels.*.name, 'jepsen-test') + needs: [BuilderBinRelease] + uses: ./.github/workflows/jepsen.yml ############################################################################################# ####################################### libFuzzer ########################################### ############################################################################################# @@ -4542,77 +1090,3 @@ jobs: if: contains(github.event.pull_request.labels.*.name, 'libFuzzer') needs: [DockerHubPush, StyleCheck] uses: ./.github/workflows/libfuzzer.yml - ############################################################################################## - ############################ SQLLOGIC TEST ################################################### - ############################################################################################## - SQLLogicTestRelease: - needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/sqllogic_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Sqllogic test (release) - REPO_COPY=${{runner.temp}}/sqllogic_debug/ClickHouse - KILL_TIMEOUT=10800 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Sqllogic test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 sqllogic_test.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" -############################################################################################## -##################################### SQL TEST ############################################### -############################################################################################## - SQLTest: - needs: [BuilderDebRelease] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/sqltest - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=SQLTest - REPO_COPY=${{runner.temp}}/sqltest/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: SQLTest - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 sqltest.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index 29776d0aa5c..b5771fa87ab 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -76,66 +76,22 @@ jobs: path: ${{ runner.temp }}/changed_images.json CompatibilityCheckX86: needs: [BuilderDebRelease] - runs-on: [self-hosted, style-checker] - steps: - - name: Set envs - 
run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/compatibility_check - REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse - REPORTS_PATH=${{runner.temp}}/reports_dir - EOF - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: CompatibilityCheckX86 - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Compatibility check X86 + runner_type: style-checker + run_command: | + cd "$REPO_COPY/tests/ci" + python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions CompatibilityCheckAarch64: needs: [BuilderDebAarch64] - runs-on: [self-hosted, style-checker] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/compatibility_check - REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse - REPORTS_PATH=${{runner.temp}}/reports_dir - EOF - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: CompatibilityCheckAarch64 - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Compatibility check Aarch64 + runner_type: style-checker + run_command: | + cd "$REPO_COPY/tests/ci" + python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc ######################################################################################### #################################### ORDINARY BUILDS #################################### ######################################################################################### @@ -220,6 +176,7 @@ jobs: ##################################### BUILD REPORTER ####################################### ############################################################################################ BuilderReport: + if: ${{ success() || failure() }} needs: - BuilderDebRelease - BuilderDebAarch64 @@ -228,79 +185,33 @@ jobs: - BuilderDebUBsan - BuilderDebMsan - BuilderDebDebug - runs-on: [self-hosted, style-checker] - if: ${{ success() || failure() }} - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - CHECK_NAME=ClickHouse build check - REPORTS_PATH=${{runner.temp}}/reports_dir - REPORTS_PATH=${{runner.temp}}/reports_dir - TEMP_PATH=${{runner.temp}}/report_check - NEEDS_DATA_PATH=${{runner.temp}}/needs.json - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out 
repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Report Builder - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cat > "$NEEDS_DATA_PATH" << 'EOF' - ${{ toJSON(needs) }} - EOF - cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: ClickHouse build check + runner_type: style-checker + additional_envs: | + NEEDS_DATA<> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/report_check - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=ClickHouse special build check - NEEDS_DATA_PATH=${{runner.temp}}/needs.json - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Report Builder - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cat > "$NEEDS_DATA_PATH" << 'EOF' - ${{ toJSON(needs) }} - EOF - cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: ClickHouse special build check + runner_type: style-checker + additional_envs: | + NEEDS_DATA<> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/test_install - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Install packages (amd64) - REPO_COPY=${{runner.temp}}/test_install/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Test packages installation - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 install_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Install packages (amd64) + runner_type: style-checker + run_command: | + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" InstallPackagesTestAarch64: needs: [BuilderDebAarch64] - runs-on: [self-hosted, style-checker-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/test_install - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Install packages (arm64) - REPO_COPY=${{runner.temp}}/test_install/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Test packages installation - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 install_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs 
--no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Install packages (arm64) + runner_type: style-checker-aarch64 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" ############################################################################################## ########################### FUNCTIONAl STATELESS TESTS ####################################### ############################################################################################## FunctionalStatelessTestRelease: needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (release) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (release) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatelessTestAarch64: needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (aarch64) - REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse - KILL_TIMEOUT=10800 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestAsan0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (aarch64) + runner_type: func-tester-aarch64 + additional_envs: | + KILL_TIMEOUT=10800 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (asan) - 
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=2 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestAsan1: - needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (asan) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=2 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestTsan0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (asan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestTsan: needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan) - REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestTsan1: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan) - REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=3 - 
EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestTsan2: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (tsan) - REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (tsan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 5 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatelessTestUBsan: needs: [BuilderDebUBsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (ubsan) - REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse - KILL_TIMEOUT=10800 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (ubsan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 2 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestMsan: needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) 
- REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan1: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) - REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan2: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (msan) - REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (msan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 6 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + FunctionalStatelessTestDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - 
RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug1: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug2: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateless tests (debug) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=10800 + batches: 5 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" ############################################################################################## ############################ FUNCTIONAl STATEFUL TESTS ####################################### ############################################################################################## FunctionalStatefulTestRelease: needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_debug - 
REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (release) - REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (release) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestAarch64: needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (aarch64) - REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (aarch64) + runner_type: func-tester-aarch64 + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (asan) - REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (asan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 
functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestTsan: needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (tsan) - REPO_COPY=${{runner.temp}}/stateful_tsan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (tsan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestMsan: needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_msan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (msan) - REPO_COPY=${{runner.temp}}/stateful_msan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (msan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestUBsan: needs: [BuilderDebUBsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (ubsan) - REPO_COPY=${{runner.temp}}/stateful_ubsan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr 
"$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (ubsan) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" FunctionalStatefulTestDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (debug) - REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stateful tests (debug) + runner_type: func-tester + additional_envs: | + KILL_TIMEOUT=3600 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" ############################################################################################## ######################################### STRESS TESTS ####################################### ############################################################################################## StressTestAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_thread - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (asan) - REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (asan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" StressTestTsan: needs: [BuilderDebTsan] - # func testers have 16 cores + 128 GB memory - # while stress testers have 36 cores + 72 memory - # It would be better to have something like 32 + 128, - # but such servers almost unavailable as spot instances. 
- runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_thread - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (tsan) - REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (tsan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" StressTestMsan: needs: [BuilderDebMsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (msan) - REPO_COPY=${{runner.temp}}/stress_memory/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (msan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" StressTestUBsan: needs: [BuilderDebUBsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_undefined - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (ubsan) - REPO_COPY=${{runner.temp}}/stress_undefined/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (ubsan) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" StressTestDebug: needs: [BuilderDebDebug] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stress_debug - 
REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stress test (debug) - REPO_COPY=${{runner.temp}}/stress_debug/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Stress test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 stress_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Stress test (debug) + runner_type: stress-tester + run_command: | + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" ############################################################################################# ############################# INTEGRATION TESTS ############################################# ############################################################################################# - IntegrationTestsAsan0: + IntegrationTestsAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsAsan1: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Integration tests (asan) + runner_type: stress-tester + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + IntegrationTestsAnalyzerAsan: needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - 
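The numbered jobs being deleted around this point (IntegrationTestsAsan0/1/2, IntegrationTestsTsan0..3, and the stateless variants earlier in the file) differ only in their RUN_BY_HASH_NUM / RUN_BY_HASH_TOTAL pair: each copy runs the slice of tests whose hash falls into its bucket. A minimal sketch of that kind of hash sharding is shown below, for orientation only; the helper name and the hash choice are assumptions, since the real selection happens inside the ClickHouse test-runner scripts, which this diff does not touch.

```python
import hashlib


def belongs_to_batch(test_name: str, run_by_hash_num: int, run_by_hash_total: int) -> bool:
    """Illustrative only: deterministically map a test to one of N batches,
    mirroring the RUN_BY_HASH_NUM / RUN_BY_HASH_TOTAL contract in the removed jobs."""
    digest = int(hashlib.sha1(test_name.encode()).hexdigest(), 16)
    return digest % run_by_hash_total == run_by_hash_num


# Hypothetical test names, just to show the split.
tests = ["test_backup_restore", "test_merge_tree_s3", "test_keeper_multinode"]

# With RUN_BY_HASH_TOTAL=3, the union of batches 0..2 covers every test exactly once.
for num in range(3):
    print(num, [t for t in tests if belongs_to_batch(t, num, 3)])
```

Because the mapping is deterministic, batches 0..TOTAL-1 together cover the suite exactly once; the new `batches:` input preserves that property while removing the copy-pasted job definitions.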
IntegrationTestsAsan2: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Integration tests (asan, analyzer) + runner_type: stress-tester + batches: 6 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + IntegrationTestsTsan: needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan1: - needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan2: - needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - 
REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsTsan3: - needs: [BuilderDebTsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (tsan) - REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsRelease0: + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Integration tests (tsan) + runner_type: stress-tester + batches: 6 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + IntegrationTestsRelease: needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (release) - REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=2 - EOF - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsRelease1: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (release) - REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=2 - EOF - - name: Download json 
reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.REPORTS_PATH }} - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Integration tests (release) + runner_type: stress-tester + batches: 4 + run_command: | + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" FinishCheck: needs: - DockerHubPush @@ -1625,19 +512,12 @@ jobs: - BuilderReport - BuilderSpecialReport - MarkReleaseReady - - FunctionalStatelessTestDebug0 - - FunctionalStatelessTestDebug1 - - FunctionalStatelessTestDebug2 + - FunctionalStatelessTestDebug - FunctionalStatelessTestRelease - FunctionalStatelessTestAarch64 - - FunctionalStatelessTestAsan0 - - FunctionalStatelessTestAsan1 - - FunctionalStatelessTestTsan0 - - FunctionalStatelessTestTsan1 - - FunctionalStatelessTestTsan2 - - FunctionalStatelessTestMsan0 - - FunctionalStatelessTestMsan1 - - FunctionalStatelessTestMsan2 + - FunctionalStatelessTestAsan + - FunctionalStatelessTestTsan + - FunctionalStatelessTestMsan - FunctionalStatelessTestUBsan - FunctionalStatefulTestDebug - FunctionalStatefulTestRelease @@ -1651,15 +531,9 @@ jobs: - StressTestTsan - StressTestMsan - StressTestUBsan - - IntegrationTestsAsan0 - - IntegrationTestsAsan1 - - IntegrationTestsAsan2 - - IntegrationTestsRelease0 - - IntegrationTestsRelease1 - - IntegrationTestsTsan0 - - IntegrationTestsTsan1 - - IntegrationTestsTsan2 - - IntegrationTestsTsan3 + - IntegrationTestsAsan + - IntegrationTestsTsan + - IntegrationTestsRelease - CompatibilityCheckX86 - CompatibilityCheckAarch64 runs-on: [self-hosted, style-checker] diff --git a/.github/workflows/reusable_build.yml b/.github/workflows/reusable_build.yml index 1eb25307f0c..f36b93bea58 100644 --- a/.github/workflows/reusable_build.yml +++ b/.github/workflows/reusable_build.yml @@ -1,6 +1,10 @@ ### For the pure soul wishes to move it to another place # https://github.com/orgs/community/discussions/9050 +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + name: Build ClickHouse 'on': workflow_call: @@ -25,6 +29,8 @@ name: Build ClickHouse jobs: Build: name: Build-${{inputs.build_name}} + env: + GITHUB_JOB_OVERRIDDEN: Build-${{inputs.build_name}} runs-on: [self-hosted, '${{inputs.runner_type}}'] steps: - name: Check out repository code @@ -37,8 +43,6 @@ jobs: - name: Set build envs run: | cat >> "$GITHUB_ENV" << 'EOF' - IMAGES_PATH=${{runner.temp}}/images_path - GITHUB_JOB_OVERRIDDEN=Build-${{inputs.build_name}} ${{inputs.additional_envs}} EOF python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV" @@ -71,4 +75,5 @@ jobs: name: ${{ env.BUILD_URLS }} path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Clean + if: always() uses: ./.github/actions/clean diff --git a/.github/workflows/reusable_test.yml b/.github/workflows/reusable_test.yml new file mode 100644 index 00000000000..e82d2d51596 --- /dev/null +++ b/.github/workflows/reusable_test.yml @@ -0,0 +1,113 @@ +### For the pure soul wishes to move it to another place +# 
https://github.com/orgs/community/discussions/9050 + +name: Testing workflow +'on': + workflow_call: + inputs: + test_name: + description: the value of test type from tests/ci/ci_config.py, ends up as $CHECK_NAME ENV + required: true + type: string + runner_type: + description: the label of runner to use + required: true + type: string + run_command: + description: the command to launch the check. Usually starts with `cd '$REPO_COPY/tests/ci'` + required: true + type: string + batches: + description: how many batches for the test will be launched + default: 1 + type: number + checkout_depth: + description: the value of the git shallow checkout + required: false + type: number + default: 1 + submodules: + description: if the submodules should be checked out + required: false + type: boolean + default: false + additional_envs: + description: additional ENV variables to setup the job + type: string + secrets: + secret_envs: + description: if given, it's passed to the environments + required: false + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + CHECK_NAME: ${{inputs.test_name}} + +jobs: + PrepareStrategy: + # batches < 1 is misconfiguration, + # and we need this step only for batches > 1 + if: ${{ inputs.batches > 1 }} + runs-on: [self-hosted, style-checker-aarch64] + outputs: + batches: ${{steps.batches.outputs.batches}} + steps: + - name: Calculate batches + id: batches + run: | + batches_output=$(python3 -c 'import json; print(json.dumps(list(range(${{inputs.batches}}))))') + echo "batches=${batches_output}" >> "$GITHUB_OUTPUT" + Test: + # If PrepareStrategy is skipped for batches == 1, + # we still need to launch the test. + # `! failure()` is mandatory here to launch on skipped Job + # `&& !cancelled()` to allow the be cancelable + if: ${{ ( !failure() && !cancelled() ) && inputs.batches > 0 }} + # Do not add `-0` to the end, if there's only one batch + name: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }} + env: + GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }} + runs-on: [self-hosted, '${{inputs.runner_type}}'] + needs: [PrepareStrategy] + strategy: + fail-fast: false # we always wait for entire matrix + matrix: + # if PrepareStrategy does not have batches, we use 0 + batch: ${{ needs.PrepareStrategy.outputs.batches + && fromJson(needs.PrepareStrategy.outputs.batches) + || fromJson('[0]')}} + steps: + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + clear-repository: true + submodules: ${{inputs.submodules}} + fetch-depth: ${{inputs.checkout_depth}} + filter: tree:0 + - name: Set build envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + ${{inputs.additional_envs}} + ${{secrets.secret_envs}} + EOF + - name: Common setup + uses: ./.github/actions/common_setup + with: + job_type: test + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + - name: Setup batch + if: ${{ inputs.batches > 1}} + run: | + cat >> "$GITHUB_ENV" << 'EOF' + RUN_BY_HASH_NUM=${{matrix.batch}} + RUN_BY_HASH_TOTAL=${{inputs.batches}} + EOF + - name: Run test + run: ${{inputs.run_command}} + - name: Clean + if: always() + uses: ./.github/actions/clean diff --git a/.github/workflows/woboq.yml b/.github/workflows/woboq.yml deleted file mode 100644 index 1ef729af30a..00000000000 --- a/.github/workflows/woboq.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: WoboqBuilder -env: - # Force the stdout and stderr streams to be 
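The new reusable_test.yml above is where the per-batch boilerplate now lives: PrepareStrategy turns the scalar `batches` input into a JSON array for `fromJson()`, the Test job's matrix fans out over it, and the "Setup batch" step exports RUN_BY_HASH_NUM / RUN_BY_HASH_TOTAL only when there is more than one batch. Below is a small local sketch of that round trip; the range-to-JSON expression is taken from the workflow's one-liner, while the helper names and the example check name are purely illustrative.

```python
import json


def prepare_strategy(batches: int) -> list[int]:
    # Same expression as the PrepareStrategy one-liner:
    #   python3 -c 'import json; print(json.dumps(list(range(N))))'
    # For a single batch the job is skipped and the matrix falls back to [0].
    return json.loads(json.dumps(list(range(batches)))) if batches > 1 else [0]


def job_view(check_name: str, batch: int, batches: int) -> tuple[str, dict]:
    # Mirrors the job-name expression and the "Setup batch" step:
    # single-batch runs get neither the "-0" suffix nor the sharding variables.
    name = f"{check_name}-{batch}" if batches > 1 else check_name
    env = {"CHECK_NAME": check_name}
    if batches > 1:
        env.update(RUN_BY_HASH_NUM=str(batch), RUN_BY_HASH_TOTAL=str(batches))
    return name, env


for batch in prepare_strategy(4):  # e.g. "batches: 4" -> matrix batch: [0, 1, 2, 3]
    print(job_view("Integration tests (asan)", batch, 4))
```

When `batches` is 1, PrepareStrategy is skipped entirely and the matrix falls back to fromJson('[0]'), which is why the Test job gates on `!failure() && !cancelled()` rather than on plain success().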
unbuffered - PYTHONUNBUFFERED: 1 - -concurrency: - group: woboq -on: # yamllint disable-line rule:truthy - workflow_dispatch: - workflow_call: -jobs: - # don't use dockerhub push because this image updates so rarely - WoboqCodebrowser: - runs-on: [self-hosted, style-checker] - timeout-minutes: 420 # the task is pretty heavy, so there's an additional hour - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/codebrowser - REPO_COPY=${{runner.temp}}/codebrowser/ClickHouse - IMAGES_PATH=${{runner.temp}}/images_path - EOF - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - submodules: 'true' - - name: Download json reports - uses: actions/download-artifact@v3 - with: - path: ${{ env.IMAGES_PATH }} - - name: Codebrowser - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 codebrowser_check.py - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" diff --git a/.gitmodules b/.gitmodules index 1a464ee1170..af90c788012 100644 --- a/.gitmodules +++ b/.gitmodules @@ -354,3 +354,6 @@ [submodule "contrib/aklomp-base64"] path = contrib/aklomp-base64 url = https://github.com/aklomp/base64.git +[submodule "contrib/pocketfft"] + path = contrib/pocketfft + url = https://github.com/mreineck/pocketfft.git diff --git a/CMakeLists.txt b/CMakeLists.txt index 0d1ef22b2aa..063cfc77302 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -21,8 +21,11 @@ include (cmake/clang_tidy.cmake) include (cmake/git.cmake) include (cmake/utils.cmake) +# This is needed to set up the CMAKE_INSTALL_BINDIR variable. +include (GNUInstallDirs) + # Ignore export() since we don't use it, -# but it gets broken with a global targets via link_libraries() +# but it gets broken with global targets via link_libraries() macro (export) endmacro () @@ -164,7 +167,7 @@ if (OS_LINUX) # and whatever is poisoning it by LD_PRELOAD should not link to our symbols. # - The clickhouse-odbc-bridge and clickhouse-library-bridge binaries # should not expose their symbols to ODBC drivers and libraries. 
- set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic -Wl,--gc-sections") endif () if (OS_DARWIN) @@ -187,9 +190,10 @@ if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE") endif () endif() -if (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE" - OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" - OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL") +if (NOT (SANITIZE_COVERAGE OR WITH_COVERAGE) + AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE" + OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" + OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL")) set (OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT ON) else() set (OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT OFF) @@ -273,6 +277,11 @@ option (ENABLE_BUILD_PROFILING "Enable profiling of build time" OFF) if (ENABLE_BUILD_PROFILING) if (COMPILER_CLANG) set (COMPILER_FLAGS "${COMPILER_FLAGS} -ftime-trace") + + if (LINKER_NAME MATCHES "lld") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--time-trace") + set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,--time-trace") + endif () else () message (${RECONFIGURE_MESSAGE_LEVEL} "Build profiling is only available with CLang") endif () @@ -286,9 +295,6 @@ set (CMAKE_C_STANDARD 11) set (CMAKE_C_EXTENSIONS ON) # required by most contribs written in C set (CMAKE_C_STANDARD_REQUIRED ON) -# Compiler-specific coverage flags e.g. -fcoverage-mapping -option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF) - if (COMPILER_CLANG) # Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure. # See https://reviews.llvm.org/D112921 @@ -304,18 +310,12 @@ if (COMPILER_CLANG) set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries") set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}") endif() - - if (WITH_COVERAGE) - set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-instr-generate -fcoverage-mapping") - # If we want to disable coverage for specific translation units - set(WITHOUT_COVERAGE "-fno-profile-instr-generate -fno-coverage-mapping") - endif() endif () set (COMPILER_FLAGS "${COMPILER_FLAGS}") # Our built-in unwinder only supports DWARF version up to 4. -set (DEBUG_INFO_FLAGS "-g -gdwarf-4") +set (DEBUG_INFO_FLAGS "-g") # Disable omit frame pointer compiler optimization using -fno-omit-frame-pointer option(DISABLE_OMIT_FRAME_POINTER "Disable omit frame pointer compiler optimization" OFF) @@ -463,14 +463,6 @@ endif () message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE}") -include (GNUInstallDirs) - -# When testing for memory leaks with Valgrind, don't link tcmalloc or jemalloc. - -if (TARGET global-group) - install (EXPORT global DESTINATION cmake) -endif () - add_subdirectory (contrib EXCLUDE_FROM_ALL) if (NOT ENABLE_JEMALLOC) @@ -554,10 +546,16 @@ if (ENABLE_RUST) endif() endif() +if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND NOT SANITIZE AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64)) + set(CHECK_LARGE_OBJECT_SIZES_DEFAULT ON) +else () + set(CHECK_LARGE_OBJECT_SIZES_DEFAULT OFF) +endif () +option(CHECK_LARGE_OBJECT_SIZES "Check that there are no large object files after build." 
${CHECK_LARGE_OBJECT_SIZES_DEFAULT}) + add_subdirectory (base) add_subdirectory (src) add_subdirectory (programs) -add_subdirectory (tests) add_subdirectory (utils) if (FUZZER) diff --git a/base/base/CMakeLists.txt b/base/base/CMakeLists.txt index cad794968f8..3886932d198 100644 --- a/base/base/CMakeLists.txt +++ b/base/base/CMakeLists.txt @@ -1,3 +1,5 @@ +add_compile_options($<$,$>:${COVERAGE_FLAGS}>) + if (USE_CLANG_TIDY) set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") endif () diff --git a/base/base/coverage.cpp b/base/base/coverage.cpp index 1027638be3d..d70c3bcd82b 100644 --- a/base/base/coverage.cpp +++ b/base/base/coverage.cpp @@ -1,11 +1,15 @@ #include "coverage.h" -#if WITH_COVERAGE - #pragma GCC diagnostic ignored "-Wreserved-identifier" -# include -# include + +/// WITH_COVERAGE enables the default implementation of code coverage, +/// that dumps a map to the filesystem. + +#if WITH_COVERAGE + +#include +#include # if defined(__clang__) @@ -31,3 +35,131 @@ void dumpCoverageReportIfPossible() #endif } + + +/// SANITIZE_COVERAGE enables code instrumentation, +/// but leaves the callbacks implementation to us, +/// which we use to calculate coverage on a per-test basis +/// and to write it to system tables. + +#if defined(SANITIZE_COVERAGE) + +namespace +{ + bool pc_guards_initialized = false; + bool pc_table_initialized = false; + + uint32_t * guards_start = nullptr; + uint32_t * guards_end = nullptr; + + uintptr_t * coverage_array = nullptr; + size_t coverage_array_size = 0; + + uintptr_t * all_addresses_array = nullptr; + size_t all_addresses_array_size = 0; +} + +extern "C" +{ + +/// This is called at least once for every DSO for initialization. +/// But we will use it only for the main DSO. +void __sanitizer_cov_trace_pc_guard_init(uint32_t * start, uint32_t * stop) +{ + if (pc_guards_initialized) + return; + pc_guards_initialized = true; + + /// The function can be called multiple times, but we need to initialize only once. + if (start == stop || *start) + return; + + guards_start = start; + guards_end = stop; + coverage_array_size = stop - start; + + /// Note: we will leak this. + coverage_array = static_cast(malloc(sizeof(uintptr_t) * coverage_array_size)); + + resetCoverage(); +} + +/// This is called at least once for every DSO for initialization +/// and provides information about all instrumented addresses. +void __sanitizer_cov_pcs_init(const uintptr_t * pcs_begin, const uintptr_t * pcs_end) +{ + if (pc_table_initialized) + return; + pc_table_initialized = true; + + all_addresses_array = static_cast(malloc(sizeof(uintptr_t) * coverage_array_size)); + all_addresses_array_size = pcs_end - pcs_begin; + + /// They are not a real pointers, but also contain a flag in the most significant bit, + /// in which we are not interested for now. Reset it. + for (size_t i = 0; i < all_addresses_array_size; ++i) + all_addresses_array[i] = pcs_begin[i] & 0x7FFFFFFFFFFFFFFFULL; +} + +/// This is called at every basic block / edge, etc. +void __sanitizer_cov_trace_pc_guard(uint32_t * guard) +{ + /// Duplicate the guard check. + if (!*guard) + return; + *guard = 0; + + /// If you set *guard to 0 this code will not be called again for this edge. + /// Now we can get the PC and do whatever you want: + /// - store it somewhere or symbolize it and print right away. + /// The values of `*guard` are as you set them in + /// __sanitizer_cov_trace_pc_guard_init and so you can make them consecutive + /// and use them to dereference an array or a bit vector. 
+ void * pc = __builtin_return_address(0); + + coverage_array[guard - guards_start] = reinterpret_cast(pc); +} + +} + +__attribute__((no_sanitize("coverage"))) +std::span getCoverage() +{ + return {coverage_array, coverage_array_size}; +} + +__attribute__((no_sanitize("coverage"))) +std::span getAllInstrumentedAddresses() +{ + return {all_addresses_array, all_addresses_array_size}; +} + +__attribute__((no_sanitize("coverage"))) +void resetCoverage() +{ + memset(coverage_array, 0, coverage_array_size * sizeof(*coverage_array)); + + /// The guard defines whether the __sanitizer_cov_trace_pc_guard should be called. + /// For example, you can unset it after first invocation to prevent excessive work. + /// Initially set all the guards to 1 to enable callbacks. + for (uint32_t * x = guards_start; x < guards_end; ++x) + *x = 1; +} + +#else + +std::span getCoverage() +{ + return {}; +} + +std::span getAllInstrumentedAddresses() +{ + return {}; +} + +void resetCoverage() +{ +} + +#endif diff --git a/base/base/coverage.h b/base/base/coverage.h index 4a57528b0ce..f75ed2d3553 100644 --- a/base/base/coverage.h +++ b/base/base/coverage.h @@ -1,5 +1,8 @@ #pragma once +#include +#include + /// Flush coverage report to file, depending on coverage system /// proposed by compiler (llvm for clang and gcov for gcc). /// @@ -7,3 +10,16 @@ /// Thread safe (use exclusive lock). /// Idempotent, may be called multiple times. void dumpCoverageReportIfPossible(); + +/// This is effective if SANITIZE_COVERAGE is enabled at build time. +/// Get accumulated unique program addresses of the instrumented parts of the code, +/// seen so far after program startup or after previous reset. +/// The returned span will be represented as a sparse map, containing mostly zeros, which you should filter away. +std::span getCoverage(); + +/// Get all instrumented addresses that could be in the coverage. +std::span getAllInstrumentedAddresses(); + +/// Reset the accumulated coverage. +/// This is useful to compare coverage of different tests, including differential coverage. +void resetCoverage(); diff --git a/base/base/wide_integer_impl.h b/base/base/wide_integer_impl.h index fc4e9e551ca..c1fd7b69b7f 100644 --- a/base/base/wide_integer_impl.h +++ b/base/base/wide_integer_impl.h @@ -65,7 +65,7 @@ class IsTupleLike static void check(...); public: - static constexpr const bool value = !std::is_void(nullptr))>::value; + static constexpr const bool value = !std::is_void_v(nullptr))>; }; } @@ -79,7 +79,7 @@ class numeric_limits> { public: static constexpr bool is_specialized = true; - static constexpr bool is_signed = is_same::value; + static constexpr bool is_signed = is_same_v; static constexpr bool is_integer = true; static constexpr bool is_exact = true; static constexpr bool has_infinity = false; @@ -91,7 +91,7 @@ public: static constexpr bool is_iec559 = false; static constexpr bool is_bounded = true; static constexpr bool is_modulo = true; - static constexpr int digits = Bits - (is_same::value ? 1 : 0); + static constexpr int digits = Bits - (is_same_v ? 1 : 0); static constexpr int digits10 = digits * 0.30103 /*std::log10(2)*/; static constexpr int max_digits10 = 0; static constexpr int radix = 2; @@ -104,7 +104,7 @@ public: static constexpr wide::integer min() noexcept { - if (is_same::value) + if constexpr (is_same_v) { using T = wide::integer; T res{}; @@ -118,7 +118,7 @@ public: { using T = wide::integer; T res{}; - res.items[T::_impl::big(0)] = is_same::value + res.items[T::_impl::big(0)] = is_same_v ? 
std::numeric_limits::signed_base_type>::max() : std::numeric_limits::base_type>::max(); for (unsigned i = 1; i < wide::integer::_impl::item_count; ++i) diff --git a/base/glibc-compatibility/CMakeLists.txt b/base/glibc-compatibility/CMakeLists.txt index 0539f0c231d..c967fa5b11b 100644 --- a/base/glibc-compatibility/CMakeLists.txt +++ b/base/glibc-compatibility/CMakeLists.txt @@ -5,9 +5,6 @@ if (GLIBC_COMPATIBILITY) endif() enable_language(ASM) - include(CheckIncludeFile) - - check_include_file("sys/random.h" HAVE_SYS_RANDOM_H) add_headers_and_sources(glibc_compatibility .) add_headers_and_sources(glibc_compatibility musl) @@ -21,11 +18,6 @@ if (GLIBC_COMPATIBILITY) message (FATAL_ERROR "glibc_compatibility can only be used on x86_64 or aarch64.") endif () - list(REMOVE_ITEM glibc_compatibility_sources musl/getentropy.c) - if(HAVE_SYS_RANDOM_H) - list(APPEND glibc_compatibility_sources musl/getentropy.c) - endif() - # Need to omit frame pointers to match the performance of glibc set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer") @@ -43,12 +35,6 @@ if (GLIBC_COMPATIBILITY) target_link_libraries(global-libs INTERFACE glibc-compatibility ${MEMCPY_LIBRARY}) - install( - TARGETS glibc-compatibility ${MEMCPY_LIBRARY} - EXPORT global - ARCHIVE DESTINATION lib - ) - message (STATUS "Some symbols from glibc will be replaced for compatibility") elseif (CLICKHOUSE_OFFICIAL_BUILD) diff --git a/base/glibc-compatibility/memcpy/memcpy.cpp b/base/glibc-compatibility/memcpy/memcpy.cpp index ec43a2c3649..8bab35934d3 100644 --- a/base/glibc-compatibility/memcpy/memcpy.cpp +++ b/base/glibc-compatibility/memcpy/memcpy.cpp @@ -1,5 +1,6 @@ #include "memcpy.h" +__attribute__((no_sanitize("coverage"))) extern "C" void * memcpy(void * __restrict dst, const void * __restrict src, size_t size) { return inline_memcpy(dst, src, size); diff --git a/base/glibc-compatibility/memcpy/memcpy.h b/base/glibc-compatibility/memcpy/memcpy.h index 0930dfb5c67..86439dda061 100644 --- a/base/glibc-compatibility/memcpy/memcpy.h +++ b/base/glibc-compatibility/memcpy/memcpy.h @@ -93,7 +93,7 @@ * See https://habr.com/en/company/yandex/blog/457612/ */ - +__attribute__((no_sanitize("coverage"))) static inline void * inline_memcpy(void * __restrict dst_, const void * __restrict src_, size_t size) { /// We will use pointer arithmetic, so char pointer will be used. 
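The SANITIZE_COVERAGE path added in base/base/coverage.cpp stores one return address per instrumented edge and exposes it through getCoverage(); the array is sparse (untouched edges stay zero) and the values are raw program counters, so any consumer has to filter and symbolize them. The sketch below is a hypothetical offline post-processing step, not part of this change: the binary path, the use of llvm-symbolizer, and the assumption that runtime PCs equal file addresses (i.e. no PIE/ASLR rebasing) are all illustrative.

```python
import subprocess


def symbolize(addresses, binary="clickhouse"):
    """Hypothetical helper: resolve instrumented PCs to source locations.

    Assumes `addresses` are the entries of the coverage array (e.g. dumped
    to a file by a test harness) and that the binary was built without PIE,
    so the runtime PCs match the file addresses; a real tool would subtract
    the load base first. Requires llvm-symbolizer on PATH.
    """
    hex_addrs = [hex(a) for a in addresses if a != 0]  # skip never-hit edges
    if not hex_addrs:
        return []
    out = subprocess.run(
        ["llvm-symbolizer", f"--obj={binary}", *hex_addrs],
        capture_output=True, text=True, check=True,
    ).stdout
    # llvm-symbolizer prints "function\nfile:line:col" blocks separated by blank lines.
    return [block.splitlines() for block in out.strip().split("\n\n")]


if __name__ == "__main__":
    for frame in symbolize([0x0, 0x401234]):  # 0x401234 is a made-up address
        print(frame)
```

In the PR itself, per the comment in coverage.cpp, the intent is to compute coverage on a per-test basis and write it to system tables rather than to shell out to a symbolizer; the sketch only shows what the raw getCoverage() data represents.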
diff --git a/base/harmful/CMakeLists.txt b/base/harmful/CMakeLists.txt index 399f6ecc625..c19661875be 100644 --- a/base/harmful/CMakeLists.txt +++ b/base/harmful/CMakeLists.txt @@ -1,2 +1 @@ add_library(harmful harmful.c) -install(TARGETS harmful EXPORT global ARCHIVE DESTINATION lib) diff --git a/base/poco/Net/src/HTTPServerSession.cpp b/base/poco/Net/src/HTTPServerSession.cpp index f6d3c4e5b92..d4f2b24879e 100644 --- a/base/poco/Net/src/HTTPServerSession.cpp +++ b/base/poco/Net/src/HTTPServerSession.cpp @@ -26,7 +26,6 @@ HTTPServerSession::HTTPServerSession(const StreamSocket& socket, HTTPServerParam _maxKeepAliveRequests(pParams->getMaxKeepAliveRequests()) { setTimeout(pParams->getTimeout()); - this->socket().setReceiveTimeout(pParams->getTimeout()); } diff --git a/base/poco/Net/src/HTTPSession.cpp b/base/poco/Net/src/HTTPSession.cpp index d2663baaf9f..8f951b3102c 100644 --- a/base/poco/Net/src/HTTPSession.cpp +++ b/base/poco/Net/src/HTTPSession.cpp @@ -93,9 +93,34 @@ void HTTPSession::setTimeout(const Poco::Timespan& timeout) void HTTPSession::setTimeout(const Poco::Timespan& connectionTimeout, const Poco::Timespan& sendTimeout, const Poco::Timespan& receiveTimeout) { - _connectionTimeout = connectionTimeout; - _sendTimeout = sendTimeout; - _receiveTimeout = receiveTimeout; + try + { + _connectionTimeout = connectionTimeout; + + if (_sendTimeout.totalMicroseconds() != sendTimeout.totalMicroseconds()) { + _sendTimeout = sendTimeout; + + if (connected()) + _socket.setSendTimeout(_sendTimeout); + } + + if (_receiveTimeout.totalMicroseconds() != receiveTimeout.totalMicroseconds()) { + _receiveTimeout = receiveTimeout; + + if (connected()) + _socket.setReceiveTimeout(_receiveTimeout); + } + } + catch (NetException &) + { +#ifndef NDEBUG + throw; +#else + // Mute exceptions in release builds, + // just in case changing settings on the socket is not allowed; + // however, it should be OK for timeouts. +#endif + } } diff --git a/cmake/add_check.cmake b/cmake/add_check.cmake deleted file mode 100644 index ba30ee8676f..00000000000 --- a/cmake/add_check.cmake +++ /dev/null @@ -1,19 +0,0 @@ -# Adding test output on failure -enable_testing () - -if (NOT TARGET check) - if (CMAKE_CONFIGURATION_TYPES) - add_custom_target (check COMMAND ${CMAKE_CTEST_COMMAND} - --force-new-ctest-process --output-on-failure --build-config "$<CONFIGURATION>" - WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) - else () - add_custom_target (check COMMAND ${CMAKE_CTEST_COMMAND} - --force-new-ctest-process --output-on-failure - WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) - endif () -endif () - -macro (add_check target) - add_test (NAME test_${target} COMMAND ${target} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) - add_dependencies (check ${target}) -endmacro (add_check) diff --git a/cmake/ccache.cmake b/cmake/ccache.cmake index e8bf856332a..0df70d82d2c 100644 --- a/cmake/ccache.cmake +++ b/cmake/ccache.cmake @@ -9,10 +9,10 @@ if (CMAKE_CXX_COMPILER_LAUNCHER MATCHES "ccache" OR CMAKE_C_COMPILER_LAUNCHER MA return() endif() -set(COMPILER_CACHE "auto" CACHE STRING "Speedup re-compilations using the caching tools; valid options are 'auto' (ccache, then sccache), 'ccache', 'sccache', or 'disabled'") +set(COMPILER_CACHE "auto" CACHE STRING "Speedup re-compilations using the caching tools; valid options are 'auto' (sccache, then ccache), 'ccache', 'sccache', or 'disabled'") if(COMPILER_CACHE STREQUAL "auto") - find_program (CCACHE_EXECUTABLE NAMES ccache sccache) + find_program (CCACHE_EXECUTABLE NAMES sccache ccache) elseif (COMPILER_CACHE STREQUAL "ccache")
find_program (CCACHE_EXECUTABLE ccache) elseif(COMPILER_CACHE STREQUAL "sccache") @@ -21,7 +21,7 @@ elseif(COMPILER_CACHE STREQUAL "disabled") message(STATUS "Using *ccache: no (disabled via configuration)") return() else() - message(${RECONFIGURE_MESSAGE_LEVEL} "The COMPILER_CACHE must be one of (auto|ccache|sccache|disabled), value: '${COMPILER_CACHE}'") + message(${RECONFIGURE_MESSAGE_LEVEL} "The COMPILER_CACHE must be one of (auto|sccache|ccache|disabled), value: '${COMPILER_CACHE}'") endif() diff --git a/cmake/cpu_features.cmake b/cmake/cpu_features.cmake index 9fc3960c166..cfa9c314bc0 100644 --- a/cmake/cpu_features.cmake +++ b/cmake/cpu_features.cmake @@ -1,10 +1,5 @@ # https://software.intel.com/sites/landingpage/IntrinsicsGuide/ -include (CheckCXXSourceCompiles) -include (CMakePushCheckState) - -cmake_push_check_state () - # The variables HAVE_* determine if compiler has support for the flag to use the corresponding instruction set. # The options ENABLE_* determine if we will tell compiler to actually use the corresponding instruction set if compiler can do it. @@ -137,189 +132,54 @@ elseif (ARCH_AMD64) endif() # ClickHouse can be cross-compiled (e.g. on an ARM host for x86) but it is also possible to build ClickHouse on x86 w/o AVX for x86 w/ - # AVX. We only check that the compiler can emit certain SIMD instructions, we don't care if the host system is able to run the binary. - # Therefore, use check_cxx_source_compiles (= does the code compile+link?) instead of check_cxx_source_runs (= does the code - # compile+link+run). + # AVX. We only assume that the compiler can emit certain SIMD instructions, we don't care if the host system is able to run the binary. - set (TEST_FLAG "-mssse3") - set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0") - check_cxx_source_compiles(" - #include - int main() { - __m64 a = _mm_abs_pi8(__m64()); - (void)a; - return 0; - } - " HAVE_SSSE3) - if (HAVE_SSSE3 AND ENABLE_SSSE3) - set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}") + if (ENABLE_SSSE3) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mssse3") endif () - set (TEST_FLAG "-msse4.1") - set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0") - check_cxx_source_compiles(" - #include - int main() { - auto a = _mm_insert_epi8(__m128i(), 0, 0); - (void)a; - return 0; - } - " HAVE_SSE41) - if (HAVE_SSE41 AND ENABLE_SSE41) - set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}") + if (ENABLE_SSE41) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -msse4.1") endif () - set (TEST_FLAG "-msse4.2") - set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0") - check_cxx_source_compiles(" - #include - int main() { - auto a = _mm_crc32_u64(0, 0); - (void)a; - return 0; - } - " HAVE_SSE42) - if (HAVE_SSE42 AND ENABLE_SSE42) - set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}") + if (ENABLE_SSE42) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -msse4.2") endif () - set (TEST_FLAG "-mpclmul") - set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0") - check_cxx_source_compiles(" - #include - int main() { - auto a = _mm_clmulepi64_si128(__m128i(), __m128i(), 0); - (void)a; - return 0; - } - " HAVE_PCLMULQDQ) - if (HAVE_PCLMULQDQ AND ENABLE_PCLMULQDQ) - set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}") + if (ENABLE_PCLMULQDQ) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mpclmul") endif () - set (TEST_FLAG "-mpopcnt") - set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0") - check_cxx_source_compiles(" - int main() { - auto a = __builtin_popcountll(0); - (void)a; - return 0; - } - " HAVE_POPCNT) - if (HAVE_POPCNT AND ENABLE_POPCNT) - set (COMPILER_FLAGS "${COMPILER_FLAGS} 
${TEST_FLAG}") + if (ENABLE_BMI) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mbmi") endif () - set (TEST_FLAG "-mavx") - set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0") - check_cxx_source_compiles(" - #include - int main() { - auto a = _mm256_insert_epi8(__m256i(), 0, 0); - (void)a; - return 0; - } - " HAVE_AVX) - if (HAVE_AVX AND ENABLE_AVX) - set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}") + if (ENABLE_POPCNT) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mpopcnt") endif () - set (TEST_FLAG "-mavx2") - set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0") - check_cxx_source_compiles(" - #include - int main() { - auto a = _mm256_add_epi16(__m256i(), __m256i()); - (void)a; - return 0; - } - " HAVE_AVX2) - if (HAVE_AVX2 AND ENABLE_AVX2) - set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}") + if (ENABLE_AVX) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx") endif () - set (TEST_FLAG "-mavx512f -mavx512bw -mavx512vl") - set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0") - check_cxx_source_compiles(" - #include - int main() { - auto a = _mm512_setzero_epi32(); - (void)a; - auto b = _mm512_add_epi16(__m512i(), __m512i()); - (void)b; - auto c = _mm_cmp_epi8_mask(__m128i(), __m128i(), 0); - (void)c; - return 0; - } - " HAVE_AVX512) - if (HAVE_AVX512 AND ENABLE_AVX512) - set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}") - endif () - - set (TEST_FLAG "-mavx512vbmi") - set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0") - check_cxx_source_compiles(" - #include - int main() { - auto a = _mm512_permutexvar_epi8(__m512i(), __m512i()); - (void)a; - return 0; - } - " HAVE_AVX512_VBMI) - if (HAVE_AVX512 AND ENABLE_AVX512 AND HAVE_AVX512_VBMI AND ENABLE_AVX512_VBMI) - set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}") - endif () - - set (TEST_FLAG "-mbmi") - set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0") - check_cxx_source_compiles(" - #include - int main() { - auto a = _blsr_u32(0); - (void)a; - return 0; - } - " HAVE_BMI) - if (HAVE_BMI AND ENABLE_BMI) - set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}") - endif () - - set (TEST_FLAG "-mbmi2") - set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0") - check_cxx_source_compiles(" - #include - int main() { - auto a = _pdep_u64(0, 0); - (void)a; - return 0; - } - " HAVE_BMI2) - if (HAVE_BMI2 AND HAVE_AVX2 AND ENABLE_AVX2 AND ENABLE_BMI2) - set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}") - endif () - - # Limit avx2/avx512 flag for specific source build - set (X86_INTRINSICS_FLAGS "") - if (ENABLE_AVX2_FOR_SPEC_OP) - if (HAVE_BMI) - set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mbmi") + if (ENABLE_AVX2) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx2") + if (ENABLE_BMI2) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mbmi2") endif () - if (HAVE_AVX AND HAVE_AVX2) - set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mavx -mavx2") + endif () + + if (ENABLE_AVX512) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx512f -mavx512bw -mavx512vl") + if (ENABLE_AVX512_VBMI) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx512vbmi") endif () endif () if (ENABLE_AVX512_FOR_SPEC_OP) - set (X86_INTRINSICS_FLAGS "") - if (HAVE_BMI) - set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mbmi") - endif () - if (HAVE_AVX512) - set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mavx512f -mavx512bw -mavx512vl -mprefer-vector-width=256") - endif () + set (X86_INTRINSICS_FLAGS "-mbmi -mavx512f -mavx512bw -mavx512vl -mprefer-vector-width=256") endif () + else () # RISC-V + exotic platforms endif () - -cmake_pop_check_state () diff --git a/cmake/darwin/default_libs.cmake 
b/cmake/darwin/default_libs.cmake index 42b8473cb75..cf0210d9b45 100644 --- a/cmake/darwin/default_libs.cmake +++ b/cmake/darwin/default_libs.cmake @@ -22,9 +22,3 @@ link_libraries(global-group) target_link_libraries(global-group INTERFACE $ ) - -# FIXME: remove when all contribs will get custom cmake lists -install( - TARGETS global-group global-libs - EXPORT global -) diff --git a/cmake/darwin/toolchain-aarch64.cmake b/cmake/darwin/toolchain-aarch64.cmake index 569b02bb642..178153c1098 100644 --- a/cmake/darwin/toolchain-aarch64.cmake +++ b/cmake/darwin/toolchain-aarch64.cmake @@ -9,9 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "aarch64-apple-darwin") set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-aarch64") set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/darwin/toolchain-x86_64.cmake b/cmake/darwin/toolchain-x86_64.cmake index c4527d2fc0d..b9cbe72a2b6 100644 --- a/cmake/darwin/toolchain-x86_64.cmake +++ b/cmake/darwin/toolchain-x86_64.cmake @@ -9,9 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "x86_64-apple-darwin") set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-x86_64") set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/freebsd/default_libs.cmake b/cmake/freebsd/default_libs.cmake index 65bf296ee09..1eeb1a872bd 100644 --- a/cmake/freebsd/default_libs.cmake +++ b/cmake/freebsd/default_libs.cmake @@ -25,9 +25,3 @@ link_libraries(global-group) target_link_libraries(global-group INTERFACE $ ) - -# FIXME: remove when all contribs will get custom cmake lists -install( - TARGETS global-group global-libs - EXPORT global -) diff --git a/cmake/freebsd/toolchain-aarch64.cmake b/cmake/freebsd/toolchain-aarch64.cmake index 8a8da00f3be..53b7856ed03 100644 --- a/cmake/freebsd/toolchain-aarch64.cmake +++ b/cmake/freebsd/toolchain-aarch64.cmake @@ -9,13 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "aarch64-unknown-freebsd12") set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-aarch64") set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake - -# Will be changed later, but somehow needed to be set here. 
-set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/freebsd/toolchain-ppc64le.cmake b/cmake/freebsd/toolchain-ppc64le.cmake index c3f6594204d..bb23f0fbafc 100644 --- a/cmake/freebsd/toolchain-ppc64le.cmake +++ b/cmake/freebsd/toolchain-ppc64le.cmake @@ -9,13 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "powerpc64le-unknown-freebsd13") set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-ppc64le") set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake - -# Will be changed later, but somehow needed to be set here. -set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/freebsd/toolchain-x86_64.cmake b/cmake/freebsd/toolchain-x86_64.cmake index 460de6a7d39..4635880b4a6 100644 --- a/cmake/freebsd/toolchain-x86_64.cmake +++ b/cmake/freebsd/toolchain-x86_64.cmake @@ -9,13 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "x86_64-pc-freebsd11") set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-x86_64") set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake - -# Will be changed later, but somehow needed to be set here. -set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/fuzzer.cmake b/cmake/fuzzer.cmake index 52f301ab8ad..dd0c4b080fe 100644 --- a/cmake/fuzzer.cmake +++ b/cmake/fuzzer.cmake @@ -4,8 +4,8 @@ if (FUZZER) # NOTE: Eldar Zaitov decided to name it "libfuzzer" instead of "fuzzer" to keep in mind another possible fuzzer backends. # NOTE: no-link means that all the targets are built with instrumentation for fuzzer, but only some of them # (tests) have entry point for fuzzer and it's not checked. 
- set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link") - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link -DFUZZER=1") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link -DFUZZER=1") # NOTE: oss-fuzz can change LIB_FUZZING_ENGINE variable if (NOT LIB_FUZZING_ENGINE) diff --git a/cmake/limit_jobs.cmake b/cmake/limit_jobs.cmake index 28ccb62e10c..8e48fc9b9d8 100644 --- a/cmake/limit_jobs.cmake +++ b/cmake/limit_jobs.cmake @@ -21,7 +21,7 @@ if (NOT PARALLEL_COMPILE_JOBS AND MAX_COMPILER_MEMORY) set (PARALLEL_COMPILE_JOBS 1) endif () if (PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES) - message(WARNING "The auto-calculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.") + message("The auto-calculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.") endif() endif () @@ -32,7 +32,7 @@ if (NOT PARALLEL_LINK_JOBS AND MAX_LINKER_MEMORY) set (PARALLEL_LINK_JOBS 1) endif () if (PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES) - message(WARNING "The auto-calculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.") + message("The auto-calculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.") endif() endif () diff --git a/cmake/linux/default_libs.cmake b/cmake/linux/default_libs.cmake index 56a663a708e..8552097fa57 100644 --- a/cmake/linux/default_libs.cmake +++ b/cmake/linux/default_libs.cmake @@ -50,9 +50,3 @@ target_link_libraries(global-group INTERFACE $ -Wl,--end-group ) - -# FIXME: remove when all contribs will get custom cmake lists -install( - TARGETS global-group global-libs - EXPORT global -) diff --git a/cmake/linux/toolchain-aarch64.cmake b/cmake/linux/toolchain-aarch64.cmake index 2dedef8859f..b80cc01296d 100644 --- a/cmake/linux/toolchain-aarch64.cmake +++ b/cmake/linux/toolchain-aarch64.cmake @@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "aarch64-linux-gnu") set (CMAKE_CXX_COMPILER_TARGET "aarch64-linux-gnu") set (CMAKE_ASM_COMPILER_TARGET "aarch64-linux-gnu") -# Will be changed later, but somehow needed to be set here. 
-set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-aarch64") set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/aarch64-linux-gnu/libc") @@ -20,9 +16,3 @@ set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/aarch64-linux-gnu/libc") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/linux/toolchain-ppc64le.cmake b/cmake/linux/toolchain-ppc64le.cmake index c46ea954b71..98e8f7e8489 100644 --- a/cmake/linux/toolchain-ppc64le.cmake +++ b/cmake/linux/toolchain-ppc64le.cmake @@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "powerpc64le-linux-gnu") set (CMAKE_CXX_COMPILER_TARGET "powerpc64le-linux-gnu") set (CMAKE_ASM_COMPILER_TARGET "powerpc64le-linux-gnu") -# Will be changed later, but somehow needed to be set here. -set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-powerpc64le") set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/powerpc64le-linux-gnu/libc") @@ -20,9 +16,3 @@ set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/powerpc64le-linux-gnu/libc") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/linux/toolchain-riscv64.cmake b/cmake/linux/toolchain-riscv64.cmake index 7f876f88d72..ae5a38f08eb 100644 --- a/cmake/linux/toolchain-riscv64.cmake +++ b/cmake/linux/toolchain-riscv64.cmake @@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "riscv64-linux-gnu") set (CMAKE_CXX_COMPILER_TARGET "riscv64-linux-gnu") set (CMAKE_ASM_COMPILER_TARGET "riscv64-linux-gnu") -# Will be changed later, but somehow needed to be set here. -set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-riscv64") set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}") @@ -27,9 +23,3 @@ set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=bfd") # ld.lld: error: section size decrease is too large # But GNU BinUtils work. 
set (LINKER_NAME "riscv64-linux-gnu-ld.bfd" CACHE STRING "Linker name" FORCE) - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/linux/toolchain-s390x.cmake b/cmake/linux/toolchain-s390x.cmake index 945eb9affa4..d34329fb3bb 100644 --- a/cmake/linux/toolchain-s390x.cmake +++ b/cmake/linux/toolchain-s390x.cmake @@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "s390x-linux-gnu") set (CMAKE_CXX_COMPILER_TARGET "s390x-linux-gnu") set (CMAKE_ASM_COMPILER_TARGET "s390x-linux-gnu") -# Will be changed later, but somehow needed to be set here. -set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-s390x") set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/s390x-linux-gnu/libc") @@ -23,9 +19,3 @@ set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64") set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64") set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64") - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/linux/toolchain-x86_64-musl.cmake b/cmake/linux/toolchain-x86_64-musl.cmake index bc327e5ac25..fa7b3eaf0d1 100644 --- a/cmake/linux/toolchain-x86_64-musl.cmake +++ b/cmake/linux/toolchain-x86_64-musl.cmake @@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "x86_64-linux-musl") set (CMAKE_CXX_COMPILER_TARGET "x86_64-linux-musl") set (CMAKE_ASM_COMPILER_TARGET "x86_64-linux-musl") -# Will be changed later, but somehow needed to be set here. -set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_64-musl") set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}") @@ -21,11 +17,5 @@ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - set (USE_MUSL 1) add_definitions(-DUSE_MUSL=1) diff --git a/cmake/linux/toolchain-x86_64.cmake b/cmake/linux/toolchain-x86_64.cmake index 55b9df79f70..e341219a7e5 100644 --- a/cmake/linux/toolchain-x86_64.cmake +++ b/cmake/linux/toolchain-x86_64.cmake @@ -19,10 +19,6 @@ set (CMAKE_C_COMPILER_TARGET "x86_64-linux-gnu") set (CMAKE_CXX_COMPILER_TARGET "x86_64-linux-gnu") set (CMAKE_ASM_COMPILER_TARGET "x86_64-linux-gnu") -# Will be changed later, but somehow needed to be set here. 
-set (CMAKE_AR "ar") -set (CMAKE_RANLIB "ranlib") - set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_64") set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/x86_64-linux-gnu/libc") @@ -32,9 +28,3 @@ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") - -set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) - -set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) -set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/cmake/sanitize.cmake b/cmake/sanitize.cmake index f17283774eb..3f7a8498059 100644 --- a/cmake/sanitize.cmake +++ b/cmake/sanitize.cmake @@ -58,3 +58,27 @@ if (SANITIZE) message (FATAL_ERROR "Unknown sanitizer type: ${SANITIZE}") endif () endif() + +# Default coverage instrumentation (dumping the coverage map on exit) +option(WITH_COVERAGE "Instrumentation for code coverage with default implementation" OFF) + +if (WITH_COVERAGE) + message (STATUS "Enabled instrumentation for code coverage") + set(COVERAGE_FLAGS "-fprofile-instr-generate -fcoverage-mapping") +endif() + +option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF) + +if (SANITIZE_COVERAGE) + message (STATUS "Enabled instrumentation for code coverage") + + # We set this define for the whole build to indicate that at least some parts are compiled with coverage. + # And to expose it in system.build_options. + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSANITIZE_COVERAGE=1") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSANITIZE_COVERAGE=1") + + # But the actual coverage will be enabled on a per-library basis: for ClickHouse code, but not for 3rd-party. + set (COVERAGE_FLAGS "-fsanitize-coverage=trace-pc-guard,pc-table") +endif() + +set (WITHOUT_COVERAGE_FLAGS "-fno-profile-instr-generate -fno-coverage-mapping -fno-sanitize-coverage=trace-pc-guard,pc-table") diff --git a/contrib/AMQP-CPP b/contrib/AMQP-CPP index 818c2d8ad96..00f09897ce0 160000 --- a/contrib/AMQP-CPP +++ b/contrib/AMQP-CPP @@ -1 +1 @@ -Subproject commit 818c2d8ad96a08a5d20fece7d1e1e8855a2b0860 +Subproject commit 00f09897ce020a84e38f87dc416af4a19c5da9ae diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 390b0241e7d..a8f0705df88 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -1,16 +1,7 @@ #"${folder}/CMakeLists.txt" Third-party libraries may have substandard code. -set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w") -set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w") - -if (WITH_COVERAGE) - set (WITHOUT_COVERAGE_LIST ${WITHOUT_COVERAGE}) - separate_arguments(WITHOUT_COVERAGE_LIST) - # disable coverage for contib files and build with optimisations - if (COMPILER_CLANG) - add_compile_options(-O3 -DNDEBUG -finline-functions -finline-hint-functions ${WITHOUT_COVERAGE_LIST}) - endif() -endif() +set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w -ffunction-sections -fdata-sections") +set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -ffunction-sections -fdata-sections") if (SANITIZE STREQUAL "undefined") # 3rd-party libraries usually not intended to work with UBSan.
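Because -DSANITIZE_COVERAGE=1 is added above to the global compile flags (while the actual -fsanitize-coverage instrumentation stays per-library), source code can branch on that define at compile time. A small illustrative sketch, not taken from the patch; the function name is made up:

#include <cstdio>

// Reports whether this translation unit was built with the custom coverage callbacks available.
void reportCoverageBuildFlavour()
{
#if defined(SANITIZE_COVERAGE)
    std::printf("built with sanitize-coverage callbacks\n");
#else
    std::printf("built without sanitize-coverage callbacks\n");
#endif
}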
@@ -53,6 +44,7 @@ else () endif () add_contrib (miniselect-cmake miniselect) add_contrib (pdqsort-cmake pdqsort) +add_contrib (pocketfft-cmake pocketfft) add_contrib (crc32-vpmsum-cmake crc32-vpmsum) add_contrib (sparsehash-c11-cmake sparsehash-c11) add_contrib (abseil-cpp-cmake abseil-cpp) diff --git a/contrib/abseil-cpp b/contrib/abseil-cpp index 5655528c418..3bd86026c93 160000 --- a/contrib/abseil-cpp +++ b/contrib/abseil-cpp @@ -1 +1 @@ -Subproject commit 5655528c41830f733160de4fb0b99073841bae9e +Subproject commit 3bd86026c93da5a40006fd53403dff9d5f5e30e3 diff --git a/contrib/abseil-cpp-cmake/CMakeLists.txt b/contrib/abseil-cpp-cmake/CMakeLists.txt index 2901daf32db..e6c3268c57a 100644 --- a/contrib/abseil-cpp-cmake/CMakeLists.txt +++ b/contrib/abseil-cpp-cmake/CMakeLists.txt @@ -1,33 +1,3428 @@ set(ABSL_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp") +set(ABSL_COMMON_INCLUDE_DIRS "${ABSL_ROOT_DIR}") + +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +function(absl_cc_library) + cmake_parse_arguments(ABSL_CC_LIB + "DISABLE_INSTALL;PUBLIC;TESTONLY" + "NAME" + "HDRS;SRCS;COPTS;DEFINES;LINKOPTS;DEPS" + ${ARGN} + ) + + set(_NAME "absl_${ABSL_CC_LIB_NAME}") + + # Check if this is a header-only library + set(ABSL_CC_SRCS "${ABSL_CC_LIB_SRCS}") + foreach(src_file IN LISTS ABSL_CC_SRCS) + if(${src_file} MATCHES ".*\\.(h|inc)") + list(REMOVE_ITEM ABSL_CC_SRCS "${src_file}") + endif() + endforeach() + + if(ABSL_CC_SRCS STREQUAL "") + set(ABSL_CC_LIB_IS_INTERFACE 1) + else() + set(ABSL_CC_LIB_IS_INTERFACE 0) + endif() + + if(NOT ABSL_CC_LIB_IS_INTERFACE) + add_library(${_NAME} "") + target_sources(${_NAME} PRIVATE ${ABSL_CC_LIB_SRCS} ${ABSL_CC_LIB_HDRS}) + target_link_libraries(${_NAME} + PUBLIC ${ABSL_CC_LIB_DEPS} + PRIVATE + ${ABSL_CC_LIB_LINKOPTS} + ${ABSL_DEFAULT_LINKOPTS} + ) + + target_include_directories(${_NAME} + PUBLIC "${ABSL_COMMON_INCLUDE_DIRS}") + target_compile_options(${_NAME} + PRIVATE ${ABSL_CC_LIB_COPTS}) + target_compile_definitions(${_NAME} PUBLIC ${ABSL_CC_LIB_DEFINES}) + + else() + # Generating header-only library + add_library(${_NAME} INTERFACE) + target_include_directories(${_NAME} + INTERFACE "${ABSL_COMMON_INCLUDE_DIRS}") + + target_link_libraries(${_NAME} + INTERFACE + ${ABSL_CC_LIB_DEPS} + ${ABSL_CC_LIB_LINKOPTS} + ${ABSL_DEFAULT_LINKOPTS} + ) + target_compile_definitions(${_NAME} INTERFACE ${ABSL_CC_LIB_DEFINES}) + + endif() + + add_library(absl::${ABSL_CC_LIB_NAME} ALIAS ${_NAME}) +endfunction() + + +set(DIR ${ABSL_ROOT_DIR}/absl/algorithm) + +absl_cc_library( + NAME + algorithm + HDRS + "${DIR}/algorithm.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +absl_cc_library( + NAME + algorithm_container + HDRS + "${DIR}/container.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::algorithm + absl::core_headers + absl::meta + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/base) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + atomic_hook + HDRS + "${DIR}/internal/atomic_hook.h" + DEPS + absl::config + absl::core_headers + COPTS + ${ABSL_DEFAULT_COPTS} +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + errno_saver + HDRS + "${DIR}/internal/errno_saver.h" + DEPS + absl::config + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + log_severity + HDRS + "${DIR}/log_severity.h" + SRCS + "${DIR}/log_severity.cc" + DEPS + absl::config + absl::core_headers + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + nullability + HDRS + "${DIR}/nullability.h" + SRCS + "${DIR}/internal/nullability_impl.h" + DEPS + absl::core_headers + absl::type_traits + COPTS + ${ABSL_DEFAULT_COPTS} +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + raw_logging_internal + HDRS + "${DIR}/internal/raw_logging.h" + SRCS + "${DIR}/internal/raw_logging.cc" + DEPS + absl::atomic_hook + absl::config + absl::core_headers + absl::errno_saver + absl::log_severity + COPTS + ${ABSL_DEFAULT_COPTS} +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + spinlock_wait + HDRS + "${DIR}/internal/spinlock_wait.h" + SRCS + "${DIR}/internal/spinlock_akaros.inc" + "${DIR}/internal/spinlock_linux.inc" + "${DIR}/internal/spinlock_posix.inc" + "${DIR}/internal/spinlock_wait.cc" + "${DIR}/internal/spinlock_win32.inc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::core_headers + absl::errno_saver +) + +absl_cc_library( + NAME + config + HDRS + "${DIR}/config.h" + "${DIR}/options.h" + "${DIR}/policy_checks.h" + COPTS + ${ABSL_DEFAULT_COPTS} + PUBLIC +) + +absl_cc_library( + NAME + dynamic_annotations + HDRS + "${DIR}/dynamic_annotations.h" + SRCS + "${DIR}/internal/dynamic_annotations.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +absl_cc_library( + NAME + core_headers + HDRS + "${DIR}/attributes.h" + "${DIR}/const_init.h" + "${DIR}/macros.h" + "${DIR}/optimization.h" + "${DIR}/port.h" + "${DIR}/thread_annotations.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + malloc_internal + HDRS + "${DIR}/internal/direct_mmap.h" + "${DIR}/internal/low_level_alloc.h" + SRCS + "${DIR}/internal/low_level_alloc.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::base_internal + absl::config + absl::core_headers + absl::dynamic_annotations + absl::raw_logging_internal + Threads::Threads +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + base_internal + HDRS + "${DIR}/internal/hide_ptr.h" + "${DIR}/internal/identity.h" + "${DIR}/internal/inline_variable.h" + "${DIR}/internal/invoke.h" + "${DIR}/internal/scheduling_mode.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::type_traits +) + +absl_cc_library( + NAME + base + HDRS + "${DIR}/call_once.h" + "${DIR}/casts.h" + "${DIR}/internal/cycleclock.h" + "${DIR}/internal/cycleclock_config.h" + "${DIR}/internal/low_level_scheduling.h" + "${DIR}/internal/per_thread_tls.h" + "${DIR}/internal/spinlock.h" + "${DIR}/internal/sysinfo.h" + "${DIR}/internal/thread_identity.h" + "${DIR}/internal/tsan_mutex_interface.h" + "${DIR}/internal/unscaledcycleclock.h" + "${DIR}/internal/unscaledcycleclock_config.h" + SRCS + "${DIR}/internal/cycleclock.cc" + "${DIR}/internal/spinlock.cc" + "${DIR}/internal/sysinfo.cc" + "${DIR}/internal/thread_identity.cc" + "${DIR}/internal/unscaledcycleclock.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::atomic_hook + absl::base_internal + absl::config + absl::core_headers + absl::dynamic_annotations + absl::log_severity + absl::raw_logging_internal + absl::spinlock_wait + absl::type_traits + Threads::Threads + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + throw_delegate + HDRS + "${DIR}/internal/throw_delegate.h" + SRCS + "${DIR}/internal/throw_delegate.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + pretty_function + HDRS + "${DIR}/internal/pretty_function.h" + COPTS + ${ABSL_DEFAULT_COPTS} +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + endian + HDRS + "${DIR}/internal/endian.h" + "${DIR}/internal/unaligned_access.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::core_headers + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + scoped_set_env + SRCS + "${DIR}/internal/scoped_set_env.cc" + HDRS + "${DIR}/internal/scoped_set_env.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + strerror + SRCS + "${DIR}/internal/strerror.cc" + HDRS + "${DIR}/internal/strerror.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::errno_saver +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + fast_type_id + HDRS + "${DIR}/internal/fast_type_id.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +absl_cc_library( + NAME + prefetch + HDRS + "${DIR}/prefetch.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers +) + +set(DIR ${ABSL_ROOT_DIR}/absl/cleanup) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + cleanup_internal + HDRS + "${DIR}/internal/cleanup.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::core_headers + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + cleanup + HDRS + "${DIR}/cleanup.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::cleanup_internal + absl::config + absl::core_headers + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/container) + +absl_cc_library( + NAME + btree + HDRS + "${DIR}/btree_map.h" + "${DIR}/btree_set.h" + "${DIR}/internal/btree.h" + "${DIR}/internal/btree_container.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::container_common + absl::common_policy_traits + absl::compare + absl::compressed_tuple + absl::container_memory + absl::cord + absl::core_headers + absl::layout + absl::memory + absl::raw_logging_internal + absl::strings + absl::throw_delegate + absl::type_traits + absl::utility +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + compressed_tuple + HDRS + "${DIR}/internal/compressed_tuple.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + fixed_array + HDRS + "${DIR}/fixed_array.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::compressed_tuple + absl::algorithm + absl::config + absl::core_headers + absl::dynamic_annotations + absl::throw_delegate + absl::memory + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + inlined_vector_internal + HDRS + "${DIR}/internal/inlined_vector.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::compressed_tuple + absl::core_headers + absl::memory + absl::span + absl::type_traits + PUBLIC +) + +absl_cc_library( + NAME + inlined_vector + HDRS + "${DIR}/inlined_vector.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::algorithm + absl::core_headers + absl::inlined_vector_internal + absl::throw_delegate + absl::memory + absl::type_traits + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + counting_allocator + HDRS + "${DIR}/internal/counting_allocator.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config +) + +absl_cc_library( + NAME + flat_hash_map + HDRS + "${DIR}/flat_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::core_headers + absl::hash_function_defaults + absl::raw_hash_map + absl::algorithm_container + absl::memory + PUBLIC +) + +absl_cc_library( + NAME + flat_hash_set + HDRS + "${DIR}/flat_hash_set.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::hash_function_defaults + absl::raw_hash_set + absl::algorithm_container + absl::core_headers + absl::memory + PUBLIC +) + +absl_cc_library( + NAME + node_hash_map + HDRS + "${DIR}/node_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::core_headers + absl::hash_function_defaults + absl::node_slot_policy + absl::raw_hash_map + absl::algorithm_container + absl::memory + PUBLIC +) + +absl_cc_library( + NAME + node_hash_set + HDRS + "${DIR}/node_hash_set.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::hash_function_defaults + absl::node_slot_policy + absl::raw_hash_set + absl::algorithm_container + absl::memory + PUBLIC +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + container_memory + HDRS + "${DIR}/internal/container_memory.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::memory + absl::type_traits + absl::utility + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hash_function_defaults + HDRS + "${DIR}/internal/hash_function_defaults.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::cord + absl::hash + absl::strings + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hash_policy_traits + HDRS + "${DIR}/internal/hash_policy_traits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::common_policy_traits + absl::meta + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + common_policy_traits + HDRS + "${DIR}/internal/common_policy_traits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::meta + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hashtablez_sampler + HDRS + "${DIR}/internal/hashtablez_sampler.h" + SRCS + "${DIR}/internal/hashtablez_sampler.cc" + "${DIR}/internal/hashtablez_sampler_force_weak_definition.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::exponential_biased + absl::raw_logging_internal + absl::sample_recorder + absl::synchronization + absl::time +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hashtable_debug + HDRS + "${DIR}/internal/hashtable_debug.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::hashtable_debug_hooks +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hashtable_debug_hooks + HDRS + "${DIR}/internal/hashtable_debug_hooks.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + node_slot_policy + HDRS + "${DIR}/internal/node_slot_policy.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + raw_hash_map + HDRS + "${DIR}/internal/raw_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::raw_hash_set + absl::throw_delegate + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + container_common + HDRS + "${DIR}/internal/common.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + raw_hash_set + HDRS + "${DIR}/internal/raw_hash_set.h" + SRCS + "${DIR}/internal/raw_hash_set.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bits + absl::compressed_tuple + absl::config + absl::container_common + absl::container_memory + absl::core_headers + absl::dynamic_annotations + absl::endian + absl::hash + absl::hash_policy_traits + absl::hashtable_debug_hooks + absl::hashtablez_sampler + absl::memory + absl::meta + absl::optional + absl::prefetch + absl::raw_logging_internal + absl::utility + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + layout + HDRS + "${DIR}/internal/layout.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + absl::meta + absl::strings + absl::span + absl::utility + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/crc) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + crc_cpu_detect + HDRS + "${DIR}/internal/cpu_detect.h" + SRCS + "${DIR}/internal/cpu_detect.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + crc_internal + HDRS + "${DIR}/internal/crc.h" + "${DIR}/internal/crc32_x86_arm_combined_simd.h" + SRCS + "${DIR}/internal/crc.cc" + "${DIR}/internal/crc_internal.h" + "${DIR}/internal/crc_x86_arm_combined.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::crc_cpu_detect + absl::config + absl::core_headers + absl::endian + absl::prefetch + absl::raw_logging_internal + absl::memory + absl::bits +) + +absl_cc_library( + NAME + crc32c + HDRS + "${DIR}/crc32c.h" + "${DIR}/internal/crc32c.h" + "${DIR}/internal/crc_memcpy.h" + SRCS + "${DIR}/crc32c.cc" + "${DIR}/internal/crc32c_inline.h" + "${DIR}/internal/crc_memcpy_fallback.cc" + "${DIR}/internal/crc_memcpy_x86_arm_combined.cc" + "${DIR}/internal/crc_non_temporal_memcpy.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::crc_cpu_detect + absl::crc_internal + absl::non_temporal_memcpy + absl::config + absl::core_headers + absl::endian + absl::prefetch + absl::str_format + absl::strings +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + non_temporal_arm_intrinsics + HDRS + "${DIR}/internal/non_temporal_arm_intrinsics.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + non_temporal_memcpy + HDRS + "${DIR}/internal/non_temporal_memcpy.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::non_temporal_arm_intrinsics + absl::config + absl::core_headers +) + +absl_cc_library( + NAME + crc_cord_state + HDRS + "${DIR}/internal/crc_cord_state.h" + SRCS + "${DIR}/internal/crc_cord_state.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::crc32c + absl::config + absl::strings +) + +set(DIR ${ABSL_ROOT_DIR}/absl/debugging) + +absl_cc_library( + NAME + stacktrace + HDRS + "${DIR}/stacktrace.h" + "${DIR}/internal/stacktrace_aarch64-inl.inc" + "${DIR}/internal/stacktrace_arm-inl.inc" + "${DIR}/internal/stacktrace_config.h" + "${DIR}/internal/stacktrace_emscripten-inl.inc" + "${DIR}/internal/stacktrace_generic-inl.inc" + "${DIR}/internal/stacktrace_powerpc-inl.inc" + "${DIR}/internal/stacktrace_riscv-inl.inc" + "${DIR}/internal/stacktrace_unimplemented-inl.inc" + "${DIR}/internal/stacktrace_win32-inl.inc" + "${DIR}/internal/stacktrace_x86-inl.inc" + SRCS + "${DIR}/stacktrace.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::debugging_internal + absl::config + absl::core_headers + absl::dynamic_annotations + absl::raw_logging_internal + PUBLIC +) + +absl_cc_library( + NAME + symbolize + HDRS + "${DIR}/symbolize.h" + "${DIR}/internal/symbolize.h" + SRCS + "${DIR}/symbolize.cc" + "${DIR}/symbolize_darwin.inc" + "${DIR}/symbolize_elf.inc" + "${DIR}/symbolize_emscripten.inc" + "${DIR}/symbolize_unimplemented.inc" + "${DIR}/symbolize_win32.inc" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::debugging_internal + absl::demangle_internal + absl::base + absl::config + absl::core_headers + absl::dynamic_annotations + absl::malloc_internal + absl::raw_logging_internal + absl::strings + PUBLIC +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + examine_stack + HDRS + "${DIR}/internal/examine_stack.h" + SRCS + "${DIR}/internal/examine_stack.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::stacktrace + absl::symbolize + absl::config + absl::core_headers + absl::raw_logging_internal +) + +absl_cc_library( + NAME + failure_signal_handler + HDRS + "${DIR}/failure_signal_handler.h" + SRCS + "${DIR}/failure_signal_handler.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::examine_stack + absl::stacktrace + absl::base + absl::config + absl::core_headers + absl::raw_logging_internal + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + debugging_internal + HDRS + "${DIR}/internal/address_is_readable.h" + "${DIR}/internal/elf_mem_image.h" + "${DIR}/internal/vdso_support.h" + SRCS + "${DIR}/internal/address_is_readable.cc" + "${DIR}/internal/elf_mem_image.cc" + "${DIR}/internal/vdso_support.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::config + absl::dynamic_annotations + absl::errno_saver + absl::raw_logging_internal +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + demangle_internal + HDRS + "${DIR}/internal/demangle.h" + SRCS + "${DIR}/internal/demangle.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::core_headers + PUBLIC +) + +absl_cc_library( + NAME + leak_check + HDRS + "${DIR}/leak_check.h" + SRCS + "${DIR}/leak_check.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + PUBLIC +) + +# component target +absl_cc_library( + NAME + debugging + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::stacktrace + absl::leak_check + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/flags) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + flags_path_util + HDRS + "${DIR}/internal/path_util.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::strings + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + flags_program_name + SRCS + "${DIR}/internal/program_name.cc" + HDRS + "${DIR}/internal/program_name.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::flags_path_util + absl::strings + absl::synchronization + PUBLIC +) + +absl_cc_library( + NAME + flags_config + SRCS + "${DIR}/usage_config.cc" + HDRS + "${DIR}/config.h" + "${DIR}/usage_config.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::flags_path_util + absl::flags_program_name + absl::core_headers + absl::strings + absl::synchronization +) + +absl_cc_library( + NAME + flags_marshalling + SRCS + "${DIR}/marshalling.cc" + HDRS + "${DIR}/marshalling.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_severity + absl::int128 + absl::optional + absl::strings + absl::str_format +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + flags_commandlineflag_internal + SRCS + "${DIR}/internal/commandlineflag.cc" + HDRS + "${DIR}/internal/commandlineflag.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::dynamic_annotations + absl::fast_type_id +) + +absl_cc_library( + NAME + flags_commandlineflag + SRCS + "${DIR}/commandlineflag.cc" + HDRS + "${DIR}/commandlineflag.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::fast_type_id + absl::flags_commandlineflag_internal + absl::optional + absl::strings +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + flags_private_handle_accessor + SRCS + "${DIR}/internal/private_handle_accessor.cc" + HDRS + "${DIR}/internal/private_handle_accessor.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::flags_commandlineflag + absl::flags_commandlineflag_internal + absl::strings +) + +absl_cc_library( + NAME + flags_reflection + SRCS + "${DIR}/reflection.cc" + HDRS + "${DIR}/reflection.h" + "${DIR}/internal/registry.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::flags_commandlineflag + absl::flags_private_handle_accessor + absl::flags_config + absl::strings + absl::synchronization + absl::flat_hash_map +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + flags_internal + SRCS + "${DIR}/internal/flag.cc" + HDRS + "${DIR}/internal/flag.h" + "${DIR}/internal/sequence_lock.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::config + absl::flags_commandlineflag + absl::flags_commandlineflag_internal + absl::flags_config + absl::flags_marshalling + absl::synchronization + absl::meta + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + flags + SRCS + "${DIR}/flag.cc" + HDRS + "${DIR}/declare.h" + "${DIR}/flag.h" + "${DIR}/internal/flag_msvc.inc" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::flags_commandlineflag + absl::flags_config + absl::flags_internal + absl::flags_reflection + absl::base + absl::core_headers + absl::strings +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + flags_usage_internal + SRCS + "${DIR}/internal/usage.cc" + HDRS + "${DIR}/internal/usage.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::flags_config + absl::flags + absl::flags_commandlineflag + absl::flags_internal + absl::flags_path_util + absl::flags_private_handle_accessor + absl::flags_program_name + absl::flags_reflection + absl::strings + absl::synchronization +) + +absl_cc_library( + NAME + flags_usage + SRCS + "${DIR}/usage.cc" + HDRS + "${DIR}/usage.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::flags_usage_internal + absl::raw_logging_internal + absl::strings + absl::synchronization +) + +absl_cc_library( + NAME + flags_parse + SRCS + "${DIR}/parse.cc" + HDRS + "${DIR}/internal/parse.h" + "${DIR}/parse.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::algorithm_container + absl::config + absl::core_headers + absl::flags_config + absl::flags + absl::flags_commandlineflag + absl::flags_commandlineflag_internal + absl::flags_internal + absl::flags_private_handle_accessor + absl::flags_program_name + absl::flags_reflection + absl::flags_usage + absl::strings + absl::synchronization +) + +set(DIR ${ABSL_ROOT_DIR}/absl/functional) + +absl_cc_library( + NAME + any_invocable + SRCS + "${DIR}/internal/any_invocable.h" + HDRS + "${DIR}/any_invocable.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::config + absl::core_headers + absl::type_traits + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + bind_front + SRCS + "${DIR}/internal/front_binder.h" + HDRS + "${DIR}/bind_front.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::compressed_tuple + PUBLIC +) + +absl_cc_library( + NAME + function_ref + SRCS + "${DIR}/internal/function_ref.h" + HDRS + "${DIR}/function_ref.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::core_headers + absl::any_invocable + absl::meta + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/hash) + +absl_cc_library( + NAME + hash + HDRS + "${DIR}/hash.h" + SRCS + "${DIR}/internal/hash.cc" + "${DIR}/internal/hash.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bits + absl::city + absl::config + absl::core_headers + absl::endian + absl::fixed_array + absl::function_ref + absl::meta + absl::int128 + absl::strings + absl::optional + absl::variant + absl::utility + absl::low_level_hash + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + city + HDRS + "${DIR}/internal/city.h" + SRCS + "${DIR}/internal/city.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + absl::endian +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + low_level_hash + HDRS + "${DIR}/internal/low_level_hash.h" + SRCS + "${DIR}/internal/low_level_hash.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::endian + absl::int128 + absl::prefetch +) + +set(DIR ${ABSL_ROOT_DIR}/absl/log) + +# Internal targets +absl_cc_library( + NAME + log_internal_check_impl + SRCS + HDRS + "${DIR}/internal/check_impl.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::log_internal_check_op + absl::log_internal_conditions + absl::log_internal_message + absl::log_internal_strip +) + +absl_cc_library( + NAME + log_internal_check_op + SRCS + "${DIR}/internal/check_op.cc" + HDRS + "${DIR}/internal/check_op.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_internal_nullguard + absl::log_internal_nullstream + absl::log_internal_strip + absl::strings +) + +absl_cc_library( + NAME + log_internal_conditions + SRCS + "${DIR}/internal/conditions.cc" + HDRS + "${DIR}/internal/conditions.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::log_internal_voidify +) + +absl_cc_library( + NAME + log_internal_config + SRCS + HDRS + "${DIR}/internal/config.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_library( + NAME + log_internal_flags + SRCS + HDRS + "${DIR}/internal/flags.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::flags +) + +absl_cc_library( + NAME + log_internal_format + SRCS + "${DIR}/internal/log_format.cc" + HDRS + "${DIR}/internal/log_format.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_internal_append_truncated + absl::log_internal_config + absl::log_internal_globals + absl::log_severity + absl::strings + absl::str_format + absl::time + absl::span +) + +absl_cc_library( + NAME + log_internal_globals + SRCS + "${DIR}/internal/globals.cc" + HDRS + "${DIR}/internal/globals.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_severity + absl::raw_logging_internal + absl::strings + absl::time +) + +absl_cc_library( + NAME + log_internal_log_impl + SRCS + HDRS + "${DIR}/internal/log_impl.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log_internal_conditions + absl::log_internal_message + absl::log_internal_strip +) + +absl_cc_library( + NAME + log_internal_proto + SRCS + "${DIR}/internal/proto.cc" + HDRS + "${DIR}/internal/proto.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::strings + absl::span +) + +absl_cc_library( + NAME + log_internal_message + SRCS + "${DIR}/internal/log_message.cc" + HDRS + "${DIR}/internal/log_message.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::errno_saver + absl::inlined_vector + absl::examine_stack + absl::log_internal_append_truncated + absl::log_internal_format + absl::log_internal_globals + absl::log_internal_proto + absl::log_internal_log_sink_set + absl::log_internal_nullguard + absl::log_globals + absl::log_entry + absl::log_severity + absl::log_sink + absl::log_sink_registry + 
absl::memory + absl::raw_logging_internal + absl::strings + absl::strerror + absl::time + absl::span +) + +absl_cc_library( + NAME + log_internal_log_sink_set + SRCS + "${DIR}/internal/log_sink_set.cc" + HDRS + "${DIR}/internal/log_sink_set.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + $<$:-llog> + DEPS + absl::base + absl::cleanup + absl::config + absl::core_headers + absl::log_internal_config + absl::log_internal_globals + absl::log_globals + absl::log_entry + absl::log_severity + absl::log_sink + absl::raw_logging_internal + absl::synchronization + absl::span + absl::strings +) + +absl_cc_library( + NAME + log_internal_nullguard + SRCS + "${DIR}/internal/nullguard.cc" + HDRS + "${DIR}/internal/nullguard.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_library( + NAME + log_internal_nullstream + SRCS + HDRS + "${DIR}/internal/nullstream.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_severity + absl::strings +) + +absl_cc_library( + NAME + log_internal_strip + SRCS + HDRS + "${DIR}/internal/strip.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log_internal_message + absl::log_internal_nullstream + absl::log_severity +) + +absl_cc_library( + NAME + log_internal_voidify + SRCS + HDRS + "${DIR}/internal/voidify.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +absl_cc_library( + NAME + log_internal_append_truncated + SRCS + HDRS + "${DIR}/internal/append_truncated.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::strings + absl::span +) + +# Public targets +absl_cc_library( + NAME + absl_check + SRCS + HDRS + "${DIR}/absl_check.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log_internal_check_impl + PUBLIC +) + +absl_cc_library( + NAME + absl_log + SRCS + HDRS + "${DIR}/absl_log.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log_internal_log_impl + PUBLIC +) + +absl_cc_library( + NAME + check + SRCS + HDRS + "${DIR}/check.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log_internal_check_impl + absl::core_headers + absl::log_internal_check_op + absl::log_internal_conditions + absl::log_internal_message + absl::log_internal_strip + PUBLIC +) + +absl_cc_library( + NAME + die_if_null + SRCS + "${DIR}/die_if_null.cc" + HDRS + "${DIR}/die_if_null.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log + absl::strings + PUBLIC +) + +absl_cc_library( + NAME + log_flags + SRCS + "${DIR}/flags.cc" + HDRS + "${DIR}/flags.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_globals + absl::log_severity + absl::log_internal_config + absl::log_internal_flags + absl::flags + absl::flags_marshalling + absl::strings + PUBLIC +) + +absl_cc_library( + NAME + log_globals + SRCS + "${DIR}/globals.cc" + HDRS + "${DIR}/globals.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::atomic_hook + absl::config + absl::core_headers + absl::hash + absl::log_severity + absl::raw_logging_internal + absl::strings +) + +absl_cc_library( + NAME + log_initialize + SRCS + "${DIR}/initialize.cc" + 
HDRS + "${DIR}/initialize.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_globals + absl::log_internal_globals + absl::time + PUBLIC +) + +absl_cc_library( + NAME + log + SRCS + HDRS + "${DIR}/log.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::log_internal_log_impl + PUBLIC +) + +absl_cc_library( + NAME + log_entry + SRCS + "${DIR}/log_entry.cc" + HDRS + "${DIR}/log_entry.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::log_internal_config + absl::log_severity + absl::span + absl::strings + absl::time + PUBLIC +) + +absl_cc_library( + NAME + log_sink + SRCS + "${DIR}/log_sink.cc" + HDRS + "${DIR}/log_sink.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_entry + PUBLIC +) + +absl_cc_library( + NAME + log_sink_registry + SRCS + HDRS + "${DIR}/log_sink_registry.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_sink + absl::log_internal_log_sink_set + PUBLIC +) + +absl_cc_library( + NAME + log_streamer + SRCS + HDRS + "${DIR}/log_streamer.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::absl_log + absl::log_severity + absl::optional + absl::strings + absl::strings_internal + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + log_internal_structured + HDRS + "${DIR}/internal/structured.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_internal_message + absl::strings +) + +absl_cc_library( + NAME + log_structured + HDRS + "${DIR}/structured.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::log_internal_structured + absl::strings + PUBLIC +) + +absl_cc_library( + NAME + log_internal_fnmatch + SRCS + "${DIR}/internal/fnmatch.cc" + HDRS + "${DIR}/internal/fnmatch.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::strings +) + +set(DIR ${ABSL_ROOT_DIR}/absl/memory) + +absl_cc_library( + NAME + memory + HDRS + "${DIR}/memory.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::meta + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/meta) + +absl_cc_library( + NAME + type_traits + HDRS + "${DIR}/type_traits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + PUBLIC +) + +# component target +absl_cc_library( + NAME + meta + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::type_traits + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/numeric) + +absl_cc_library( + NAME + bits + HDRS + "${DIR}/bits.h" + "${DIR}/internal/bits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + PUBLIC +) + +absl_cc_library( + NAME + int128 + HDRS + "${DIR}/int128.h" + SRCS + "${DIR}/int128.cc" + "${DIR}/int128_have_intrinsic.inc" + "${DIR}/int128_no_intrinsic.inc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + absl::bits + PUBLIC +) + +# component target +absl_cc_library( + NAME + numeric + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::int128 + PUBLIC +) + +absl_cc_library( + NAME + numeric_representation + HDRS + "${DIR}/internal/representation.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +absl_cc_library( + NAME + sample_recorder + HDRS + "${DIR}/internal/sample_recorder.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + 
absl::base + absl::synchronization +) + +set(DIR ${ABSL_ROOT_DIR}/absl/profiling) + +absl_cc_library( + NAME + exponential_biased + SRCS + "${DIR}/internal/exponential_biased.cc" + HDRS + "${DIR}/internal/exponential_biased.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_library( + NAME + periodic_sampler + SRCS + "${DIR}/internal/periodic_sampler.cc" + HDRS + "${DIR}/internal/periodic_sampler.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::exponential_biased +) + +set(DIR ${ABSL_ROOT_DIR}/absl/random) + +absl_cc_library( + NAME + random_random + HDRS + "${DIR}/random.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::random_distributions + absl::random_internal_nonsecure_base + absl::random_internal_pcg_engine + absl::random_internal_pool_urbg + absl::random_internal_randen_engine + absl::random_seed_sequences +) + +absl_cc_library( + NAME + random_bit_gen_ref + HDRS + "${DIR}/bit_gen_ref.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::random_internal_distribution_caller + absl::random_internal_fast_uniform_bits + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_mock_helpers + HDRS + "${DIR}/internal/mock_helpers.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::fast_type_id + absl::optional +) + +absl_cc_library( + NAME + random_distributions + SRCS + "${DIR}/discrete_distribution.cc" + "${DIR}/gaussian_distribution.cc" + HDRS + "${DIR}/bernoulli_distribution.h" + "${DIR}/beta_distribution.h" + "${DIR}/discrete_distribution.h" + "${DIR}/distributions.h" + "${DIR}/exponential_distribution.h" + "${DIR}/gaussian_distribution.h" + "${DIR}/log_uniform_int_distribution.h" + "${DIR}/poisson_distribution.h" + "${DIR}/uniform_int_distribution.h" + "${DIR}/uniform_real_distribution.h" + "${DIR}/zipf_distribution.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base_internal + absl::config + absl::core_headers + absl::random_internal_generate_real + absl::random_internal_distribution_caller + absl::random_internal_fast_uniform_bits + absl::random_internal_fastmath + absl::random_internal_iostream_state_saver + absl::random_internal_traits + absl::random_internal_uniform_helper + absl::random_internal_wide_multiply + absl::strings + absl::type_traits +) + +absl_cc_library( + NAME + random_seed_gen_exception + SRCS + "${DIR}/seed_gen_exception.cc" + HDRS + "${DIR}/seed_gen_exception.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +absl_cc_library( + NAME + random_seed_sequences + SRCS + "${DIR}/seed_sequences.cc" + HDRS + "${DIR}/seed_sequences.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::inlined_vector + absl::random_internal_pool_urbg + absl::random_internal_salted_seed_seq + absl::random_internal_seed_material + absl::random_seed_gen_exception + absl::span +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_traits + HDRS + "${DIR}/internal/traits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + random_internal_distribution_caller + HDRS + "${DIR}/internal/distribution_caller.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::utility + absl::fast_type_id +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_fast_uniform_bits + HDRS + "${DIR}/internal/fast_uniform_bits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_seed_material + SRCS + "${DIR}/internal/seed_material.cc" + HDRS + "${DIR}/internal/seed_material.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::optional + absl::random_internal_fast_uniform_bits + absl::raw_logging_internal + absl::span + absl::strings +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_pool_urbg + SRCS + "${DIR}/internal/pool_urbg.cc" + HDRS + "${DIR}/internal/pool_urbg.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::endian + absl::random_internal_randen + absl::random_internal_seed_material + absl::random_internal_traits + absl::random_seed_gen_exception + absl::raw_logging_internal + absl::span +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_salted_seed_seq + HDRS + "${DIR}/internal/salted_seed_seq.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::inlined_vector + absl::optional + absl::span + absl::random_internal_seed_material + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_iostream_state_saver + HDRS + "${DIR}/internal/iostream_state_saver.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::int128 + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_generate_real + HDRS + "${DIR}/internal/generate_real.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::bits + absl::random_internal_fastmath + absl::random_internal_traits + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_wide_multiply + HDRS + "${DIR}/internal/wide_multiply.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::bits + absl::config + absl::int128 +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_fastmath + HDRS + "${DIR}/internal/fastmath.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::bits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_nonsecure_base + HDRS + "${DIR}/internal/nonsecure_base.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::core_headers + absl::inlined_vector + absl::random_internal_pool_urbg + absl::random_internal_salted_seed_seq + absl::random_internal_seed_material + absl::span + absl::type_traits +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + random_internal_pcg_engine + HDRS + "${DIR}/internal/pcg_engine.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::int128 + absl::random_internal_fastmath + absl::random_internal_iostream_state_saver + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_randen_engine + HDRS + "${DIR}/internal/randen_engine.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::endian + absl::random_internal_iostream_state_saver + absl::random_internal_randen + absl::raw_logging_internal + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_platform + HDRS + "${DIR}/internal/randen_traits.h" + "${DIR}/internal/platform.h" + SRCS + "${DIR}/internal/randen_round_keys.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_randen + SRCS + "${DIR}/internal/randen.cc" + HDRS + "${DIR}/internal/randen.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::random_internal_platform + absl::random_internal_randen_hwaes + absl::random_internal_randen_slow +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_randen_slow + SRCS + "${DIR}/internal/randen_slow.cc" + HDRS + "${DIR}/internal/randen_slow.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::random_internal_platform + absl::config +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_randen_hwaes + SRCS + "${DIR}/internal/randen_detect.cc" + HDRS + "${DIR}/internal/randen_detect.h" + "${DIR}/internal/randen_hwaes.h" + COPTS + ${ABSL_DEFAULT_COPTS} + ${ABSL_RANDOM_RANDEN_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::random_internal_platform + absl::random_internal_randen_hwaes_impl + absl::config +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + random_internal_randen_hwaes_impl + SRCS + "${DIR}/internal/randen_hwaes.cc" + "${DIR}/internal/randen_hwaes.h" + COPTS + ${ABSL_DEFAULT_COPTS} + ${ABSL_RANDOM_RANDEN_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::random_internal_platform + absl::config +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + random_internal_uniform_helper + HDRS + "${DIR}/internal/uniform_helper.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::random_internal_traits + absl::type_traits +) + +set(DIR ${ABSL_ROOT_DIR}/absl/status) + +absl_cc_library( + NAME + status + HDRS + "${DIR}/status.h" + SRCS + "${DIR}/internal/status_internal.h" + "${DIR}/internal/status_internal.cc" + "${DIR}/status.cc" + "${DIR}/status_payload_printer.h" + "${DIR}/status_payload_printer.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEFINES + "$<$:_LINUX_SOURCE_COMPAT>" + DEPS + absl::atomic_hook + absl::config + absl::cord + absl::core_headers + absl::function_ref + absl::inlined_vector + absl::memory + absl::optional + absl::raw_logging_internal + absl::span + absl::stacktrace + absl::strerror + absl::str_format + absl::strings + absl::symbolize + PUBLIC +) + +absl_cc_library( + NAME + statusor + HDRS + "${DIR}/statusor.h" + SRCS + "${DIR}/statusor.cc" + "${DIR}/internal/statusor_internal.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::raw_logging_internal + absl::status + absl::strings + absl::type_traits + absl::utility + absl::variant + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/strings) + +absl_cc_library( + NAME + string_view + HDRS + "${DIR}/string_view.h" + SRCS + "${DIR}/string_view.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::throw_delegate + PUBLIC +) + +absl_cc_library( + NAME + strings + HDRS + "${DIR}/ascii.h" + "${DIR}/charconv.h" + "${DIR}/escaping.h" + "${DIR}/has_absl_stringify.h" + "${DIR}/internal/damerau_levenshtein_distance.h" + "${DIR}/internal/string_constant.h" + "${DIR}/match.h" + "${DIR}/numbers.h" + "${DIR}/str_cat.h" + "${DIR}/str_join.h" + "${DIR}/str_replace.h" + "${DIR}/str_split.h" + "${DIR}/strip.h" + "${DIR}/substitute.h" + SRCS + "${DIR}/ascii.cc" + "${DIR}/charconv.cc" + "${DIR}/escaping.cc" + "${DIR}/internal/charconv_bigint.cc" + "${DIR}/internal/charconv_bigint.h" + "${DIR}/internal/charconv_parse.cc" + "${DIR}/internal/charconv_parse.h" + "${DIR}/internal/damerau_levenshtein_distance.cc" + "${DIR}/internal/memutil.cc" + "${DIR}/internal/memutil.h" + "${DIR}/internal/stringify_sink.h" + "${DIR}/internal/stringify_sink.cc" + "${DIR}/internal/stl_type_traits.h" + "${DIR}/internal/str_join_internal.h" + "${DIR}/internal/str_split_internal.h" + "${DIR}/match.cc" + "${DIR}/numbers.cc" + "${DIR}/str_cat.cc" + "${DIR}/str_replace.cc" + "${DIR}/str_split.cc" + "${DIR}/substitute.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::string_view + absl::strings_internal + absl::base + absl::bits + absl::charset + absl::config + absl::core_headers + absl::endian + absl::int128 + absl::memory + absl::raw_logging_internal + absl::throw_delegate + absl::type_traits + PUBLIC +) + +absl_cc_library( + NAME + charset + HDRS + charset.h + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::string_view + PUBLIC +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + strings_internal + HDRS + "${DIR}/internal/escaping.cc" + "${DIR}/internal/escaping.h" + "${DIR}/internal/ostringstream.h" + "${DIR}/internal/resize_uninitialized.h" + "${DIR}/internal/utf8.h" + SRCS + "${DIR}/internal/ostringstream.cc" + "${DIR}/internal/utf8.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + absl::endian + absl::raw_logging_internal + absl::type_traits +) + +absl_cc_library( + NAME + str_format + HDRS + "${DIR}/str_format.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::str_format_internal + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + str_format_internal + HDRS + "${DIR}/internal/str_format/arg.h" + "${DIR}/internal/str_format/bind.h" + "${DIR}/internal/str_format/checker.h" + "${DIR}/internal/str_format/constexpr_parser.h" + "${DIR}/internal/str_format/extension.h" + "${DIR}/internal/str_format/float_conversion.h" + "${DIR}/internal/str_format/output.h" + "${DIR}/internal/str_format/parser.h" + SRCS + "${DIR}/internal/str_format/arg.cc" + "${DIR}/internal/str_format/bind.cc" + "${DIR}/internal/str_format/extension.cc" + "${DIR}/internal/str_format/float_conversion.cc" + "${DIR}/internal/str_format/output.cc" + "${DIR}/internal/str_format/parser.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bits + absl::strings + absl::config + absl::core_headers + absl::inlined_vector + absl::numeric_representation + absl::type_traits + absl::utility + absl::int128 + absl::span +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cord_internal + HDRS + "${DIR}/internal/cord_data_edge.h" + "${DIR}/internal/cord_internal.h" + "${DIR}/internal/cord_rep_btree.h" + "${DIR}/internal/cord_rep_btree_navigator.h" + "${DIR}/internal/cord_rep_btree_reader.h" + "${DIR}/internal/cord_rep_crc.h" + "${DIR}/internal/cord_rep_consume.h" + "${DIR}/internal/cord_rep_flat.h" + SRCS + "${DIR}/internal/cord_internal.cc" + "${DIR}/internal/cord_rep_btree.cc" + "${DIR}/internal/cord_rep_btree_navigator.cc" + "${DIR}/internal/cord_rep_btree_reader.cc" + "${DIR}/internal/cord_rep_crc.cc" + "${DIR}/internal/cord_rep_consume.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::compressed_tuple + absl::config + absl::container_memory + absl::core_headers + absl::crc_cord_state + absl::endian + absl::inlined_vector + absl::layout + absl::raw_logging_internal + absl::strings + absl::throw_delegate + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cordz_update_tracker + HDRS + "${DIR}/internal/cordz_update_tracker.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cordz_functions + HDRS + "${DIR}/internal/cordz_functions.h" + SRCS + "${DIR}/internal/cordz_functions.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + absl::exponential_biased + absl::raw_logging_internal +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cordz_statistics + HDRS + "${DIR}/internal/cordz_statistics.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + absl::cordz_update_tracker + absl::synchronization +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + cordz_handle + HDRS + "${DIR}/internal/cordz_handle.h" + SRCS + "${DIR}/internal/cordz_handle.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::raw_logging_internal + absl::synchronization +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cordz_info + HDRS + "${DIR}/internal/cordz_info.h" + SRCS + "${DIR}/internal/cordz_info.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::cord_internal + absl::cordz_functions + absl::cordz_handle + absl::cordz_statistics + absl::cordz_update_tracker + absl::core_headers + absl::inlined_vector + absl::span + absl::raw_logging_internal + absl::stacktrace + absl::synchronization + absl::time +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cordz_sample_token + HDRS + "${DIR}/internal/cordz_sample_token.h" + SRCS + "${DIR}/internal/cordz_sample_token.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::cordz_handle + absl::cordz_info +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cordz_update_scope + HDRS + "${DIR}/internal/cordz_update_scope.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::cord_internal + absl::cordz_info + absl::cordz_update_tracker + absl::core_headers +) + +absl_cc_library( + NAME + cord + HDRS + "${DIR}/cord.h" + "${DIR}/cord_buffer.h" + SRCS + "${DIR}/cord.cc" + "${DIR}/cord_analysis.cc" + "${DIR}/cord_analysis.h" + "${DIR}/cord_buffer.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::cord_internal + absl::cordz_functions + absl::cordz_info + absl::cordz_update_scope + absl::cordz_update_tracker + absl::core_headers + absl::crc32c + absl::crc_cord_state + absl::endian + absl::function_ref + absl::inlined_vector + absl::optional + absl::raw_logging_internal + absl::span + absl::strings + absl::type_traits + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/synchronization) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + graphcycles_internal + HDRS + "${DIR}/internal/graphcycles.h" + SRCS + "${DIR}/internal/graphcycles.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::base_internal + absl::config + absl::core_headers + absl::malloc_internal + absl::raw_logging_internal +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library(
+  NAME
+    kernel_timeout_internal
+  HDRS
+    "${DIR}/internal/kernel_timeout.h"
+  SRCS
+    "${DIR}/internal/kernel_timeout.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::core_headers
+    absl::raw_logging_internal
+    absl::time
+)
+
+absl_cc_library(
+  NAME
+    synchronization
+  HDRS
+    "${DIR}/barrier.h"
+    "${DIR}/blocking_counter.h"
+    "${DIR}/internal/create_thread_identity.h"
+    "${DIR}/internal/futex.h"
+    "${DIR}/internal/futex_waiter.h"
+    "${DIR}/internal/per_thread_sem.h"
+    "${DIR}/internal/pthread_waiter.h"
+    "${DIR}/internal/sem_waiter.h"
+    "${DIR}/internal/stdcpp_waiter.h"
+    "${DIR}/internal/waiter.h"
+    "${DIR}/internal/waiter_base.h"
+    "${DIR}/internal/win32_waiter.h"
+    "${DIR}/mutex.h"
+    "${DIR}/notification.h"
+  SRCS
+    "${DIR}/barrier.cc"
+    "${DIR}/blocking_counter.cc"
+    "${DIR}/internal/create_thread_identity.cc"
+    "${DIR}/internal/futex_waiter.cc"
+    "${DIR}/internal/per_thread_sem.cc"
+    "${DIR}/internal/pthread_waiter.cc"
+    "${DIR}/internal/sem_waiter.cc"
+    "${DIR}/internal/stdcpp_waiter.cc"
+    "${DIR}/internal/waiter_base.cc"
+    "${DIR}/internal/win32_waiter.cc"
+    "${DIR}/notification.cc"
+    "${DIR}/mutex.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::graphcycles_internal
+    absl::kernel_timeout_internal
+    absl::atomic_hook
+    absl::base
+    absl::base_internal
+    absl::config
+    absl::core_headers
+    absl::dynamic_annotations
+    absl::malloc_internal
+    absl::raw_logging_internal
+    absl::stacktrace
+    absl::symbolize
+    absl::time
+    Threads::Threads
+  PUBLIC
+)
+
+set(DIR ${ABSL_ROOT_DIR}/absl/time)
+
+absl_cc_library(
+  NAME
+    time
+  HDRS
+    "${DIR}/civil_time.h"
+    "${DIR}/clock.h"
+    "${DIR}/time.h"
+  SRCS
+    "${DIR}/civil_time.cc"
+    "${DIR}/clock.cc"
+    "${DIR}/duration.cc"
+    "${DIR}/format.cc"
+    "${DIR}/internal/get_current_time_chrono.inc"
+    "${DIR}/internal/get_current_time_posix.inc"
+    "${DIR}/time.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base
+    absl::civil_time
+    absl::core_headers
+    absl::int128
+    absl::raw_logging_internal
+    absl::strings
+    absl::time_zone
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    civil_time
+  HDRS
+    "${DIR}/internal/cctz/include/cctz/civil_time.h"
+    "${DIR}/internal/cctz/include/cctz/civil_time_detail.h"
+  SRCS
+    "${DIR}/internal/cctz/src/civil_time_detail.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+)
+
+absl_cc_library(
+  NAME
+    time_zone
+  HDRS
+    "${DIR}/internal/cctz/include/cctz/time_zone.h"
+    "${DIR}/internal/cctz/include/cctz/zone_info_source.h"
+  SRCS
+    "${DIR}/internal/cctz/src/time_zone_fixed.cc"
+    "${DIR}/internal/cctz/src/time_zone_fixed.h"
+    "${DIR}/internal/cctz/src/time_zone_format.cc"
+    "${DIR}/internal/cctz/src/time_zone_if.cc"
+    "${DIR}/internal/cctz/src/time_zone_if.h"
+    "${DIR}/internal/cctz/src/time_zone_impl.cc"
+    "${DIR}/internal/cctz/src/time_zone_impl.h"
+    "${DIR}/internal/cctz/src/time_zone_info.cc"
+    "${DIR}/internal/cctz/src/time_zone_info.h"
+    "${DIR}/internal/cctz/src/time_zone_libc.cc"
+    "${DIR}/internal/cctz/src/time_zone_libc.h"
+    "${DIR}/internal/cctz/src/time_zone_lookup.cc"
+    "${DIR}/internal/cctz/src/time_zone_posix.cc"
+    "${DIR}/internal/cctz/src/time_zone_posix.h"
+    "${DIR}/internal/cctz/src/tzfile.h"
+    "${DIR}/internal/cctz/src/zone_info_source.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    Threads::Threads
+    $<$<PLATFORM_ID:Darwin>:-Wl,-framework,CoreFoundation>
+)
+
+set(DIR ${ABSL_ROOT_DIR}/absl/types)
+
+absl_cc_library(
+  NAME
+    any
+  HDRS
+    "${DIR}/any.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::bad_any_cast
+    absl::config
+    absl::core_headers
+    absl::fast_type_id
+    absl::type_traits
+
absl::utility + PUBLIC +) + +absl_cc_library( + NAME + bad_any_cast + HDRS + "${DIR}/bad_any_cast.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bad_any_cast_impl + absl::config + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + bad_any_cast_impl + SRCS + "${DIR}/bad_any_cast.h" + "${DIR}/bad_any_cast.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal +) + +absl_cc_library( + NAME + span + HDRS + "${DIR}/span.h" + SRCS + "${DIR}/internal/span.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::algorithm + absl::core_headers + absl::throw_delegate + absl::type_traits + PUBLIC +) + +absl_cc_library( + NAME + optional + HDRS + "${DIR}/optional.h" + SRCS + "${DIR}/internal/optional.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bad_optional_access + absl::base_internal + absl::config + absl::core_headers + absl::memory + absl::type_traits + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + bad_optional_access + HDRS + "${DIR}/bad_optional_access.h" + SRCS + "${DIR}/bad_optional_access.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal + PUBLIC +) + +absl_cc_library( + NAME + bad_variant_access + HDRS + "${DIR}/bad_variant_access.h" + SRCS + "${DIR}/bad_variant_access.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal + PUBLIC +) + +absl_cc_library( + NAME + variant + HDRS + "${DIR}/variant.h" + SRCS + "${DIR}/internal/variant.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bad_variant_access + absl::base_internal + absl::config + absl::core_headers + absl::type_traits + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + compare + HDRS + "${DIR}/compare.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::type_traits + PUBLIC +) + +set(DIR ${ABSL_ROOT_DIR}/absl/utility) + +absl_cc_library( + NAME + utility + HDRS + "${DIR}/utility.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::config + absl::type_traits + PUBLIC +) + +absl_cc_library( + NAME + if_constexpr + HDRS + "${DIR}/internal/if_constexpr.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) -set(ABSL_PROPAGATE_CXX_STD ON) -add_subdirectory("${ABSL_ROOT_DIR}" "${ClickHouse_BINARY_DIR}/contrib/abseil-cpp") add_library(_abseil_swiss_tables INTERFACE) - -target_link_libraries(_abseil_swiss_tables INTERFACE - absl::flat_hash_map - absl::flat_hash_set -) - -get_target_property(FLAT_HASH_MAP_INCLUDE_DIR absl::flat_hash_map INTERFACE_INCLUDE_DIRECTORIES) -target_include_directories (_abseil_swiss_tables SYSTEM BEFORE INTERFACE ${FLAT_HASH_MAP_INCLUDE_DIR}) - -get_target_property(FLAT_HASH_SET_INCLUDE_DIR absl::flat_hash_set INTERFACE_INCLUDE_DIRECTORIES) -target_include_directories (_abseil_swiss_tables SYSTEM BEFORE INTERFACE ${FLAT_HASH_SET_INCLUDE_DIR}) - +target_include_directories (_abseil_swiss_tables SYSTEM BEFORE INTERFACE ${ABSL_ROOT_DIR}) add_library(ch_contrib::abseil_swiss_tables ALIAS _abseil_swiss_tables) - -set(ABSL_FORMAT_SRC - ${ABSL_ROOT_DIR}/absl/strings/internal/str_format/arg.cc - ${ABSL_ROOT_DIR}/absl/strings/internal/str_format/bind.cc - ${ABSL_ROOT_DIR}/absl/strings/internal/str_format/extension.cc - ${ABSL_ROOT_DIR}/absl/strings/internal/str_format/float_conversion.cc - ${ABSL_ROOT_DIR}/absl/strings/internal/str_format/output.cc - ${ABSL_ROOT_DIR}/absl/strings/internal/str_format/parser.cc -) - -add_library(_abseil_str_format ${ABSL_FORMAT_SRC}) -target_include_directories(_abseil_str_format PUBLIC 
${ABSL_ROOT_DIR}) - -add_library(ch_contrib::abseil_str_format ALIAS _abseil_str_format) diff --git a/contrib/arrow b/contrib/arrow index 1d93838f69a..ba5c67934e8 160000 --- a/contrib/arrow +++ b/contrib/arrow @@ -1 +1 @@ -Subproject commit 1d93838f69a802639ca144ea5704a98e2481810d +Subproject commit ba5c67934e8274d649befcffab56731632dc5253 diff --git a/contrib/arrow-cmake/CMakeLists.txt b/contrib/arrow-cmake/CMakeLists.txt index 02e809c560f..96d1f4adda7 100644 --- a/contrib/arrow-cmake/CMakeLists.txt +++ b/contrib/arrow-cmake/CMakeLists.txt @@ -77,16 +77,16 @@ set(FLATBUFFERS_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/flatbuffers") set(FLATBUFFERS_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/flatbuffers") set(FLATBUFFERS_INCLUDE_DIR "${FLATBUFFERS_SRC_DIR}/include") -# set flatbuffers CMake options -set(FLATBUFFERS_BUILD_FLATLIB ON CACHE BOOL "Enable the build of the flatbuffers library") -set(FLATBUFFERS_BUILD_SHAREDLIB OFF CACHE BOOL "Disable the build of the flatbuffers shared library") -set(FLATBUFFERS_BUILD_TESTS OFF CACHE BOOL "Skip flatbuffers tests") +set(FLATBUFFERS_SRCS + ${FLATBUFFERS_SRC_DIR}/src/idl_parser.cpp + ${FLATBUFFERS_SRC_DIR}/src/idl_gen_text.cpp + ${FLATBUFFERS_SRC_DIR}/src/reflection.cpp + ${FLATBUFFERS_SRC_DIR}/src/util.cpp) -add_subdirectory(${FLATBUFFERS_SRC_DIR} "${FLATBUFFERS_BINARY_DIR}") +add_library(_flatbuffers STATIC ${FLATBUFFERS_SRCS}) +target_include_directories(_flatbuffers PUBLIC ${FLATBUFFERS_INCLUDE_DIR}) +target_compile_definitions(_flatbuffers PRIVATE -DFLATBUFFERS_LOCALE_INDEPENDENT=0) -add_library(_flatbuffers INTERFACE) -target_link_libraries(_flatbuffers INTERFACE flatbuffers) -target_include_directories(_flatbuffers INTERFACE ${FLATBUFFERS_INCLUDE_DIR}) # === hdfs # NOTE: cannot use ch_contrib::hdfs since it's INCLUDE_DIRECTORIES does not includes trailing "hdfs/" @@ -109,7 +109,6 @@ set (ORC_CXX_HAS_CSTDINT 1) set (ORC_CXX_HAS_THREAD_LOCAL 1) include(orc_check.cmake) -configure_file("${ORC_INCLUDE_DIR}/orc/orc-config.hh.in" "${ORC_BUILD_INCLUDE_DIR}/orc/orc-config.hh") configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/Adaptor.hh") @@ -128,7 +127,6 @@ set(ORC_SRCS "${ORC_SOURCE_SRC_DIR}/BpackingDefault.hh" "${ORC_SOURCE_SRC_DIR}/ByteRLE.cc" "${ORC_SOURCE_SRC_DIR}/ByteRLE.hh" - "${ORC_SOURCE_SRC_DIR}/CMakeLists.txt" "${ORC_SOURCE_SRC_DIR}/ColumnPrinter.cc" "${ORC_SOURCE_SRC_DIR}/ColumnReader.cc" "${ORC_SOURCE_SRC_DIR}/ColumnReader.hh" @@ -198,7 +196,9 @@ target_link_libraries(_orc PRIVATE ch_contrib::snappy ch_contrib::zlib ch_contrib::zstd) -target_include_directories(_orc SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR}) +target_include_directories(_orc SYSTEM BEFORE PUBLIC + ${ORC_INCLUDE_DIR} + "${ClickHouse_SOURCE_DIR}/contrib/arrow-cmake/cpp/src/orc/c++/include") target_include_directories(_orc SYSTEM BEFORE PUBLIC ${ORC_BUILD_INCLUDE_DIR}) target_include_directories(_orc SYSTEM PRIVATE ${ORC_SOURCE_SRC_DIR} @@ -212,8 +212,6 @@ target_include_directories(_orc SYSTEM PRIVATE set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/arrow") -configure_file("${LIBRARY_DIR}/util/config.h.cmake" "${CMAKE_CURRENT_BINARY_DIR}/cpp/src/arrow/util/config.h") - # arrow/cpp/src/arrow/CMakeLists.txt (ARROW_SRCS + ARROW_COMPUTE + ARROW_IPC) set(ARROW_SRCS "${LIBRARY_DIR}/array/array_base.cc" @@ -230,6 +228,8 @@ set(ARROW_SRCS "${LIBRARY_DIR}/array/builder_nested.cc" "${LIBRARY_DIR}/array/builder_primitive.cc" "${LIBRARY_DIR}/array/builder_union.cc" + "${LIBRARY_DIR}/array/builder_run_end.cc" + 
"${LIBRARY_DIR}/array/array_run_end.cc" "${LIBRARY_DIR}/array/concatenate.cc" "${LIBRARY_DIR}/array/data.cc" "${LIBRARY_DIR}/array/diff.cc" @@ -309,9 +309,12 @@ set(ARROW_SRCS "${LIBRARY_DIR}/util/debug.cc" "${LIBRARY_DIR}/util/tracing.cc" "${LIBRARY_DIR}/util/atfork_internal.cc" + "${LIBRARY_DIR}/util/crc32.cc" + "${LIBRARY_DIR}/util/hashing.cc" + "${LIBRARY_DIR}/util/ree_util.cc" + "${LIBRARY_DIR}/util/union_util.cc" "${LIBRARY_DIR}/vendored/base64.cpp" "${LIBRARY_DIR}/vendored/datetime/tz.cpp" - "${LIBRARY_DIR}/vendored/musl/strptime.c" "${LIBRARY_DIR}/vendored/uriparser/UriCommon.c" "${LIBRARY_DIR}/vendored/uriparser/UriCompare.c" @@ -328,39 +331,20 @@ set(ARROW_SRCS "${LIBRARY_DIR}/vendored/uriparser/UriRecompose.c" "${LIBRARY_DIR}/vendored/uriparser/UriResolve.c" "${LIBRARY_DIR}/vendored/uriparser/UriShorten.c" + "${LIBRARY_DIR}/vendored/double-conversion/bignum.cc" + "${LIBRARY_DIR}/vendored/double-conversion/bignum-dtoa.cc" + "${LIBRARY_DIR}/vendored/double-conversion/cached-powers.cc" + "${LIBRARY_DIR}/vendored/double-conversion/double-to-string.cc" + "${LIBRARY_DIR}/vendored/double-conversion/fast-dtoa.cc" + "${LIBRARY_DIR}/vendored/double-conversion/fixed-dtoa.cc" + "${LIBRARY_DIR}/vendored/double-conversion/string-to-double.cc" + "${LIBRARY_DIR}/vendored/double-conversion/strtod.cc" "${LIBRARY_DIR}/compute/api_aggregate.cc" "${LIBRARY_DIR}/compute/api_scalar.cc" "${LIBRARY_DIR}/compute/api_vector.cc" "${LIBRARY_DIR}/compute/cast.cc" "${LIBRARY_DIR}/compute/exec.cc" - "${LIBRARY_DIR}/compute/exec/accumulation_queue.cc" - "${LIBRARY_DIR}/compute/exec/accumulation_queue.h" - "${LIBRARY_DIR}/compute/exec/aggregate.cc" - "${LIBRARY_DIR}/compute/exec/aggregate_node.cc" - "${LIBRARY_DIR}/compute/exec/asof_join_node.cc" - "${LIBRARY_DIR}/compute/exec/bloom_filter.cc" - "${LIBRARY_DIR}/compute/exec/exec_plan.cc" - "${LIBRARY_DIR}/compute/exec/expression.cc" - "${LIBRARY_DIR}/compute/exec/filter_node.cc" - "${LIBRARY_DIR}/compute/exec/hash_join.cc" - "${LIBRARY_DIR}/compute/exec/hash_join_dict.cc" - "${LIBRARY_DIR}/compute/exec/hash_join_node.cc" - "${LIBRARY_DIR}/compute/exec/key_hash.cc" - "${LIBRARY_DIR}/compute/exec/key_map.cc" - "${LIBRARY_DIR}/compute/exec/map_node.cc" - "${LIBRARY_DIR}/compute/exec/options.cc" - "${LIBRARY_DIR}/compute/exec/order_by_impl.cc" - "${LIBRARY_DIR}/compute/exec/partition_util.cc" - "${LIBRARY_DIR}/compute/exec/project_node.cc" - "${LIBRARY_DIR}/compute/exec/query_context.cc" - "${LIBRARY_DIR}/compute/exec/sink_node.cc" - "${LIBRARY_DIR}/compute/exec/source_node.cc" - "${LIBRARY_DIR}/compute/exec/swiss_join.cc" - "${LIBRARY_DIR}/compute/exec/task_util.cc" - "${LIBRARY_DIR}/compute/exec/tpch_node.cc" - "${LIBRARY_DIR}/compute/exec/union_node.cc" - "${LIBRARY_DIR}/compute/exec/util.cc" "${LIBRARY_DIR}/compute/function.cc" "${LIBRARY_DIR}/compute/function_internal.cc" "${LIBRARY_DIR}/compute/kernel.cc" @@ -403,8 +387,13 @@ set(ARROW_SRCS "${LIBRARY_DIR}/compute/kernels/vector_select_k.cc" "${LIBRARY_DIR}/compute/kernels/vector_selection.cc" "${LIBRARY_DIR}/compute/kernels/vector_sort.cc" + "${LIBRARY_DIR}/compute/kernels/vector_selection_internal.cc" + "${LIBRARY_DIR}/compute/kernels/vector_selection_filter_internal.cc" + "${LIBRARY_DIR}/compute/kernels/vector_selection_take_internal.cc" "${LIBRARY_DIR}/compute/light_array.cc" "${LIBRARY_DIR}/compute/registry.cc" + "${LIBRARY_DIR}/compute/expression.cc" + "${LIBRARY_DIR}/compute/ordering.cc" "${LIBRARY_DIR}/compute/row/compare_internal.cc" "${LIBRARY_DIR}/compute/row/encode_internal.cc" 
"${LIBRARY_DIR}/compute/row/grouper.cc" @@ -459,7 +448,7 @@ target_link_libraries(_arrow PUBLIC _orc) add_dependencies(_arrow protoc) target_include_directories(_arrow SYSTEM BEFORE PUBLIC ${ARROW_SRC_DIR}) -target_include_directories(_arrow SYSTEM BEFORE PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/cpp/src") +target_include_directories(_arrow SYSTEM BEFORE PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/arrow-cmake/cpp/src") target_include_directories(_arrow SYSTEM PRIVATE ${ARROW_SRC_DIR}) target_include_directories(_arrow SYSTEM PRIVATE ${HDFS_INCLUDE_DIR}) @@ -488,10 +477,10 @@ set(PARQUET_SRCS "${LIBRARY_DIR}/exception.cc" "${LIBRARY_DIR}/file_reader.cc" "${LIBRARY_DIR}/file_writer.cc" + "${LIBRARY_DIR}/page_index.cc" "${LIBRARY_DIR}/level_conversion.cc" "${LIBRARY_DIR}/level_comparison.cc" "${LIBRARY_DIR}/metadata.cc" - "${LIBRARY_DIR}/murmur3.cc" "${LIBRARY_DIR}/platform.cc" "${LIBRARY_DIR}/printer.cc" "${LIBRARY_DIR}/properties.cc" @@ -500,6 +489,8 @@ set(PARQUET_SRCS "${LIBRARY_DIR}/stream_reader.cc" "${LIBRARY_DIR}/stream_writer.cc" "${LIBRARY_DIR}/types.cc" + "${LIBRARY_DIR}/bloom_filter_reader.cc" + "${LIBRARY_DIR}/xxhasher.cc" "${GEN_LIBRARY_DIR}/parquet_constants.cpp" "${GEN_LIBRARY_DIR}/parquet_types.cpp" diff --git a/contrib/arrow-cmake/cpp/src/arrow/util/config.h b/contrib/arrow-cmake/cpp/src/arrow/util/config.h new file mode 100644 index 00000000000..cacff7b16cb --- /dev/null +++ b/contrib/arrow-cmake/cpp/src/arrow/util/config.h @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+#define ARROW_VERSION_MAJOR 11
+#define ARROW_VERSION_MINOR 0
+#define ARROW_VERSION_PATCH 0
+#define ARROW_VERSION ((ARROW_VERSION_MAJOR * 1000) + ARROW_VERSION_MINOR) * 1000 + ARROW_VERSION_PATCH
+
+#define ARROW_VERSION_STRING "11.0.0"
+
+#define ARROW_SO_VERSION "1100"
+#define ARROW_FULL_SO_VERSION "1100.0.0"
+
+#define ARROW_CXX_COMPILER_ID "Clang"
+#define ARROW_CXX_COMPILER_VERSION "ClickHouse"
+#define ARROW_CXX_COMPILER_FLAGS ""
+
+#define ARROW_BUILD_TYPE ""
+
+#define ARROW_GIT_ID ""
+#define ARROW_GIT_DESCRIPTION ""
+
+#define ARROW_PACKAGE_KIND ""
+
+/* #undef ARROW_COMPUTE */
+/* #undef ARROW_CSV */
+/* #undef ARROW_CUDA */
+/* #undef ARROW_DATASET */
+/* #undef ARROW_FILESYSTEM */
+/* #undef ARROW_FLIGHT */
+/* #undef ARROW_FLIGHT_SQL */
+/* #undef ARROW_IPC */
+/* #undef ARROW_JEMALLOC */
+/* #undef ARROW_JEMALLOC_VENDORED */
+/* #undef ARROW_JSON */
+/* #undef ARROW_ORC */
+/* #undef ARROW_PARQUET */
+/* #undef ARROW_SUBSTRAIT */
+
+/* #undef ARROW_GCS */
+/* #undef ARROW_S3 */
+/* #undef ARROW_USE_NATIVE_INT128 */
+/* #undef ARROW_WITH_MUSL */
+/* #undef ARROW_WITH_OPENTELEMETRY */
+/* #undef ARROW_WITH_UCX */
+
+/* #undef GRPCPP_PP_INCLUDE */
diff --git a/contrib/arrow-cmake/cpp/src/orc/c++/include/orc/orc-config.hh b/contrib/arrow-cmake/cpp/src/orc/c++/include/orc/orc-config.hh
new file mode 100644
index 00000000000..1b0f71ddd40
--- /dev/null
+++ b/contrib/arrow-cmake/cpp/src/orc/c++/include/orc/orc-config.hh
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ORC_CONFIG_HH
+#define ORC_CONFIG_HH
+
+#define ORC_VERSION ""
+
+#define ORC_CXX_HAS_CSTDINT
+
+#ifdef ORC_CXX_HAS_CSTDINT
+  #include <cstdint>
+#else
+  #include <stdint.h>
+#endif
+
+// Following MACROS should be kept for backward compatibility.
+#define ORC_NOEXCEPT noexcept
+#define ORC_NULLPTR nullptr
+#define ORC_OVERRIDE override
+#define ORC_UNIQUE_PTR std::unique_ptr
+
+#endif
diff --git a/contrib/aws-cmake/AwsFeatureTests.cmake b/contrib/aws-cmake/AwsFeatureTests.cmake
index 54727e08d6b..e58b6634f42 100644
--- a/contrib/aws-cmake/AwsFeatureTests.cmake
+++ b/contrib/aws-cmake/AwsFeatureTests.cmake
@@ -1,114 +1,13 @@
 # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0.
-include(CheckCSourceRuns)
-
 option(USE_CPU_EXTENSIONS "Whenever possible, use functions optimized for CPUs with specific extensions (ex: SSE, AVX)." ON)
-# In the current (11/2/21) state of mingw64, the packaged gcc is not capable of emitting properly aligned avx2 instructions under certain circumstances.
-# This leads to crashes for windows builds using mingw64 when invoking the avx2-enabled versions of certain functions. Until we can find a better
-# work-around, disable avx2 (and all other extensions) in mingw builds.
-# -# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412 -# -if (MINGW) - message(STATUS "MINGW detected! Disabling avx2 and other CPU extensions") - set(USE_CPU_EXTENSIONS OFF) -endif() +if (ARCH_AMD64) + set (AWS_ARCH_INTEL 1) +elseif (ARCH_AARCH64) + set (AWS_ARCH_ARM64 1) +endif () -if(NOT CMAKE_CROSSCOMPILING) - check_c_source_runs(" - #include - bool foo(int a, int b, int *c) { - return __builtin_mul_overflow(a, b, c); - } - - int main() { - int out; - if (foo(1, 2, &out)) { - return 0; - } - - return 0; - }" AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS) - - if (USE_CPU_EXTENSIONS) - check_c_source_runs(" - int main() { - int foo = 42; - _mulx_u32(1, 2, &foo); - return foo != 2; - }" AWS_HAVE_MSVC_MULX) - endif() - -endif() - -check_c_source_compiles(" - #include - #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) - int main() { - return 0; - } - #else - it's not windows desktop - #endif -" AWS_HAVE_WINAPI_DESKTOP) - -check_c_source_compiles(" - int main() { -#if !(defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86)) -# error \"not intel\" -#endif - return 0; - } -" AWS_ARCH_INTEL) - -check_c_source_compiles(" - int main() { -#if !(defined(__aarch64__) || defined(_M_ARM64)) -# error \"not arm64\" -#endif - return 0; - } -" AWS_ARCH_ARM64) - -check_c_source_compiles(" - int main() { -#if !(defined(__arm__) || defined(_M_ARM)) -# error \"not arm\" -#endif - return 0; - } -" AWS_ARCH_ARM32) - -check_c_source_compiles(" -int main() { - int foo = 42, bar = 24; - __asm__ __volatile__(\"\":\"=r\"(foo):\"r\"(bar):\"memory\"); -}" AWS_HAVE_GCC_INLINE_ASM) - -check_c_source_compiles(" -#include -int main() { -#ifdef __linux__ - getauxval(AT_HWCAP); - getauxval(AT_HWCAP2); -#endif - return 0; -}" AWS_HAVE_AUXV) - -string(REGEX MATCH "^(aarch64|arm)" ARM_CPU "${CMAKE_SYSTEM_PROCESSOR}") -if(NOT LEGACY_COMPILER_SUPPORT OR ARM_CPU) - check_c_source_compiles(" - #include - int main() { - backtrace(NULL, 0); - return 0; - }" AWS_HAVE_EXECINFO) -endif() - -check_c_source_compiles(" -#include -int main() { - return 1; -}" AWS_HAVE_LINUX_IF_LINK_H) +set (AWS_HAVE_GCC_INLINE_ASM 1) +set (AWS_HAVE_AUXV 1) diff --git a/contrib/aws-cmake/AwsSIMD.cmake b/contrib/aws-cmake/AwsSIMD.cmake index bd6f4064e78..24f7628e86f 100644 --- a/contrib/aws-cmake/AwsSIMD.cmake +++ b/contrib/aws-cmake/AwsSIMD.cmake @@ -1,54 +1,13 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
-include(CheckCCompilerFlag) -include(CheckIncludeFile) - if (USE_CPU_EXTENSIONS) - if (MSVC) - check_c_compiler_flag("/arch:AVX2" HAVE_M_AVX2_FLAG) - if (HAVE_M_AVX2_FLAG) - set(AVX2_CFLAGS "/arch:AVX2") - endif() - else() - check_c_compiler_flag(-mavx2 HAVE_M_AVX2_FLAG) - if (HAVE_M_AVX2_FLAG) - set(AVX2_CFLAGS "-mavx -mavx2") - endif() + if (ENABLE_AVX2) + set (AVX2_CFLAGS "-mavx -mavx2") + set (HAVE_AVX2_INTRINSICS 1) + set (HAVE_MM256_EXTRACT_EPI64 1) endif() - - - cmake_push_check_state() - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${AVX2_CFLAGS}") - - check_c_source_compiles(" - #include - #include - #include - - int main() { - __m256i vec; - memset(&vec, 0, sizeof(vec)); - - _mm256_shuffle_epi8(vec, vec); - _mm256_set_epi32(1,2,3,4,5,6,7,8); - _mm256_permutevar8x32_epi32(vec, vec); - - return 0; - }" HAVE_AVX2_INTRINSICS) - - check_c_source_compiles(" - #include - #include - - int main() { - __m256i vec; - memset(&vec, 0, sizeof(vec)); - return (int)_mm256_extract_epi64(vec, 2); - }" HAVE_MM256_EXTRACT_EPI64) - - cmake_pop_check_state() -endif() # USE_CPU_EXTENSIONS +endif() macro(simd_add_definition_if target definition) if(${definition}) diff --git a/contrib/aws-cmake/AwsThreadAffinity.cmake b/contrib/aws-cmake/AwsThreadAffinity.cmake index 9e53481272c..7f30fb71b43 100644 --- a/contrib/aws-cmake/AwsThreadAffinity.cmake +++ b/contrib/aws-cmake/AwsThreadAffinity.cmake @@ -1,50 +1,9 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. -include(CheckSymbolExists) - # Check if the platform supports setting thread affinity # (important for hitting full NIC entitlement on NUMA architectures) function(aws_set_thread_affinity_method target) - - # Non-POSIX, Android, and Apple platforms do not support thread affinity. - if (NOT UNIX OR ANDROID OR APPLE) - target_compile_definitions(${target} PRIVATE - -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE) - return() - endif() - - cmake_push_check_state() - list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE) - list(APPEND CMAKE_REQUIRED_LIBRARIES pthread) - - set(headers "pthread.h") - # BSDs put nonportable pthread declarations in a separate header. - if(CMAKE_SYSTEM_NAME MATCHES BSD) - set(headers "${headers};pthread_np.h") - endif() - - # Using pthread attrs is the preferred method, but is glibc-specific. - check_symbol_exists(pthread_attr_setaffinity_np "${headers}" USE_PTHREAD_ATTR_SETAFFINITY) - if (USE_PTHREAD_ATTR_SETAFFINITY) - target_compile_definitions(${target} PRIVATE - -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD_ATTR) - return() - endif() - - # This method is still nonportable, but is supported by musl and BSDs. - check_symbol_exists(pthread_setaffinity_np "${headers}" USE_PTHREAD_SETAFFINITY) - if (USE_PTHREAD_SETAFFINITY) - target_compile_definitions(${target} PRIVATE - -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD) - return() - endif() - - # If we got here, we expected thread affinity support but didn't find it. - # We still build with degraded NUMA performance, but show a warning. - message(WARNING "No supported method for setting thread affinity") - target_compile_definitions(${target} PRIVATE - -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE) - - cmake_pop_check_state() + # This code has been cut, because I don't care about it. 
+ target_compile_definitions(${target} PRIVATE -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE) endfunction() diff --git a/contrib/aws-cmake/AwsThreadName.cmake b/contrib/aws-cmake/AwsThreadName.cmake index a67416b4f83..e17759435ed 100644 --- a/contrib/aws-cmake/AwsThreadName.cmake +++ b/contrib/aws-cmake/AwsThreadName.cmake @@ -1,61 +1,13 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. -include(CheckSymbolExists) - # Check how the platform supports setting thread name function(aws_set_thread_name_method target) - - if (WINDOWS) - # On Windows we do a runtime check, instead of compile-time check - return() - elseif (APPLE) + if (APPLE) # All Apple platforms we support have the same function, so no need for compile-time check. return() endif() - cmake_push_check_state() - list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE) - list(APPEND CMAKE_REQUIRED_LIBRARIES pthread) - - # The start of the test program - set(c_source_start " - #define _GNU_SOURCE - #include - - #if defined(__FreeBSD__) || defined(__NETBSD__) - #include - #endif - - int main() { - pthread_t thread_id; - ") - - # The end of the test program - set(c_source_end "}") - # pthread_setname_np() usually takes 2 args - check_c_source_compiles(" - ${c_source_start} - pthread_setname_np(thread_id, \"asdf\"); - ${c_source_end}" - PTHREAD_SETNAME_TAKES_2ARGS) - if (PTHREAD_SETNAME_TAKES_2ARGS) - target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_2ARGS) - return() - endif() - - # But on NetBSD it takes 3! - check_c_source_compiles(" - ${c_source_start} - pthread_setname_np(thread_id, \"asdf\", NULL); - ${c_source_end} - " PTHREAD_SETNAME_TAKES_3ARGS) - if (PTHREAD_SETNAME_TAKES_3ARGS) - target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_3ARGS) - return() - endif() - - # And on many older/weirder platforms it's just not supported - cmake_pop_check_state() + target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_2ARGS) endfunction() diff --git a/contrib/azure-cmake/CMakeLists.txt b/contrib/azure-cmake/CMakeLists.txt index 7aba81259d3..bb44c993e79 100644 --- a/contrib/azure-cmake/CMakeLists.txt +++ b/contrib/azure-cmake/CMakeLists.txt @@ -48,9 +48,8 @@ set(AZURE_SDK_INCLUDES "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/inc/" ) -include("${AZURE_DIR}/cmake-modules/AzureTransportAdapters.cmake") - add_library(_azure_sdk ${AZURE_SDK_UNIFIED_SRC}) +target_compile_definitions(_azure_sdk PRIVATE BUILD_CURL_HTTP_TRANSPORT_ADAPTER) # Originally, on Windows azure-core is built with bcrypt and crypt32 by default if (TARGET OpenSSL::SSL) diff --git a/contrib/cassandra-cmake/CMakeLists.txt b/contrib/cassandra-cmake/CMakeLists.txt index 32611e0e151..0082364c130 100644 --- a/contrib/cassandra-cmake/CMakeLists.txt +++ b/contrib/cassandra-cmake/CMakeLists.txt @@ -68,8 +68,7 @@ list(APPEND INCLUDE_DIRS ${CASS_SRC_DIR}/third_party/hdr_histogram ${CASS_SRC_DIR}/third_party/http-parser ${CASS_SRC_DIR}/third_party/mt19937_64 - ${CASS_SRC_DIR}/third_party/rapidjson/rapidjson - ${CASS_SRC_DIR}/third_party/sparsehash/src) + ${CASS_SRC_DIR}/third_party/rapidjson/rapidjson) list(APPEND INCLUDE_DIRS ${CASS_INCLUDE_DIR} ${CASS_SRC_DIR}) @@ -83,10 +82,6 @@ set(HAVE_MEMCPY 1) set(HAVE_LONG_LONG 1) set(HAVE_UINT16_T 1) -configure_file("${CASS_SRC_DIR}/third_party/sparsehash/config.h.cmake" "${CMAKE_CURRENT_BINARY_DIR}/sparsehash/internal/sparseconfig.h") - - - # Determine random availability if (OS_LINUX) #set (HAVE_GETRANDOM 1) - not 
on every Linux kernel @@ -116,17 +111,17 @@ configure_file( ${CASS_ROOT_DIR}/driver_config.hpp.in ${CMAKE_CURRENT_BINARY_DIR}/driver_config.hpp) - add_library(_cassandra ${SOURCES} $ $ $) -target_link_libraries(_cassandra ch_contrib::zlib ch_contrib::minizip) +target_link_libraries(_cassandra ch_contrib::zlib ch_contrib::minizip ch_contrib::sparsehash) target_include_directories(_cassandra PRIVATE ${CMAKE_CURRENT_BINARY_DIR} ${INCLUDE_DIRS}) target_include_directories(_cassandra SYSTEM BEFORE PUBLIC ${CASS_INCLUDE_DIR}) target_compile_definitions(_cassandra PRIVATE CASS_BUILDING) +target_compile_definitions(_cassandra PRIVATE -DSPARSEHASH_HASH=std::hash -Dsparsehash=google) target_link_libraries(_cassandra ch_contrib::uv) diff --git a/contrib/fastops-cmake/CMakeLists.txt b/contrib/fastops-cmake/CMakeLists.txt index e9aa4803583..1b09b736b2a 100644 --- a/contrib/fastops-cmake/CMakeLists.txt +++ b/contrib/fastops-cmake/CMakeLists.txt @@ -13,12 +13,10 @@ set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/fastops") set(SRCS "") -if(HAVE_AVX) +if(ARCH_AMD64) set (SRCS ${SRCS} "${LIBRARY_DIR}/fastops/avx/ops_avx.cpp") set_source_files_properties("${LIBRARY_DIR}/fastops/avx/ops_avx.cpp" PROPERTIES COMPILE_FLAGS "-mavx -DNO_AVX2") -endif() -if(HAVE_AVX2) set (SRCS ${SRCS} "${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp") set_source_files_properties("${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp" PROPERTIES COMPILE_FLAGS "-mavx2 -mfma") endif() diff --git a/contrib/google-protobuf b/contrib/google-protobuf index 2a4fa1a4e95..0862007f6ca 160000 --- a/contrib/google-protobuf +++ b/contrib/google-protobuf @@ -1 +1 @@ -Subproject commit 2a4fa1a4e95012d754ac55d43c8bc462dd1c78a8 +Subproject commit 0862007f6ca1f5723c58f10f0ca34f3f25a63b2e diff --git a/contrib/google-protobuf-cmake/CMakeLists.txt b/contrib/google-protobuf-cmake/CMakeLists.txt index 268f0fbe0e4..727121e60b5 100644 --- a/contrib/google-protobuf-cmake/CMakeLists.txt +++ b/contrib/google-protobuf-cmake/CMakeLists.txt @@ -20,7 +20,6 @@ endif() set(protobuf_source_dir "${ClickHouse_SOURCE_DIR}/contrib/google-protobuf") set(protobuf_binary_dir "${ClickHouse_BINARY_DIR}/contrib/google-protobuf") - add_definitions(-DGOOGLE_PROTOBUF_CMAKE_BUILD) add_definitions(-DHAVE_PTHREAD) @@ -30,17 +29,69 @@ include_directories( ${protobuf_binary_dir} ${protobuf_source_dir}/src) +add_library(utf8_range + ${protobuf_source_dir}/third_party/utf8_range/naive.c + ${protobuf_source_dir}/third_party/utf8_range/range2-neon.c + ${protobuf_source_dir}/third_party/utf8_range/range2-sse.c +) +include_directories(${protobuf_source_dir}/third_party/utf8_range) + +add_library(utf8_validity + ${protobuf_source_dir}/third_party/utf8_range/utf8_validity.cc +) +target_link_libraries(utf8_validity PUBLIC absl::strings) + +set(protobuf_absl_used_targets + absl::absl_check + absl::absl_log + absl::algorithm + absl::base + absl::bind_front + absl::bits + absl::btree + absl::cleanup + absl::cord + absl::core_headers + absl::debugging + absl::die_if_null + absl::dynamic_annotations + absl::flags + absl::flat_hash_map + absl::flat_hash_set + absl::function_ref + absl::hash + absl::layout + absl::log_initialize + absl::log_severity + absl::memory + absl::node_hash_map + absl::node_hash_set + absl::optional + absl::span + absl::status + absl::statusor + absl::strings + absl::synchronization + absl::time + absl::type_traits + absl::utility + absl::variant +) + set(libprotobuf_lite_files ${protobuf_source_dir}/src/google/protobuf/any_lite.cc ${protobuf_source_dir}/src/google/protobuf/arena.cc + 
${protobuf_source_dir}/src/google/protobuf/arena_align.cc ${protobuf_source_dir}/src/google/protobuf/arenastring.cc + ${protobuf_source_dir}/src/google/protobuf/arenaz_sampler.cc ${protobuf_source_dir}/src/google/protobuf/extension_set.cc ${protobuf_source_dir}/src/google/protobuf/generated_enum_util.cc + ${protobuf_source_dir}/src/google/protobuf/generated_message_tctable_lite.cc ${protobuf_source_dir}/src/google/protobuf/generated_message_util.cc ${protobuf_source_dir}/src/google/protobuf/implicit_weak_message.cc + ${protobuf_source_dir}/src/google/protobuf/inlined_string_field.cc ${protobuf_source_dir}/src/google/protobuf/io/coded_stream.cc ${protobuf_source_dir}/src/google/protobuf/io/io_win32.cc - ${protobuf_source_dir}/src/google/protobuf/io/strtod.cc ${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream.cc ${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream_impl.cc ${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream_impl_lite.cc @@ -48,21 +99,15 @@ set(libprotobuf_lite_files ${protobuf_source_dir}/src/google/protobuf/message_lite.cc ${protobuf_source_dir}/src/google/protobuf/parse_context.cc ${protobuf_source_dir}/src/google/protobuf/repeated_field.cc - ${protobuf_source_dir}/src/google/protobuf/stubs/bytestream.cc + ${protobuf_source_dir}/src/google/protobuf/repeated_ptr_field.cc ${protobuf_source_dir}/src/google/protobuf/stubs/common.cc - ${protobuf_source_dir}/src/google/protobuf/stubs/int128.cc - ${protobuf_source_dir}/src/google/protobuf/stubs/status.cc - ${protobuf_source_dir}/src/google/protobuf/stubs/statusor.cc - ${protobuf_source_dir}/src/google/protobuf/stubs/stringpiece.cc - ${protobuf_source_dir}/src/google/protobuf/stubs/stringprintf.cc - ${protobuf_source_dir}/src/google/protobuf/stubs/structurally_valid.cc - ${protobuf_source_dir}/src/google/protobuf/stubs/strutil.cc - ${protobuf_source_dir}/src/google/protobuf/stubs/time.cc ${protobuf_source_dir}/src/google/protobuf/wire_format_lite.cc ) add_library(_libprotobuf-lite ${libprotobuf_lite_files}) -target_link_libraries(_libprotobuf-lite pthread) +target_link_libraries(_libprotobuf-lite + pthread + utf8_validity) if(${CMAKE_SYSTEM_NAME} STREQUAL "Android") target_link_libraries(_libprotobuf-lite log) endif() @@ -71,67 +116,93 @@ add_library(protobuf::libprotobuf-lite ALIAS _libprotobuf-lite) set(libprotobuf_files - ${protobuf_source_dir}/src/google/protobuf/any.cc ${protobuf_source_dir}/src/google/protobuf/any.pb.cc ${protobuf_source_dir}/src/google/protobuf/api.pb.cc + ${protobuf_source_dir}/src/google/protobuf/duration.pb.cc + ${protobuf_source_dir}/src/google/protobuf/empty.pb.cc + ${protobuf_source_dir}/src/google/protobuf/field_mask.pb.cc + ${protobuf_source_dir}/src/google/protobuf/source_context.pb.cc + ${protobuf_source_dir}/src/google/protobuf/struct.pb.cc + ${protobuf_source_dir}/src/google/protobuf/timestamp.pb.cc + ${protobuf_source_dir}/src/google/protobuf/type.pb.cc + ${protobuf_source_dir}/src/google/protobuf/wrappers.pb.cc + ${protobuf_source_dir}/src/google/protobuf/any.cc + ${protobuf_source_dir}/src/google/protobuf/any_lite.cc + ${protobuf_source_dir}/src/google/protobuf/arena.cc + ${protobuf_source_dir}/src/google/protobuf/arena_align.cc + ${protobuf_source_dir}/src/google/protobuf/arenastring.cc + ${protobuf_source_dir}/src/google/protobuf/arenaz_sampler.cc ${protobuf_source_dir}/src/google/protobuf/compiler/importer.cc ${protobuf_source_dir}/src/google/protobuf/compiler/parser.cc + ${protobuf_source_dir}/src/google/protobuf/cpp_features.pb.cc 
${protobuf_source_dir}/src/google/protobuf/descriptor.cc ${protobuf_source_dir}/src/google/protobuf/descriptor.pb.cc ${protobuf_source_dir}/src/google/protobuf/descriptor_database.cc - ${protobuf_source_dir}/src/google/protobuf/duration.pb.cc ${protobuf_source_dir}/src/google/protobuf/dynamic_message.cc - ${protobuf_source_dir}/src/google/protobuf/empty.pb.cc + ${protobuf_source_dir}/src/google/protobuf/extension_set.cc ${protobuf_source_dir}/src/google/protobuf/extension_set_heavy.cc - ${protobuf_source_dir}/src/google/protobuf/field_mask.pb.cc + ${protobuf_source_dir}/src/google/protobuf/feature_resolver.cc + ${protobuf_source_dir}/src/google/protobuf/generated_enum_util.cc ${protobuf_source_dir}/src/google/protobuf/generated_message_bases.cc ${protobuf_source_dir}/src/google/protobuf/generated_message_reflection.cc + ${protobuf_source_dir}/src/google/protobuf/generated_message_tctable_full.cc + ${protobuf_source_dir}/src/google/protobuf/generated_message_tctable_gen.cc + ${protobuf_source_dir}/src/google/protobuf/generated_message_tctable_lite.cc + ${protobuf_source_dir}/src/google/protobuf/generated_message_util.cc + ${protobuf_source_dir}/src/google/protobuf/implicit_weak_message.cc + ${protobuf_source_dir}/src/google/protobuf/inlined_string_field.cc + ${protobuf_source_dir}/src/google/protobuf/io/coded_stream.cc ${protobuf_source_dir}/src/google/protobuf/io/gzip_stream.cc + ${protobuf_source_dir}/src/google/protobuf/io/io_win32.cc ${protobuf_source_dir}/src/google/protobuf/io/printer.cc + ${protobuf_source_dir}/src/google/protobuf/io/strtod.cc ${protobuf_source_dir}/src/google/protobuf/io/tokenizer.cc + ${protobuf_source_dir}/src/google/protobuf/io/zero_copy_sink.cc + ${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream.cc + ${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream_impl.cc + ${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream_impl_lite.cc + ${protobuf_source_dir}/src/google/protobuf/json/internal/lexer.cc + ${protobuf_source_dir}/src/google/protobuf/json/internal/message_path.cc + ${protobuf_source_dir}/src/google/protobuf/json/internal/parser.cc + ${protobuf_source_dir}/src/google/protobuf/json/internal/unparser.cc + ${protobuf_source_dir}/src/google/protobuf/json/internal/untyped_message.cc + ${protobuf_source_dir}/src/google/protobuf/json/internal/writer.cc + ${protobuf_source_dir}/src/google/protobuf/json/internal/zero_copy_buffered_stream.cc + ${protobuf_source_dir}/src/google/protobuf/json/json.cc + ${protobuf_source_dir}/src/google/protobuf/map.cc ${protobuf_source_dir}/src/google/protobuf/map_field.cc ${protobuf_source_dir}/src/google/protobuf/message.cc + ${protobuf_source_dir}/src/google/protobuf/message_lite.cc + ${protobuf_source_dir}/src/google/protobuf/parse_context.cc + ${protobuf_source_dir}/src/google/protobuf/port.cc + ${protobuf_source_dir}/src/google/protobuf/raw_ptr.cc + ${protobuf_source_dir}/src/google/protobuf/reflection_mode.cc ${protobuf_source_dir}/src/google/protobuf/reflection_ops.cc + ${protobuf_source_dir}/src/google/protobuf/repeated_field.cc ${protobuf_source_dir}/src/google/protobuf/repeated_ptr_field.cc ${protobuf_source_dir}/src/google/protobuf/service.cc - ${protobuf_source_dir}/src/google/protobuf/source_context.pb.cc - ${protobuf_source_dir}/src/google/protobuf/struct.pb.cc - ${protobuf_source_dir}/src/google/protobuf/stubs/substitute.cc + ${protobuf_source_dir}/src/google/protobuf/stubs/common.cc ${protobuf_source_dir}/src/google/protobuf/text_format.cc - 
${protobuf_source_dir}/src/google/protobuf/timestamp.pb.cc - ${protobuf_source_dir}/src/google/protobuf/type.pb.cc ${protobuf_source_dir}/src/google/protobuf/unknown_field_set.cc ${protobuf_source_dir}/src/google/protobuf/util/delimited_message_util.cc ${protobuf_source_dir}/src/google/protobuf/util/field_comparator.cc ${protobuf_source_dir}/src/google/protobuf/util/field_mask_util.cc - ${protobuf_source_dir}/src/google/protobuf/util/internal/datapiece.cc - ${protobuf_source_dir}/src/google/protobuf/util/internal/default_value_objectwriter.cc - ${protobuf_source_dir}/src/google/protobuf/util/internal/error_listener.cc - ${protobuf_source_dir}/src/google/protobuf/util/internal/field_mask_utility.cc - ${protobuf_source_dir}/src/google/protobuf/util/internal/json_escaping.cc - ${protobuf_source_dir}/src/google/protobuf/util/internal/json_objectwriter.cc - ${protobuf_source_dir}/src/google/protobuf/util/internal/json_stream_parser.cc - ${protobuf_source_dir}/src/google/protobuf/util/internal/object_writer.cc - ${protobuf_source_dir}/src/google/protobuf/util/internal/proto_writer.cc - ${protobuf_source_dir}/src/google/protobuf/util/internal/protostream_objectsource.cc - ${protobuf_source_dir}/src/google/protobuf/util/internal/protostream_objectwriter.cc - ${protobuf_source_dir}/src/google/protobuf/util/internal/type_info.cc - ${protobuf_source_dir}/src/google/protobuf/util/internal/type_info_test_helper.cc - ${protobuf_source_dir}/src/google/protobuf/util/internal/utility.cc - ${protobuf_source_dir}/src/google/protobuf/util/json_util.cc ${protobuf_source_dir}/src/google/protobuf/util/message_differencer.cc ${protobuf_source_dir}/src/google/protobuf/util/time_util.cc ${protobuf_source_dir}/src/google/protobuf/util/type_resolver_util.cc ${protobuf_source_dir}/src/google/protobuf/wire_format.cc - ${protobuf_source_dir}/src/google/protobuf/wrappers.pb.cc + ${protobuf_source_dir}/src/google/protobuf/wire_format_lite.cc ) add_library(_libprotobuf ${libprotobuf_lite_files} ${libprotobuf_files}) if (ENABLE_FUZZING) target_compile_options(_libprotobuf PRIVATE "-fsanitize-recover=all") endif() -target_link_libraries(_libprotobuf pthread) -target_link_libraries(_libprotobuf ch_contrib::zlib) +target_link_libraries(_libprotobuf + pthread + ch_contrib::zlib + utf8_validity + ${protobuf_absl_used_targets}) if(${CMAKE_SYSTEM_NAME} STREQUAL "Android") target_link_libraries(_libprotobuf log) endif() @@ -140,23 +211,26 @@ add_library(protobuf::libprotobuf ALIAS _libprotobuf) set(libprotoc_files + ${protobuf_source_dir}/src/google/protobuf/compiler/allowlists/editions.cc ${protobuf_source_dir}/src/google/protobuf/compiler/code_generator.cc ${protobuf_source_dir}/src/google/protobuf/compiler/command_line_interface.cc ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/enum.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/enum_field.cc ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/extension.cc ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/cord_field.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/enum_field.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/map_field.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/message_field.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/primitive_field.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/string_field.cc 
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/file.cc ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/generator.cc ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/helpers.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/map_field.cc ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/message.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/message_field.cc ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/padding_optimizer.cc ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/parse_function_generator.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/primitive_field.cc ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/service.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/string_field.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/tracker.cc ${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_doc_comment.cc ${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_enum.cc ${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_enum_field.cc @@ -173,6 +247,7 @@ set(libprotoc_files ${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_repeated_primitive_field.cc ${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_source_generator_base.cc ${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_wrapper_field.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/csharp/names.cc ${protobuf_source_dir}/src/google/protobuf/compiler/java/context.cc ${protobuf_source_dir}/src/google/protobuf/compiler/java/doc_comment.cc ${protobuf_source_dir}/src/google/protobuf/compiler/java/enum.cc @@ -195,38 +270,55 @@ set(libprotoc_files ${protobuf_source_dir}/src/google/protobuf/compiler/java/message_field.cc ${protobuf_source_dir}/src/google/protobuf/compiler/java/message_field_lite.cc ${protobuf_source_dir}/src/google/protobuf/compiler/java/message_lite.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/java/message_serialization.cc ${protobuf_source_dir}/src/google/protobuf/compiler/java/name_resolver.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/java/names.cc ${protobuf_source_dir}/src/google/protobuf/compiler/java/primitive_field.cc ${protobuf_source_dir}/src/google/protobuf/compiler/java/primitive_field_lite.cc ${protobuf_source_dir}/src/google/protobuf/compiler/java/service.cc ${protobuf_source_dir}/src/google/protobuf/compiler/java/shared_code_generator.cc ${protobuf_source_dir}/src/google/protobuf/compiler/java/string_field.cc ${protobuf_source_dir}/src/google/protobuf/compiler/java/string_field_lite.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_enum.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_enum_field.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_extension.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_field.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_file.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_generator.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_map_field.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_message.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_message_field.cc - 
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_oneof.cc - ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_primitive_field.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/enum.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/enum_field.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/extension.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/field.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/file.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/generator.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/helpers.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/import_writer.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/line_consumer.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/map_field.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/message.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/message_field.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/names.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/oneof.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/primitive_field.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/text_format_decode_data.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/php/names.cc ${protobuf_source_dir}/src/google/protobuf/compiler/php/php_generator.cc ${protobuf_source_dir}/src/google/protobuf/compiler/plugin.cc ${protobuf_source_dir}/src/google/protobuf/compiler/plugin.pb.cc ${protobuf_source_dir}/src/google/protobuf/compiler/python/generator.cc ${protobuf_source_dir}/src/google/protobuf/compiler/python/helpers.cc ${protobuf_source_dir}/src/google/protobuf/compiler/python/pyi_generator.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/retention.cc ${protobuf_source_dir}/src/google/protobuf/compiler/ruby/ruby_generator.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/rust/accessors/accessors.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/rust/accessors/singular_bytes.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/rust/accessors/singular_scalar.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/rust/context.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/rust/generator.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/rust/message.cc + ${protobuf_source_dir}/src/google/protobuf/compiler/rust/naming.cc ${protobuf_source_dir}/src/google/protobuf/compiler/subprocess.cc ${protobuf_source_dir}/src/google/protobuf/compiler/zip_writer.cc ) add_library(_libprotoc ${libprotoc_files}) -target_link_libraries(_libprotoc _libprotobuf) +target_link_libraries(_libprotoc + _libprotobuf + ${protobuf_absl_used_targets}) add_library(protobuf::libprotoc ALIAS _libprotoc) set(protoc_files ${protobuf_source_dir}/src/google/protobuf/compiler/main.cc) @@ -235,7 +327,11 @@ if (CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME AND CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL CMAKE_SYSTEM_PROCESSOR) add_executable(protoc ${protoc_files}) - target_link_libraries(protoc _libprotoc _libprotobuf pthread) + target_link_libraries(protoc _libprotoc + _libprotobuf + pthread + utf8_validity + ${protobuf_absl_used_targets}) add_executable(protobuf::protoc ALIAS protoc) if (ENABLE_FUZZING) @@ -255,6 +351,8 @@ else () # This is quite ugly but I cannot make dependencies work propery. 
+ set(abseil_source_dir "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp") + execute_process( COMMAND mkdir -p ${PROTOC_BUILD_DIR} COMMAND_ECHO STDOUT) @@ -269,7 +367,9 @@ else () "-Dprotobuf_BUILD_CONFORMANCE=0" "-Dprotobuf_BUILD_EXAMPLES=0" "-Dprotobuf_BUILD_PROTOC_BINARIES=1" - "${protobuf_source_dir}/cmake" + "-DABSL_ROOT_DIR=${abseil_source_dir}" + "-DABSL_ENABLE_INSTALL=0" + "${protobuf_source_dir}" WORKING_DIRECTORY "${PROTOC_BUILD_DIR}" COMMAND_ECHO STDOUT) @@ -278,38 +378,6 @@ else () COMMAND_ECHO STDOUT) endif () -# add_custom_command ( -# OUTPUT ${PROTOC_BUILD_DIR} -# COMMAND mkdir -p ${PROTOC_BUILD_DIR}) -# -# add_custom_command ( -# OUTPUT "${PROTOC_BUILD_DIR}/CMakeCache.txt" -# -# COMMAND ${CMAKE_COMMAND} -# -G"${CMAKE_GENERATOR}" -# -DCMAKE_MAKE_PROGRAM="${CMAKE_MAKE_PROGRAM}" -# -DCMAKE_C_COMPILER="${CMAKE_C_COMPILER}" -# -DCMAKE_CXX_COMPILER="${CMAKE_CXX_COMPILER}" -# -Dprotobuf_BUILD_TESTS=0 -# -Dprotobuf_BUILD_CONFORMANCE=0 -# -Dprotobuf_BUILD_EXAMPLES=0 -# -Dprotobuf_BUILD_PROTOC_BINARIES=1 -# "${protobuf_source_dir}/cmake" -# -# DEPENDS "${PROTOC_BUILD_DIR}" -# WORKING_DIRECTORY "${PROTOC_BUILD_DIR}" -# COMMENT "Configuring 'protoc' for host architecture." -# USES_TERMINAL) -# -# add_custom_command ( -# OUTPUT "${PROTOC_BUILD_DIR}/protoc" -# COMMAND ${CMAKE_COMMAND} --build "${PROTOC_BUILD_DIR}" -# DEPENDS "${PROTOC_BUILD_DIR}/CMakeCache.txt" -# COMMENT "Building 'protoc' for host architecture." -# USES_TERMINAL) -# -# add_custom_target (protoc-host DEPENDS "${PROTOC_BUILD_DIR}/protoc") - add_executable(protoc IMPORTED GLOBAL) set_target_properties (protoc PROPERTIES IMPORTED_LOCATION "${PROTOC_BUILD_DIR}/protoc") add_dependencies(protoc "${PROTOC_BUILD_DIR}/protoc") diff --git a/contrib/grpc b/contrib/grpc index b723ecae099..77b2737a709 160000 --- a/contrib/grpc +++ b/contrib/grpc @@ -1 +1 @@ -Subproject commit b723ecae0991bb873fe87a595dfb187178733fde +Subproject commit 77b2737a709d43d8c6895e3f03ca62b00bd9201c diff --git a/contrib/grpc-cmake/CMakeLists.txt b/contrib/grpc-cmake/CMakeLists.txt index 09ed2fe3f80..b8b5f5580c4 100644 --- a/contrib/grpc-cmake/CMakeLists.txt +++ b/contrib/grpc-cmake/CMakeLists.txt @@ -9,50 +9,14 @@ endif() set(_gRPC_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/grpc") set(_gRPC_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/grpc") -# Use re2 from ClickHouse contrib, not from gRPC third_party. -set(gRPC_RE2_PROVIDER "clickhouse" CACHE STRING "" FORCE) -set(_gRPC_RE2_INCLUDE_DIR "") -set(_gRPC_RE2_LIBRARIES ch_contrib::re2) - -# Use zlib from ClickHouse contrib, not from gRPC third_party. -set(gRPC_ZLIB_PROVIDER "clickhouse" CACHE STRING "" FORCE) -set(_gRPC_ZLIB_INCLUDE_DIR "") -set(_gRPC_ZLIB_LIBRARIES ch_contrib::zlib) - -# Use protobuf from ClickHouse contrib, not from gRPC third_party. -set(gRPC_PROTOBUF_PROVIDER "clickhouse" CACHE STRING "" FORCE) -set(_gRPC_PROTOBUF_LIBRARIES ch_contrib::protobuf) -set(_gRPC_PROTOBUF_PROTOC "protoc") -set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $) -set(_gRPC_PROTOBUF_PROTOC_LIBRARIES ch_contrib::protoc) - if(TARGET OpenSSL::SSL) set(gRPC_USE_UNSECURE_LIBRARIES FALSE) else() set(gRPC_USE_UNSECURE_LIBRARIES TRUE) endif() -# Use OpenSSL from ClickHouse contrib, not from gRPC third_party. -set(gRPC_SSL_PROVIDER "clickhouse" CACHE STRING "" FORCE) -set(_gRPC_SSL_INCLUDE_DIR "") -set(_gRPC_SSL_LIBRARIES OpenSSL::Crypto OpenSSL::SSL) - -# Use abseil-cpp from ClickHouse contrib, not from gRPC third_party. -set(gRPC_ABSL_PROVIDER "clickhouse" CACHE STRING "" FORCE) - -# We don't want to build C# extensions. 
-set(gRPC_BUILD_CSHARP_EXT OFF) - -# TODO: Remove this. We generally like to compile with C++23 but grpc isn't ready yet. -set (CMAKE_CXX_STANDARD 20) - -set(_gRPC_CARES_LIBRARIES ch_contrib::c-ares) -set(gRPC_CARES_PROVIDER "clickhouse" CACHE STRING "" FORCE) -add_subdirectory("${_gRPC_SOURCE_DIR}" "${_gRPC_BINARY_DIR}") - -# The contrib/grpc/CMakeLists.txt redefined the PROTOBUF_GENERATE_GRPC_CPP() function for its own purposes, -# so we need to redefine it back. -include("${ClickHouse_SOURCE_DIR}/contrib/grpc-cmake/protobuf_generate_grpc.cmake") +include(grpc.cmake) +include(protobuf_generate_grpc.cmake) set(gRPC_CPP_PLUGIN $) set(gRPC_PYTHON_PLUGIN $) diff --git a/contrib/grpc-cmake/grpc.cmake b/contrib/grpc-cmake/grpc.cmake new file mode 100644 index 00000000000..c2488539211 --- /dev/null +++ b/contrib/grpc-cmake/grpc.cmake @@ -0,0 +1,1854 @@ +# This file was edited for ClickHouse. + +# GRPC global cmake file +# This currently builds C and C++ code. +# This file has been automatically generated from a template file. +# Please look at the templates directory instead. +# This file can be regenerated from the template by running +# tools/buildgen/generate_projects.sh +# +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# We want to use C++23, but GRPC is not ready +set (CMAKE_CXX_STANDARD 20) + +set(_gRPC_ZLIB_INCLUDE_DIR "") +set(_gRPC_ZLIB_LIBRARIES ch_contrib::zlib) + +set(_gRPC_CARES_LIBRARIES ch_contrib::c-ares) + +set(_gRPC_RE2_INCLUDE_DIR "") +set(_gRPC_RE2_LIBRARIES ch_contrib::re2) + +set(_gRPC_SSL_INCLUDE_DIR "") +set(_gRPC_SSL_LIBRARIES OpenSSL::Crypto OpenSSL::SSL) + +set(_gRPC_PROTOBUF_LIBRARIES ch_contrib::protobuf) +set(_gRPC_PROTOBUF_PROTOC "protoc") +set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $) +set(_gRPC_PROTOBUF_PROTOC_LIBRARIES ch_contrib::protoc) + + +if(UNIX) + if(${CMAKE_SYSTEM_NAME} MATCHES "Linux") + set(_gRPC_PLATFORM_LINUX ON) + elseif(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + set(_gRPC_PLATFORM_MAC ON) + elseif(${CMAKE_SYSTEM_NAME} MATCHES "iOS") + set(_gRPC_PLATFORM_IOS ON) + elseif(${CMAKE_SYSTEM_NAME} MATCHES "Android") + set(_gRPC_PLATFORM_ANDROID ON) + else() + set(_gRPC_PLATFORM_POSIX ON) + endif() +endif() + +set(_gRPC_ADDRESS_SORTING_INCLUDE_DIR "${_gRPC_SOURCE_DIR}/third_party/address_sorting/include") +set(_gRPC_ADDRESS_SORTING_LIBRARIES address_sorting) + +set(UPB_ROOT_DIR ${_gRPC_SOURCE_DIR}/third_party/upb) + +set(_gRPC_UPB_INCLUDE_DIR "${UPB_ROOT_DIR}" "${_gRPC_SOURCE_DIR}/third_party/utf8_range") +set(_gRPC_UPB_GRPC_GENERATED_DIR "${_gRPC_SOURCE_DIR}/src//core/ext/upb-generated" "${_gRPC_SOURCE_DIR}/src//core/ext/upbdefs-generated") + +set(_gRPC_UPB_LIBRARIES upb) + +set(_gRPC_XXHASH_INCLUDE_DIR "${_gRPC_SOURCE_DIR}/third_party/xxhash") + +add_library(address_sorting + ${_gRPC_SOURCE_DIR}/third_party/address_sorting/address_sorting.c + ${_gRPC_SOURCE_DIR}/third_party/address_sorting/address_sorting_posix.c + ${_gRPC_SOURCE_DIR}/third_party/address_sorting/address_sorting_windows.c +) + 
+target_compile_features(address_sorting PUBLIC cxx_std_14) + +target_include_directories(address_sorting + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(address_sorting + ${_gRPC_ALLTARGETS_LIBRARIES} +) + + +add_library(gpr + ${_gRPC_SOURCE_DIR}/src/core/lib/config/config_vars.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/config/config_vars_non_generated.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/config/load_config.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thread_local.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/alloc.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/android/log.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/atm.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/iphone/cpu.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/linux/cpu.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/linux/log.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/log.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/msys/tmpfile.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/posix/cpu.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/posix/log.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/posix/string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/posix/sync.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/posix/time.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/posix/tmpfile.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/sync.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/sync_abseil.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/time.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/time_precise.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/windows/cpu.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/windows/log.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/windows/string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/windows/string_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/windows/sync.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/windows/time.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/windows/tmpfile.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gpr/wrap_memcpy.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/crash.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/examine_stack.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/fork.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/host_port.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/linux/env.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/mpscq.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/posix/env.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/posix/stat.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/posix/thd.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/strerror.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/tchar.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/time_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/windows/env.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/windows/stat.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/windows/thd.cc +) + +target_compile_features(gpr PUBLIC cxx_std_14) + +target_include_directories(gpr + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(gpr + ${_gRPC_ALLTARGETS_LIBRARIES} + absl::base + absl::core_headers + absl::flags + absl::flags_marshalling + absl::any_invocable + 
absl::memory + absl::random_random + absl::status + absl::cord + absl::str_format + absl::strings + absl::synchronization + absl::time + absl::optional + absl::variant +) +if(_gRPC_PLATFORM_ANDROID) + target_link_libraries(gpr + android + log + ) +endif() + + +add_library(grpc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/backend_metrics/backend_metric_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/census/grpc_context.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/channel_idle/channel_idle_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/channel_idle/idle_filter_state.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/backend_metric.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/backup_poller.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/channel_connectivity.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_channelz.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_service_config.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/config_selector.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/dynamic_filters.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/global_subchannel_pool.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/http_proxy_mapper.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/address_filtering.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/endpoint_list.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/health_check_client.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/static_stride_scheduler.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/weighted_round_robin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc + 
${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/xds/xds_override_host.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/xds/xds_wrr_locality.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/local_subchannel_pool.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/dns_resolver_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/event_engine/event_engine_client_channel_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/event_engine/service_config_helper.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/polling_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_filter_legacy_call_data.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_service_config.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_throttle.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel_pool_interface.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel_stream_client.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/deadline/deadline_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/fault_injection/fault_injection_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/fault_injection/fault_injection_service_config_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/client/http_client_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/client_authority_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/http_filters_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/message_compress/compression_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/server/http_server_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/message_size/message_size_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/rbac/rbac_filter.cc + 
${_gRPC_SOURCE_DIR}/src/core/ext/filters/rbac/rbac_service_config_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/server_config_selector/server_config_selector_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/stateful_session/stateful_session_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/stateful_session/stateful_session_service_config_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/gcp/metadata_query.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/alpn/alpn.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/client/chttp2_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/server/chttp2_server.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/bin_decoder.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/bin_encoder.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/chttp2_transport.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/decode_huff.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/flow_control.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_data.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_goaway.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_ping.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_settings.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_window_update.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_encoder.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_parse_result.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_parser_table.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/http2_settings.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/http_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/huffsyms.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/max_concurrent_streams_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/parsing.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/ping_abuse_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/ping_callbacks.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/ping_rate_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/stream_lists.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/varint.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/write_size_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/writing.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/inproc/inproc_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/inproc/inproc_transport.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/certs.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/clusters.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/config_dump.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/config_dump_shared.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/init_dump.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/listeners.upb.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/memory.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/metrics.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/mutex_stats.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/server_info.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/admin/v3/tap.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/annotations/deprecation.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/annotations/resource.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/accesslog/v3/accesslog.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/cluster/v3/circuit_breaker.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/cluster/v3/filter.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/cluster/v3/outlier_detection.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/common/matcher/v3/matcher.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/address.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/backoff.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/config_source.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/event_service_config.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/extension.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/grpc_method_list.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/grpc_service.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/metrics/v3/metrics_service.upb.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/route/v3/scoped_route.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/tap/v3/common.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/datadog.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/dynamic_ot.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/http_tracer.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/lightstep.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/opencensus.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/opentelemetry.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/service.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/skywalking.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/trace.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/xray.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/config/trace/v3/zipkin.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/data/accesslog/v3/accesslog.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/clusters/aggregate/v3/cluster.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/filters/common/fault/v3/fault.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/filters/http/rbac/v3/rbac.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/filters/http/stateful_session/v3/stateful_session.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/http/stateful_session/cookie/v3/cookie.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/load_balancing_policies/common/v3/common.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/cert.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/secret.upb.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/service/discovery/v3/ads.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/service/discovery/v3/discovery.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/http/v3/cookie.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/filter_state.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/http_inputs.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/path.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/status_code_input.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/string.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/struct.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/matcher/v3/value.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/metadata/v3/metadata.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/tracing/v3/custom_tag.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/hash_policy.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/http.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/http_status.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/percent.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/range.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/ratelimit_strategy.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/ratelimit_unit.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/semantic_version.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/envoy/type/v3/token_bucket.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/annotations.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/http.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/httpbody.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/any.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/descriptor.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/duration.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/empty.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/struct.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/timestamp.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/wrappers.upb.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/rpc/status.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/opencensus/proto/trace/v1/trace_config.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls_config.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/udpa/annotations/migrate.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/udpa/annotations/security.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/udpa/annotations/sensitive.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/udpa/annotations/status.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/udpa/annotations/versioning.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/validate/validate.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/annotations/v3/migrate.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/annotations/v3/security.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/annotations/v3/sensitive.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/annotations/v3/status.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/annotations/v3/versioning.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/authority.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/cidr.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/collection_entry.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/context_params.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/extension.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/resource.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/resource_locator.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/core/v3/resource_name.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/cel.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/domain.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/http_inputs.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/ip.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/matcher.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/range.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/regex.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/matcher/v3/string.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/v3/cel.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/v3/range.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/type/v3/typed_struct.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/certs.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/clusters.upbdefs.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump_shared.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/init_dump.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/listeners.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/memory.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/metrics.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/mutex_stats.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/server_info.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/admin/v3/tap.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/annotations/resource.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/circuit_breaker.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/filter.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/outlier_detection.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/common/matcher/v3/matcher.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/address.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/backoff.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/event_service_config.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/extension.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_method_list.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_service.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/health_check.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/http_uri.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/proxy_protocol.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/socket_option.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint_components.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/load_report.upbdefs.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/listener/v3/api_listener.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/metrics/v3/metrics_service.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/metrics/v3/stats.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/overload/v3/overload.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/rbac/v3/rbac.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/route/v3/scoped_route.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/tap/v3/common.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/datadog.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/dynamic_ot.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/http_tracer.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/lightstep.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/opencensus.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/opentelemetry.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/service.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/skywalking.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/trace.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/xray.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/config/trace/v3/zipkin.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/data/accesslog/v3/accesslog.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/clusters/aggregate/v3/cluster.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/filters/common/fault/v3/fault.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/rbac/v3/rbac.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/stateful_session/v3/stateful_session.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/http/stateful_session/cookie/v3/cookie.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/cert.upbdefs.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/secret.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/service/discovery/v3/ads.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/service/discovery/v3/discovery.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/service/load_stats/v3/lrs.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/http/v3/cookie.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/filter_state.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/http_inputs.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/metadata.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/node.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/number.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/path.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/status_code_input.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/string.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/struct.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/value.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/metadata/v3/metadata.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/tracing/v3/custom_tag.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/hash_policy.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/http.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/http_status.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/percent.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/range.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/ratelimit_strategy.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/ratelimit_unit.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/semantic_version.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/envoy/type/v3/token_bucket.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/api/annotations.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/checked.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/syntax.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/api/http.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/api/httpbody.upbdefs.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/protobuf/any.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/protobuf/duration.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/protobuf/empty.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/protobuf/struct.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/protobuf/timestamp.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/protobuf/wrappers.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/opencensus/proto/trace/v1/trace_config.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/src/proto/grpc/lookup/v1/rls_config.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/udpa/annotations/migrate.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/udpa/annotations/security.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/udpa/annotations/sensitive.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/udpa/annotations/status.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/udpa/annotations/versioning.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/validate/validate.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/annotations/v3/migrate.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/annotations/v3/security.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/annotations/v3/sensitive.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/annotations/v3/status.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/annotations/v3/versioning.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/authority.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/cidr.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/collection_entry.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/context_params.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/extension.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/resource.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/resource_locator.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/core/v3/resource_name.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/cel.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/domain.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/http_inputs.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/ip.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/matcher.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/range.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/regex.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/matcher/v3/string.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/v3/cel.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/v3/range.upbdefs.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upbdefs-generated/xds/type/v3/typed_struct.upbdefs.c + 
${_gRPC_SOURCE_DIR}/src/core/ext/xds/certificate_provider_store.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/file_watcher_certificate_provider_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_api.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_audit_logger_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_bootstrap.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_bootstrap_grpc.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_certificate_provider.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_channel_stack_modifier.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_client.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_client_grpc.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_client_stats.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_cluster.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_cluster_specifier_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_common_types.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_health_status.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_http_fault_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_http_filters.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_http_rbac_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_http_stateful_session_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_lb_policy_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_listener.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_route_config.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_routing.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_server_config_fetcher.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/xds/xds_transport_grpc.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/address_utils/parse_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/address_utils/sockaddr_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/backoff/backoff.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/backoff/random_early_detection.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/call_tracer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_args.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_args_preconditioning.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack_builder.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack_builder_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channelz.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channelz_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/connected_channel.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/promise_based_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/server_call_tracer_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/status_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/compression/compression.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/compression/compression_internal.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/compression/message_compress.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/config/core_configuration.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/event_log.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/histogram_view.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/stats.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/stats_data.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/ares_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/cf_engine/cf_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/cf_engine/cfstream_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/cf_engine/dns_service_resolver.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/channel_args_endpoint_config.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/default_event_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/default_event_engine_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/event_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/forkable.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/memory_allocator.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/ev_epoll1_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/ev_poll_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/event_poller_posix_default.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/internal_errqueue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/lockfree_event.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_engine_listener.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_engine_listener_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/tcp_socket_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/timer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/timer_heap.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/timer_manager.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/traced_buffer_list.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/wakeup_fd_eventfd.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/wakeup_fd_pipe.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/wakeup_fd_posix_default.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/resolved_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/shim.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/slice.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/slice_buffer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/tcp_socket_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thread_pool/thread_count.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thread_pool/thread_pool_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thready_event_engine/thready_event_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/time_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/iocp.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/win_socket.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/windows_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/windows_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/windows_listener.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/work_queue/basic_work_queue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/experiments/config.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/experiments/experiments.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/load_file.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/per_cpu.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/ref_counted_string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/status_helper.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/time.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/time_averaged_stats.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/validation_errors.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/work_serializer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/handshaker/proxy_mapper_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/http/format_request.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/http/httpcli.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/http/httpcli_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/http/parser.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/buffer_list.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/call_combiner.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/cfstream_handle.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/closure.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/combiner.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/dualstack_socket_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_pair_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_pair_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/error.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/error_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_apple.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_epoll1_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_poll_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/event_engine_shims/closure.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/event_engine_shims/endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/event_engine_shims/tcp_client.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/exec_ctx.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/executor.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/fork_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/fork_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_fallback.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_host_name_max.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_sysconf.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/grpc_if_nametoindex_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/internal_errqueue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iocp_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_internal.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_posix_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/load_file.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/lockfree_event.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/polling_entity.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_set.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_set_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/sockaddr_utils_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_factory_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_mutator.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_common_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_linux.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/systemd_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_common.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_generic.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_heap.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_manager.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/unix_sockets_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/unix_sockets_posix_noop.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/vsock.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_eventfd.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_nospecial.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_pipe.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/json/json_object_loader.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/json/json_reader.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/json/json_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/json/json_writer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/load_balancing/lb_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/load_balancing/lb_policy_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/matchers/matchers.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/activity.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/party.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/sleep.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resolver/resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resolver/resolver_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resolver/server_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/api.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/arena.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/memory_quota.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/periodic_update.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/resource_quota.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/thread_quota.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/audit_logging.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/authorization_policy_provider_vtable.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/evaluate_args.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/grpc_authorization_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/grpc_server_authz_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/matchers.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/rbac_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/stdout_logger.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/security/certificate_provider/certificate_provider_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/context/security_context.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/alts_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/call_creds_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/channel_creds_registry_init.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/composite/composite_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/external/aws_external_account_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/external/aws_request_signer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/external/external_account_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/external/file_external_account_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/external/url_external_account_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/fake/fake_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/google_default/credentials_generic.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/google_default/google_default_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/iam/iam_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/insecure/insecure_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/jwt/json_token.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/jwt/jwt_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/jwt/jwt_verifier.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/local/local_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/plugin/plugin_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/ssl/ssl_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/grpc_tls_certificate_match.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/grpc_tls_certificate_verifier.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/tls_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/tls_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/xds/xds_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/alts/alts_security_connector.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/fake/fake_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/insecure/insecure_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/load_system_roots_fallback.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/load_system_roots_supported.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/local/local_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/ssl_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/tls/tls_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/client_auth_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/secure_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/security_handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/server_auth_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/tsi_error.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/util/json_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/service_config/service_config_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/service_config/service_config_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/b64.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/percent_encoding.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice_buffer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice_refcount.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice_string_helpers.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/api_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/builtins.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/byte_buffer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/byte_buffer_reader.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call_details.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call_log_batch.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel_init.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel_ping.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel_stack_type.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/completion_queue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/completion_queue_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/event_string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/init.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/init_internally.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/lame_client.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/metadata_array.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/server.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/validate_metadata.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/version.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/batch_builder.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/bdp_estimator.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/connectivity_state.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/error_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/handshaker_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/http_connect_handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/metadata_batch.cc 
+ ${_gRPC_SOURCE_DIR}/src/core/lib/transport/parsed_metadata.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/pid_controller.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/status_conversion.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/tcp_connect_handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/timeout_encoding.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/transport.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/transport_op_string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/uri/uri_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/plugin_registry/grpc_plugin_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/plugin_registry/grpc_plugin_registry_extra.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/crypt/aes_gcm.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/crypt/gsec.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_counter.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_crypter.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_frame_protector.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/frame_protector/frame_handler.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_handshaker_client.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_shared_resource.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/alts_tsi_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/transport_security_common_api.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/fake_transport_security.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/local_transport_security.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/ssl/key_logging/ssl_key_logging.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/ssl/session_cache/ssl_session_cache.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/ssl_transport_security.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/ssl_transport_security_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/transport_security.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/transport_security_grpc.cc +) + +target_compile_features(grpc PUBLIC cxx_std_14) + +target_include_directories(grpc + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(grpc + ${_gRPC_ALLTARGETS_LIBRARIES} + ${_gRPC_RE2_LIBRARIES} + upb_json_lib + upb_textformat_lib + ${_gRPC_ZLIB_LIBRARIES} + absl::algorithm_container + absl::cleanup + absl::flat_hash_map + 
absl::flat_hash_set + absl::inlined_vector + absl::bind_front + absl::function_ref + absl::hash + absl::type_traits + absl::random_bit_gen_ref + absl::random_distributions + absl::statusor + absl::span + absl::utility + ${_gRPC_CARES_LIBRARIES} + gpr + ${_gRPC_SSL_LIBRARIES} + ${_gRPC_ADDRESS_SORTING_LIBRARIES} +) +if(_gRPC_PLATFORM_IOS OR _gRPC_PLATFORM_MAC) + target_link_libraries(grpc "-framework CoreFoundation") +endif() + +add_library(grpc_unsecure + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/backend_metrics/backend_metric_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/census/grpc_context.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/channel_idle/channel_idle_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/channel_idle/idle_filter_state.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/backend_metric.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/backup_poller.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/channel_connectivity.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_channelz.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/client_channel_service_config.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/config_selector.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/dynamic_filters.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/global_subchannel_pool.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/http_proxy_mapper.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/address_filtering.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/endpoint_list.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/health_check_client.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/static_stride_scheduler.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/weighted_round_robin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/local_subchannel_pool.cc + 
${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/dns_resolver_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/event_engine/event_engine_client_channel_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/event_engine/service_config_helper.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/polling_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_filter_legacy_call_data.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_service_config.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/retry_throttle.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel_pool_interface.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/client_channel/subchannel_stream_client.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/deadline/deadline_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/fault_injection/fault_injection_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/fault_injection/fault_injection_service_config_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/client/http_client_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/client_authority_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/http_filters_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/message_compress/compression_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/http/server/http_server_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/filters/message_size/message_size_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/client/chttp2_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/server/chttp2_server.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/bin_decoder.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/bin_encoder.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/chttp2_transport.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/decode_huff.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/flow_control.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_data.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_goaway.cc + 
${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_ping.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_settings.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/frame_window_update.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_encoder.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_parse_result.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/hpack_parser_table.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/http2_settings.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/http_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/huffsyms.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/max_concurrent_streams_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/parsing.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/ping_abuse_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/ping_callbacks.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/ping_rate_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/stream_lists.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/varint.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/write_size_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/chttp2/transport/writing.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/inproc/inproc_plugin.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/inproc/inproc_transport.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/annotations.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/api/http.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/any.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/descriptor.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/duration.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/empty.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/struct.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/timestamp.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/wrappers.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/rpc/status.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/validate/validate.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.c + ${_gRPC_SOURCE_DIR}/src/core/lib/address_utils/parse_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/address_utils/sockaddr_utils.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/backoff/backoff.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/backoff/random_early_detection.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/call_tracer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_args.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_args_preconditioning.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack_builder.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_stack_builder_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channel_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channelz.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/channelz_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/connected_channel.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/promise_based_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/server_call_tracer_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/channel/status_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/compression/compression.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/compression/compression_internal.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/compression/message_compress.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/config/core_configuration.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/event_log.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/histogram_view.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/stats.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/stats_data.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/debug/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/ares_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/cf_engine/cf_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/cf_engine/cfstream_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/cf_engine/dns_service_resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/channel_args_endpoint_config.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/default_event_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/default_event_engine_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/event_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/forkable.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/memory_allocator.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/ev_epoll1_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/ev_poll_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/event_poller_posix_default.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/internal_errqueue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/lockfree_event.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_engine_listener.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/posix_engine_listener_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/tcp_socket_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/timer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/timer_heap.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/timer_manager.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/traced_buffer_list.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/wakeup_fd_eventfd.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/wakeup_fd_pipe.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/posix_engine/wakeup_fd_posix_default.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/resolved_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/shim.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/slice.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/slice_buffer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/tcp_socket_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thread_pool/thread_count.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thread_pool/thread_pool_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/thready_event_engine/thready_event_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/time_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/iocp.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/win_socket.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/windows_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/windows_engine.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/windows/windows_listener.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/event_engine/work_queue/basic_work_queue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/experiments/config.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/experiments/experiments.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/load_file.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/per_cpu.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/ref_counted_string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/status_helper.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/time.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/time_averaged_stats.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/validation_errors.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/gprpp/work_serializer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/handshaker/proxy_mapper_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/http/format_request.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/http/httpcli.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/http/parser.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/buffer_list.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/call_combiner.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/cfstream_handle.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/closure.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/combiner.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/dualstack_socket_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_pair_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/endpoint_pair_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/error.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/error_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_apple.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_epoll1_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_poll_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/ev_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/event_engine_shims/closure.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/event_engine_shims/endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/event_engine_shims/tcp_client.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/exec_ctx.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/executor.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/fork_posix.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/fork_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_fallback.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_host_name_max.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/gethostname_sysconf.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/grpc_if_nametoindex_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/internal_errqueue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iocp_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_internal.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_posix_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/iomgr_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/load_file.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/lockfree_event.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/polling_entity.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_set.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_set_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/pollset_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/resolve_address_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/sockaddr_utils_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_factory_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_mutator.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_common_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_utils_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/socket_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/systemd_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_cfstream.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_client_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_common.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_server_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/tcp_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_generic.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_heap.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/timer_manager.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/unix_sockets_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/unix_sockets_posix_noop.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/vsock.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_eventfd.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_nospecial.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_pipe.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/iomgr/wakeup_fd_posix.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/json/json_object_loader.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/json/json_reader.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/json/json_writer.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/load_balancing/lb_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/load_balancing/lb_policy_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/activity.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/party.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/sleep.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/promise/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resolver/resolver.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resolver/resolver_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resolver/server_address.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/api.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/arena.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/memory_quota.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/periodic_update.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/resource_quota.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/thread_quota.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/resource_quota/trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/authorization_policy_provider_vtable.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/evaluate_args.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/authorization/grpc_server_authz_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/certificate_provider/certificate_provider_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/context/security_context.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/call_creds_util.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/composite/composite_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/fake/fake_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/insecure/insecure_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/plugin/plugin_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/credentials/tls/tls_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/fake/fake_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/insecure/insecure_security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/load_system_roots_fallback.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/load_system_roots_supported.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/security_connector/security_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/client_auth_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/secure_endpoint.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/security_handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/server_auth_filter.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/transport/tsi_error.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/security/util/json_util.cc + 
${_gRPC_SOURCE_DIR}/src/core/lib/service_config/service_config_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/service_config/service_config_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/b64.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/percent_encoding.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice_buffer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice_refcount.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/slice/slice_string_helpers.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/api_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/builtins.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/byte_buffer.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/byte_buffer_reader.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call_details.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call_log_batch.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/call_trace.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel_init.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel_ping.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/channel_stack_type.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/completion_queue.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/completion_queue_factory.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/event_string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/init.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/init_internally.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/lame_client.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/metadata_array.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/server.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/validate_metadata.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/surface/version.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/batch_builder.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/bdp_estimator.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/connectivity_state.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/error_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/handshaker_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/http_connect_handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/metadata_batch.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/parsed_metadata.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/pid_controller.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/status_conversion.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/tcp_connect_handshaker.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/timeout_encoding.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/transport.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/transport/transport_op_string.cc + ${_gRPC_SOURCE_DIR}/src/core/lib/uri/uri_parser.cc + ${_gRPC_SOURCE_DIR}/src/core/plugin_registry/grpc_plugin_registry.cc + ${_gRPC_SOURCE_DIR}/src/core/plugin_registry/grpc_plugin_registry_noextra.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/alts/handshaker/transport_security_common_api.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/fake_transport_security.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/local_transport_security.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/transport_security.cc + ${_gRPC_SOURCE_DIR}/src/core/tsi/transport_security_grpc.cc + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/message/accessors.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/build_enum.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/decode.c + 
${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/internal/base92.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/link.c + ${gRPC_ADDITIONAL_DLL_SRC} +) + +target_compile_features(grpc_unsecure PUBLIC cxx_std_14) + +target_include_directories(grpc_unsecure + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(grpc_unsecure + ${_gRPC_ALLTARGETS_LIBRARIES} + upb_collections_lib + upb + ${_gRPC_ZLIB_LIBRARIES} + absl::algorithm_container + absl::cleanup + absl::flat_hash_map + absl::flat_hash_set + absl::inlined_vector + absl::bind_front + absl::function_ref + absl::hash + absl::type_traits + absl::random_bit_gen_ref + absl::random_distributions + absl::statusor + absl::span + absl::utility + ${_gRPC_CARES_LIBRARIES} + gpr + ${_gRPC_ADDRESS_SORTING_LIBRARIES} +) +if(_gRPC_PLATFORM_IOS OR _gRPC_PLATFORM_MAC) + target_link_libraries(grpc_unsecure "-framework CoreFoundation") +endif() + +add_library(upb + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/base/status.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/collections/array.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/collections/map.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/collections/map_sorter.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/hash/common.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/lex/atoi.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/lex/round_trip.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/lex/strtod.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/lex/unicode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mem/alloc.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mem/arena.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/message/message.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_table/extension_registry.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_table/internal/message.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_table/message.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/wire/decode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/wire/decode_fast.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/wire/encode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/wire/eps_copy_input_stream.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/wire/reader.c +) + +target_compile_features(upb PUBLIC cxx_std_14) + +target_include_directories(upb + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(upb + ${_gRPC_ALLTARGETS_LIBRARIES} + utf8_range_lib +) + + +add_library(upb_collections_lib + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/base/status.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/collections/array.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/collections/map.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/collections/map_sorter.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/hash/common.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mem/alloc.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mem/arena.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/message/message.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_table/extension_registry.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_table/internal/message.c + 
${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_table/message.c +) + +target_compile_features(upb_collections_lib PUBLIC cxx_std_14) + +target_include_directories(upb_collections_lib + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(upb_collections_lib + ${_gRPC_ALLTARGETS_LIBRARIES} +) + + + +add_library(upb_json_lib + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/descriptor.upb.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/json/decode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/json/encode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/message/accessors.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/build_enum.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/decode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/internal/base92.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/internal/encode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/link.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/def_builder.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/def_pool.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/def_type.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/desc_state.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/enum_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/enum_reserved_range.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/enum_value_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/extension_range.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/field_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/file_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/message.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/message_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/message_reserved_range.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/method_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/oneof_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/service_def.c +) + +target_compile_features(upb_json_lib PUBLIC cxx_std_14) + +target_include_directories(upb_json_lib + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(upb_json_lib + ${_gRPC_ALLTARGETS_LIBRARIES} + upb_collections_lib + upb +) + + +add_library(upb_textformat_lib + ${_gRPC_SOURCE_DIR}/src/core/ext/upb-generated/google/protobuf/descriptor.upb.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/message/accessors.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/build_enum.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/decode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/internal/base92.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/internal/encode.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/mini_descriptor/link.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/def_builder.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/def_pool.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/def_type.c + 
${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/desc_state.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/enum_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/enum_reserved_range.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/enum_value_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/extension_range.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/field_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/file_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/message.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/message_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/message_reserved_range.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/method_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/oneof_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/reflection/service_def.c + ${_gRPC_SOURCE_DIR}/third_party/upb/upb/text/encode.c +) + +target_compile_features(upb_textformat_lib PUBLIC cxx_std_14) + +target_include_directories(upb_textformat_lib + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(upb_textformat_lib + ${_gRPC_ALLTARGETS_LIBRARIES} + upb_collections_lib + upb +) + + +add_library(utf8_range_lib + ${_gRPC_SOURCE_DIR}/third_party/utf8_range/naive.c + ${_gRPC_SOURCE_DIR}/third_party/utf8_range/range2-neon.c + ${_gRPC_SOURCE_DIR}/third_party/utf8_range/range2-sse.c +) + +target_compile_features(utf8_range_lib PUBLIC cxx_std_14) + +target_include_directories(utf8_range_lib + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(utf8_range_lib + ${_gRPC_ALLTARGETS_LIBRARIES} +) + + +add_library(grpc++ + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/client/binder_connector.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/client/channel_create.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/client/channel_create_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/client/connection_id_generator.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/client/endpoint_binder_pool.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/client/jni_utils.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/client/security_policy_setting.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/security_policy/binder_security_policy.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/server/binder_server.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/server/binder_server_credentials.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/transport/binder_transport.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/utils/ndk_binder.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/utils/transport_stream_receiver_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/wire_format/binder_android.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/wire_format/binder_constants.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/wire_format/transaction.cc + 
${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/wire_format/wire_reader_impl.cc + ${_gRPC_SOURCE_DIR}/src/core/ext/transport/binder/wire_format/wire_writer.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/channel_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_callback.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_context.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_interceptor.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_stats_interceptor.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/create_channel.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/create_channel_internal.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/create_channel_posix.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/insecure_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/secure_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/xds_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/alarm.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/auth_property_iterator.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/channel_arguments.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/channel_filter.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/completion_queue_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/resource_quota_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/rpc_method.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/secure_auth_context.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/secure_channel_arguments.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/secure_create_auth_context.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/tls_certificate_provider.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/tls_certificate_verifier.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/tls_credentials_options.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/validate_service_config.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/version_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/async_generic_service.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/backend_metric_recorder.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/channel_argument_option.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/create_default_thread_pool.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/external_connection_acceptor_impl.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/health/default_health_check_service.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/health/health_check_service.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/health/health_check_service_server_builder_option.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/insecure_server_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/secure_server_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_builder.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_callback.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_context.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_posix.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/xds_server_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/thread_manager/thread_manager.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/byte_buffer_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/status.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/string_ref.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/time_cc.cc + ${gRPC_UPB_GEN_DUPL_SRC} +) + +target_compile_features(grpc++ PUBLIC cxx_std_14) + +target_include_directories(grpc++ + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(grpc++ + ${_gRPC_ALLTARGETS_LIBRARIES} + grpc + ${_gRPC_PROTOBUF_LIBRARIES} +) + +add_library(grpc++_unsecure + 
${_gRPC_SOURCE_DIR}/src/cpp/client/channel_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_callback.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_context.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_interceptor.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/client_stats_interceptor.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/create_channel.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/create_channel_internal.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/create_channel_posix.cc + ${_gRPC_SOURCE_DIR}/src/cpp/client/insecure_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/alarm.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/channel_arguments.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/channel_filter.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/completion_queue_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/insecure_create_auth_context.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/resource_quota_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/rpc_method.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/validate_service_config.cc + ${_gRPC_SOURCE_DIR}/src/cpp/common/version_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/async_generic_service.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/backend_metric_recorder.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/channel_argument_option.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/create_default_thread_pool.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/external_connection_acceptor_impl.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/health/default_health_check_service.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/health/health_check_service.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/health/health_check_service_server_builder_option.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/insecure_server_credentials.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_builder.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_callback.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_context.cc + ${_gRPC_SOURCE_DIR}/src/cpp/server/server_posix.cc + ${_gRPC_SOURCE_DIR}/src/cpp/thread_manager/thread_manager.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/byte_buffer_cc.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/status.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/string_ref.cc + ${_gRPC_SOURCE_DIR}/src/cpp/util/time_cc.cc + ${gRPC_UPB_GEN_DUPL_SRC} +) + +target_compile_features(grpc++_unsecure PUBLIC cxx_std_14) + +target_include_directories(grpc++_unsecure + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(grpc++_unsecure + ${_gRPC_ALLTARGETS_LIBRARIES} + grpc_unsecure + ${_gRPC_PROTOBUF_LIBRARIES} +) + +add_library(grpc_plugin_support + ${_gRPC_SOURCE_DIR}/src/compiler/cpp_generator.cc + ${_gRPC_SOURCE_DIR}/src/compiler/csharp_generator.cc + ${_gRPC_SOURCE_DIR}/src/compiler/node_generator.cc + ${_gRPC_SOURCE_DIR}/src/compiler/objective_c_generator.cc + ${_gRPC_SOURCE_DIR}/src/compiler/php_generator.cc + ${_gRPC_SOURCE_DIR}/src/compiler/proto_parser_helper.cc + ${_gRPC_SOURCE_DIR}/src/compiler/python_generator.cc + ${_gRPC_SOURCE_DIR}/src/compiler/ruby_generator.cc +) + +target_compile_features(grpc_plugin_support PUBLIC cxx_std_14) + +target_include_directories(grpc_plugin_support + PUBLIC ${_gRPC_SOURCE_DIR}/include + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + 
${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) +target_link_libraries(grpc_plugin_support + ${_gRPC_ALLTARGETS_LIBRARIES} + ${_gRPC_PROTOBUF_LIBRARIES} + ${_gRPC_PROTOBUF_PROTOC_LIBRARIES} +) + + +add_executable(grpc_cpp_plugin + ${_gRPC_SOURCE_DIR}/src/compiler/cpp_plugin.cc +) +target_compile_features(grpc_cpp_plugin PUBLIC cxx_std_14) +target_include_directories(grpc_cpp_plugin + PRIVATE + ${_gRPC_SOURCE_DIR} + ${_gRPC_SOURCE_DIR}/include + ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + ${_gRPC_RE2_INCLUDE_DIR} + ${_gRPC_SSL_INCLUDE_DIR} + ${_gRPC_UPB_GENERATED_DIR} + ${_gRPC_UPB_GRPC_GENERATED_DIR} + ${_gRPC_UPB_INCLUDE_DIR} + ${_gRPC_XXHASH_INCLUDE_DIR} + ${_gRPC_ZLIB_INCLUDE_DIR} +) + +target_link_libraries(grpc_cpp_plugin + ${_gRPC_ALLTARGETS_LIBRARIES} + grpc_plugin_support +) diff --git a/contrib/libcxx-cmake/CMakeLists.txt b/contrib/libcxx-cmake/CMakeLists.txt index b7e59e2c9a3..c77d5d8319e 100644 --- a/contrib/libcxx-cmake/CMakeLists.txt +++ b/contrib/libcxx-cmake/CMakeLists.txt @@ -1,5 +1,3 @@ -include(CheckCXXCompilerFlag) - set(LIBCXX_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/libcxx") set(SRCS diff --git a/contrib/libunwind b/contrib/libunwind index 30cc1d3fd36..40d8eadf96b 160000 --- a/contrib/libunwind +++ b/contrib/libunwind @@ -1 +1 @@ -Subproject commit 30cc1d3fd3655a5cfa0ab112fe320fb9fc0a8344 +Subproject commit 40d8eadf96b127d9b22d53ce7a4fc52aaedea965 diff --git a/contrib/libunwind-cmake/CMakeLists.txt b/contrib/libunwind-cmake/CMakeLists.txt index 733f99d07f5..8f3cd8bd07b 100644 --- a/contrib/libunwind-cmake/CMakeLists.txt +++ b/contrib/libunwind-cmake/CMakeLists.txt @@ -1,6 +1,3 @@ -include(CheckCCompilerFlag) -include(CheckCXXCompilerFlag) - set(LIBUNWIND_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libunwind") set(LIBUNWIND_CXX_SOURCES @@ -23,15 +20,7 @@ set(LIBUNWIND_ASM_SOURCES "${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersRestore.S" "${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersSave.S") -# CMake doesn't pass the correct architecture for Apple prior to CMake 3.19 [1] -# Workaround these two issues by compiling as C. 
-# -# [1]: https://gitlab.kitware.com/cmake/cmake/-/issues/20771 -if (APPLE AND CMAKE_VERSION VERSION_LESS 3.19) - set_source_files_properties(${LIBUNWIND_ASM_SOURCES} PROPERTIES LANGUAGE C) -else() - enable_language(ASM) -endif() +enable_language(ASM) set(LIBUNWIND_SOURCES ${LIBUNWIND_CXX_SOURCES} @@ -48,27 +37,11 @@ target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1 -D_DEBUG -D_LIB # and disable sanitizers (otherwise infinite loop may happen) target_compile_options(unwind PRIVATE -O3 -fno-exceptions -funwind-tables -fno-sanitize=all $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++ -fno-rtti>) -check_c_compiler_flag(-Wunused-but-set-variable HAVE_WARNING_UNUSED_BUT_SET_VARIABLE) -if (HAVE_WARNING_UNUSED_BUT_SET_VARIABLE) - target_compile_options(unwind PRIVATE -Wno-unused-but-set-variable) -endif () - -check_cxx_compiler_flag(-Wmissing-attributes HAVE_WARNING_MISSING_ATTRIBUTES) -if (HAVE_WARNING_MISSING_ATTRIBUTES) - target_compile_options(unwind PRIVATE -Wno-missing-attributes) -endif () - -check_cxx_compiler_flag(-Wmaybe-uninitialized HAVE_WARNING_MAYBE_UNINITIALIZED) -if (HAVE_WARNING_MAYBE_UNINITIALIZED) - target_compile_options(unwind PRIVATE -Wno-maybe-uninitialized) -endif () +target_compile_options(unwind PRIVATE -Wno-unused-but-set-variable) # The library is using register variables that are bound to specific registers # Example: DwarfInstructions.hpp: register unsigned long long x16 __asm("x16") = cfa; -check_cxx_compiler_flag(-Wregister HAVE_WARNING_REGISTER) -if (HAVE_WARNING_REGISTER) - target_compile_options(unwind PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:-Wno-register>") -endif () +target_compile_options(unwind PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:-Wno-register>") install( TARGETS unwind diff --git a/contrib/llvm-project-cmake/CMakeLists.txt b/contrib/llvm-project-cmake/CMakeLists.txt index d6133f145bc..406bac73e90 100644 --- a/contrib/llvm-project-cmake/CMakeLists.txt +++ b/contrib/llvm-project-cmake/CMakeLists.txt @@ -61,6 +61,9 @@ set (REQUIRED_LLVM_LIBRARIES LLVMDemangle ) +# Skip useless "install" instructions from CMake: +set (LLVM_INSTALL_TOOLCHAIN_ONLY 1 CACHE INTERNAL "") + if (ARCH_AMD64) set (LLVM_TARGETS_TO_BUILD "X86" CACHE INTERNAL "") list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen) diff --git a/contrib/pocketfft b/contrib/pocketfft new file mode 160000 index 00000000000..9efd4da52cf --- /dev/null +++ b/contrib/pocketfft @@ -0,0 +1 @@ +Subproject commit 9efd4da52cf8d28d14531d14e43ad9d913807546 diff --git a/contrib/pocketfft-cmake/CMakeLists.txt b/contrib/pocketfft-cmake/CMakeLists.txt new file mode 100644 index 00000000000..01911ee4496 --- /dev/null +++ b/contrib/pocketfft-cmake/CMakeLists.txt @@ -0,0 +1,10 @@ +option (ENABLE_POCKETFFT "Enable pocketfft" ${ENABLE_LIBRARIES}) + +if (NOT ENABLE_POCKETFFT) + message(STATUS "Not using pocketfft") + return() +endif() + +add_library(_pocketfft INTERFACE) +target_include_directories(_pocketfft INTERFACE ${ClickHouse_SOURCE_DIR}/contrib/pocketfft) +add_library(ch_contrib::pocketfft ALIAS _pocketfft) diff --git a/contrib/qpl-cmake/CMakeLists.txt b/contrib/qpl-cmake/CMakeLists.txt index 4e6c66fe731..7a84048e16b 100644 --- a/contrib/qpl-cmake/CMakeLists.txt +++ b/contrib/qpl-cmake/CMakeLists.txt @@ -16,8 +16,7 @@ function(GetLibraryVersion _content _outputVar) SET(${_outputVar} ${CMAKE_MATCH_1} PARENT_SCOPE) endfunction() -FILE(READ "${QPL_PROJECT_DIR}/CMakeLists.txt" HEADER_CONTENT) -GetLibraryVersion("${HEADER_CONTENT}" QPL_VERSION) +set (QPL_VERSION 1.2.0) message(STATUS "Intel QPL version: ${QPL_VERSION}") @@ -28,16 +27,422 @@ message(STATUS "Intel QPL
version: ${QPL_VERSION}") # The qpl submodule comes with its own version of isal. It contains code which does not exist in upstream isal. It would be nice to link # only upstream isal (ch_contrib::isal) but at this point we can't. -include("${QPL_PROJECT_DIR}/cmake/CompileOptions.cmake") +# ========================================================================== +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: MIT +# ========================================================================== + +set(QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS "-fno-exceptions;-fno-rtti") + +function(modify_standard_language_flag) + # Declaring function parameters + set(OPTIONS "") + set(ONE_VALUE_ARGS + LANGUAGE_NAME + FLAG_NAME + NEW_FLAG_VALUE) + set(MULTI_VALUE_ARGS "") + + # Parsing function parameters + cmake_parse_arguments(MODIFY + "${OPTIONS}" + "${ONE_VALUE_ARGS}" + "${MULTI_VALUE_ARGS}" + ${ARGN}) + + # Variables + set(FLAG_REGULAR_EXPRESSION "${MODIFY_FLAG_NAME}.*[ ]*") + set(NEW_VALUE "${MODIFY_FLAG_NAME}${MODIFY_NEW_FLAG_VALUE}") + + # Replacing specified flag with new value + string(REGEX REPLACE + ${FLAG_REGULAR_EXPRESSION} ${NEW_VALUE} + NEW_COMPILE_FLAGS + "${CMAKE_${MODIFY_LANGUAGE_NAME}_FLAGS}") + + # Returning the value + set(CMAKE_${MODIFY_LANGUAGE_NAME}_FLAGS ${NEW_COMPILE_FLAGS} PARENT_SCOPE) +endfunction() + +function(get_function_name_with_default_bit_width in_function_name bit_width out_function_name) + + if(in_function_name MATCHES ".*_i") + + string(REPLACE "_i" "" in_function_name ${in_function_name}) + + set(${out_function_name} "${in_function_name}_${bit_width}_i" PARENT_SCOPE) + + else() + + set(${out_function_name} "${in_function_name}_${bit_width}" PARENT_SCOPE) + + endif() + +endfunction() + +macro(get_list_of_supported_optimizations PLATFORMS_LIST) + list(APPEND PLATFORMS_LIST "") + list(APPEND PLATFORMS_LIST "px") + list(APPEND PLATFORMS_LIST "avx512") +endmacro(get_list_of_supported_optimizations) + +function(generate_unpack_kernel_arrays current_directory PLATFORMS_LIST) + list(APPEND UNPACK_POSTFIX_LIST "") + list(APPEND UNPACK_PRLE_POSTFIX_LIST "") + list(APPEND PACK_POSTFIX_LIST "") + list(APPEND PACK_INDEX_POSTFIX_LIST "") + list(APPEND SCAN_POSTFIX_LIST "") + list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "") + list(APPEND DEFAULT_BIT_WIDTH_LIST "") + + #create list of functions that use only 8u 16u 32u postfixes + list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "unpack_prle") + list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "extract") + list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "extract_i") + list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "select") + list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "select_i") + list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "expand") + + #create default bit width list + list(APPEND DEFAULT_BIT_WIDTH_LIST "8u") + list(APPEND DEFAULT_BIT_WIDTH_LIST "16u") + list(APPEND DEFAULT_BIT_WIDTH_LIST "32u") + + #create scan kernel postfixes + list(APPEND SCAN_COMPARATOR_LIST "") + + list(APPEND SCAN_COMPARATOR_LIST "eq") + list(APPEND SCAN_COMPARATOR_LIST "ne") + list(APPEND SCAN_COMPARATOR_LIST "lt") + list(APPEND SCAN_COMPARATOR_LIST "le") + list(APPEND SCAN_COMPARATOR_LIST "gt") + list(APPEND SCAN_COMPARATOR_LIST "ge") + list(APPEND SCAN_COMPARATOR_LIST "range") + list(APPEND SCAN_COMPARATOR_LIST "not_range") + + foreach(SCAN_COMPARATOR IN LISTS SCAN_COMPARATOR_LIST) + list(APPEND SCAN_POSTFIX_LIST "_${SCAN_COMPARATOR}_8u") + list(APPEND SCAN_POSTFIX_LIST "_${SCAN_COMPARATOR}_16u8u") + list(APPEND SCAN_POSTFIX_LIST 
"_${SCAN_COMPARATOR}_32u8u") + endforeach() + + # create unpack kernel postfixes + foreach(input_width RANGE 1 32 1) + if(input_width LESS 8 OR input_width EQUAL 8) + list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u8u") + + elseif(input_width LESS 16 OR input_width EQUAL 16) + list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u16u") + + else() + list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u32u") + endif() + endforeach() + + # create pack kernel postfixes + foreach(output_width RANGE 1 8 1) + list(APPEND PACK_POSTFIX_LIST "_8u${output_width}u") + endforeach() + + foreach(output_width RANGE 9 16 1) + list(APPEND PACK_POSTFIX_LIST "_16u${output_width}u") + endforeach() + + foreach(output_width RANGE 17 32 1) + list(APPEND PACK_POSTFIX_LIST "_32u${output_width}u") + endforeach() + + list(APPEND PACK_POSTFIX_LIST "_8u16u") + list(APPEND PACK_POSTFIX_LIST "_8u32u") + list(APPEND PACK_POSTFIX_LIST "_16u32u") + + # create pack index kernel postfixes + list(APPEND PACK_INDEX_POSTFIX_LIST "_nu") + list(APPEND PACK_INDEX_POSTFIX_LIST "_8u") + list(APPEND PACK_INDEX_POSTFIX_LIST "_8u16u") + list(APPEND PACK_INDEX_POSTFIX_LIST "_8u32u") + + # write to file + file(MAKE_DIRECTORY ${current_directory}/generated) + + foreach(PLATFORM_VALUE IN LISTS PLATFORMS_LIST) + set(directory "${current_directory}/generated") + set(PLATFORM_PREFIX "${PLATFORM_VALUE}_") + + # + # Write unpack table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}unpack.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "unpack_table_t ${PLATFORM_PREFIX}unpack_table = {\n") + + #write LE kernels + foreach(UNPACK_POSTFIX IN LISTS UNPACK_POSTFIX_LIST) + file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack${UNPACK_POSTFIX},\n") + endforeach() + + #write BE kernels + + #get last element of the list + set(LAST_ELEMENT "") + list(GET UNPACK_POSTFIX_LIST -1 LAST_ELEMENT) + + foreach(UNPACK_POSTFIX IN LISTS UNPACK_POSTFIX_LIST) + + if(UNPACK_POSTFIX STREQUAL LAST_ELEMENT) + file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack_be${UNPACK_POSTFIX}};\n") + else() + file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack_be${UNPACK_POSTFIX},\n") + endif() + endforeach() + + file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "}\n") + + # + # Write pack table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}pack.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "pack_table_t ${PLATFORM_PREFIX}pack_table = {\n") + + #write LE kernels + foreach(PACK_POSTFIX IN LISTS PACK_POSTFIX_LIST) + file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "\t${PLATFORM_PREFIX}qplc_pack${PACK_POSTFIX},\n") + endforeach() + + #write BE kernels + + #get last element of the list + set(LAST_ELEMENT "") + list(GET PACK_POSTFIX_LIST -1 LAST_ELEMENT) + + foreach(PACK_POSTFIX IN LISTS PACK_POSTFIX_LIST) + + if(PACK_POSTFIX STREQUAL LAST_ELEMENT) + file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "\t${PLATFORM_PREFIX}qplc_pack_be${PACK_POSTFIX}};\n") + else() + file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp 
"\t${PLATFORM_PREFIX}qplc_pack_be${PACK_POSTFIX},\n") + endif() + endforeach() + + file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "}\n") + + # + # Write scan table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}scan.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "scan_table_t ${PLATFORM_PREFIX}scan_table = {\n") + + #get last element of the list + set(LAST_ELEMENT "") + list(GET SCAN_POSTFIX_LIST -1 LAST_ELEMENT) + + foreach(SCAN_POSTFIX IN LISTS SCAN_POSTFIX_LIST) + + if(SCAN_POSTFIX STREQUAL LAST_ELEMENT) + file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}};\n") + else() + file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX},\n") + endif() + endforeach() + + file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "}\n") + + # + # Write scan_i table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}scan_i.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "scan_i_table_t ${PLATFORM_PREFIX}scan_i_table = {\n") + + #get last element of the list + set(LAST_ELEMENT "") + list(GET SCAN_POSTFIX_LIST -1 LAST_ELEMENT) + + foreach(SCAN_POSTFIX IN LISTS SCAN_POSTFIX_LIST) + + if(SCAN_POSTFIX STREQUAL LAST_ELEMENT) + file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}_i};\n") + else() + file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}_i,\n") + endif() + endforeach() + + file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "}\n") + + # + # Write pack_index table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}pack_index.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "pack_index_table_t ${PLATFORM_PREFIX}pack_index_table = {\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_bits_nu,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u16u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u32u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_bits_be_nu,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_be_8u16u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_be_8u32u};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "}\n") + + # + # Write default bit width functions + # + foreach(DEAULT_BIT_WIDTH_FUNCTION IN LISTS DEFAULT_BIT_WIDTH_FUNCTIONS_LIST) + file(WRITE 
${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "${DEAULT_BIT_WIDTH_FUNCTION}_table_t ${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}_table = {\n") + + #get last element of the list + set(LAST_ELEMENT "") + list(GET DEFAULT_BIT_WIDTH_LIST -1 LAST_ELEMENT) + + foreach(BIT_WIDTH IN LISTS DEFAULT_BIT_WIDTH_LIST) + + set(FUNCTION_NAME "") + get_function_name_with_default_bit_width(${DEAULT_BIT_WIDTH_FUNCTION} ${BIT_WIDTH} FUNCTION_NAME) + + if(BIT_WIDTH STREQUAL LAST_ELEMENT) + file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "\t${PLATFORM_PREFIX}qplc_${FUNCTION_NAME}};\n") + else() + file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "\t${PLATFORM_PREFIX}qplc_${FUNCTION_NAME},\n") + endif() + endforeach() + + file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "}\n") + endforeach() + + # + # Write aggregates table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}aggregates.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "aggregates_table_t ${PLATFORM_PREFIX}aggregates_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_bit_aggregates_8u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_8u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_16u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_32u};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "}\n") + + # + # Write mem_copy functions table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "memory_copy_table_t ${PLATFORM_PREFIX}memory_copy_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_8u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_16u,\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_32u};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "}\n") + + # + # Write mem_copy functions table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}zero.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "zero_table_t ${PLATFORM_PREFIX}zero_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "\t${PLATFORM_PREFIX}qplc_zero_8u};\n") + + 
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "}\n") + + # + # Write move functions table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}move.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "move_table_t ${PLATFORM_PREFIX}move_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "\t${PLATFORM_PREFIX}qplc_move_8u};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "}\n") + + # + # Write crc64 function table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}crc64.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "crc64_table_t ${PLATFORM_PREFIX}crc64_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "\t${PLATFORM_PREFIX}qplc_crc64};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "}\n") + + # + # Write xor_checksum function table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "#include \"qplc_api.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "xor_checksum_table_t ${PLATFORM_PREFIX}xor_checksum_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "\t${PLATFORM_PREFIX}qplc_xor_checksum_8u};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "}\n") + + # + # Write deflate functions table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_slow_icf.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_hash_table.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_histogram.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "deflate_table_t ${PLATFORM_PREFIX}deflate_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast(&${PLATFORM_PREFIX}slow_deflate_icf_body),\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast(&${PLATFORM_PREFIX}deflate_histogram_reset),\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast(&${PLATFORM_PREFIX}deflate_hash_table_reset)};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "}\n") + + # + # Write deflate fix functions table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "#include \"deflate_slow.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "deflate_fix_table_t ${PLATFORM_PREFIX}deflate_fix_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "\t 
reinterpret_cast(&${PLATFORM_PREFIX}slow_deflate_body)};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "}\n") + + # + # Write setup_dictionary functions table + # + file(WRITE ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "#include \"deflate_slow_utils.h\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "#include \"dispatcher/dispatcher.hpp\"\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "namespace qpl::core_sw::dispatcher\n{\n") + file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "setup_dictionary_table_t ${PLATFORM_PREFIX}setup_dictionary_table = {\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "\t reinterpret_cast(&${PLATFORM_PREFIX}setup_dictionary)};\n") + + file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "}\n") + + endforeach() +endfunction() -# check nasm compiler -include(CheckLanguage) -check_language(ASM_NASM) -if(NOT CMAKE_ASM_NASM_COMPILER) - message(FATAL_ERROR "Please install NASM from 'https://www.nasm.us/' because NASM compiler can not be found!") -endif() -# [SUBDIR]isal enable_language(ASM_NASM) set(ISAL_C_SRC ${QPL_SRC_DIR}/isal/igzip/adler32_base.c @@ -107,11 +512,6 @@ set_target_properties(isal PROPERTIES CXX_STANDARD 11 C_STANDARD 99) -target_compile_options(isal PRIVATE - "$<$:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}>" - "$<$:>" - "$<$:>") - # AS_FEATURE_LEVEL=10 means "Check SIMD capabilities of the target system at runtime and use up to AVX512 if available". # HAVE_KNOWS_AVX512 means rely on AVX512 being available on the target system. target_compile_options(isal_asm PRIVATE "-I${QPL_SRC_DIR}/isal/include/" @@ -164,15 +564,7 @@ foreach(PLATFORM_ID IN LISTS PLATFORMS_LIST) PUBLIC $ PRIVATE $) - set_target_properties(qplcore_${PLATFORM_ID} PROPERTIES - $<$:C_STANDARD 17>) - - target_compile_options(qplcore_${PLATFORM_ID} - PRIVATE ${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS} - PRIVATE "$<$:>" - PRIVATE "$<$:-O3;-D_FORTIFY_SOURCE=2>") - - # Set specific compiler options and/or definitions based on a platform + # Set specific compiler options and/or definitions based on a platform if (${PLATFORM_ID} MATCHES "avx512") target_compile_definitions(qplcore_${PLATFORM_ID} PRIVATE PLATFORM=2) target_compile_options(qplcore_${PLATFORM_ID} PRIVATE -march=skylake-avx512) @@ -221,10 +613,7 @@ set_target_properties(qplcore_sw_dispatcher PROPERTIES CXX_STANDARD 17) target_compile_definitions(qplcore_sw_dispatcher PUBLIC -DQPL_LIB) target_compile_options(qplcore_sw_dispatcher - PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}; - ${QPL_LINUX_TOOLCHAIN_DYNAMIC_LIBRARY_FLAGS}; - $<$:-O3;-D_FORTIFY_SOURCE=2>> - PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>) + PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}) # [SUBDIR]core-iaa file(GLOB HW_PATH_SRC ${QPL_SRC_DIR}/core-iaa/sources/aecs/*.c @@ -249,14 +638,6 @@ target_include_directories(core_iaa PRIVATE $ # own_checkers.h PRIVATE $) -set_target_properties(core_iaa PROPERTIES - $<$:C_STANDARD 17> - CXX_STANDARD 17) - -target_compile_options(core_iaa - PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}; - $<$:-O3;-D_FORTIFY_SOURCE=2>>) - target_compile_features(core_iaa PRIVATE c_std_11) target_compile_definitions(core_iaa PRIVATE QPL_BADARG_CHECK @@ -286,10 +667,7 @@ set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS $) target_compile_options(middle_layer_lib - PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}; - ${QPL_LINUX_TOOLCHAIN_DYNAMIC_LIBRARY_FLAGS}; - $<$:-O3;-D_FORTIFY_SOURCE=2>> - PRIVATE 
$<$:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>) + PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}) target_compile_definitions(middle_layer_lib PUBLIC QPL_VERSION="${QPL_VERSION}" @@ -324,15 +702,8 @@ target_include_directories(_qpl PRIVATE $ PRIVATE $) -set_target_properties(_qpl PROPERTIES - $<$:C_STANDARD 17> - CXX_STANDARD 17) - target_compile_options(_qpl - PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}; - ${QPL_LINUX_TOOLCHAIN_DYNAMIC_LIBRARY_FLAGS}; - $<$:-O3;-D_FORTIFY_SOURCE=2>> - PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>) + PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}) target_compile_definitions(_qpl PRIVATE -DQPL_LIB diff --git a/contrib/qpl-cmake/benchmark_sample/client_scripts/allin1_ssb.sh b/contrib/qpl-cmake/benchmark_sample/client_scripts/allin1_ssb.sh deleted file mode 100644 index 31017b565b6..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/client_scripts/allin1_ssb.sh +++ /dev/null @@ -1,530 +0,0 @@ -#!/bin/bash -ckhost="localhost" -ckport=("9000" "9001" "9002" "9003") -WORKING_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.." -OUTPUT_DIR="${WORKING_DIR}/output" -LOG_DIR="${OUTPUT_DIR}/log" -RAWDATA_DIR="${WORKING_DIR}/rawdata_dir" -database_dir="${WORKING_DIR}/database_dir" -CLIENT_SCRIPTS_DIR="${WORKING_DIR}/client_scripts" -LOG_PACK_FILE="$(date +%Y-%m-%d-%H-%M-%S)" -QUERY_FILE="queries_ssb.sql" -SERVER_BIND_CMD[0]="numactl -m 0 -N 0" -SERVER_BIND_CMD[1]="numactl -m 0 -N 0" -SERVER_BIND_CMD[2]="numactl -m 1 -N 1" -SERVER_BIND_CMD[3]="numactl -m 1 -N 1" -CLIENT_BIND_CMD="" -SSB_GEN_FACTOR=20 -TABLE_NAME="lineorder_flat" -TALBE_ROWS="119994608" -CODEC_CONFIG="lz4 deflate zstd" - -# define instance number -inst_num=$1 -if [ ! -n "$1" ]; then - echo "Please clarify instance number from 1,2,3 or 4" - exit 1 -else - echo "Benchmarking with instance number:$1" -fi - -if [ ! -d "$OUTPUT_DIR" ]; then -mkdir $OUTPUT_DIR -fi -if [ ! -d "$LOG_DIR" ]; then -mkdir $LOG_DIR -fi -if [ ! 
-d "$RAWDATA_DIR" ]; then -mkdir $RAWDATA_DIR -fi - -# define different directories -dir_server=("" "_s2" "_s3" "_s4") -ckreadSql=" - CREATE TABLE customer - ( - C_CUSTKEY UInt32, - C_NAME String, - C_ADDRESS String, - C_CITY LowCardinality(String), - C_NATION LowCardinality(String), - C_REGION LowCardinality(String), - C_PHONE String, - C_MKTSEGMENT LowCardinality(String) - ) - ENGINE = MergeTree ORDER BY (C_CUSTKEY); - - CREATE TABLE lineorder - ( - LO_ORDERKEY UInt32, - LO_LINENUMBER UInt8, - LO_CUSTKEY UInt32, - LO_PARTKEY UInt32, - LO_SUPPKEY UInt32, - LO_ORDERDATE Date, - LO_ORDERPRIORITY LowCardinality(String), - LO_SHIPPRIORITY UInt8, - LO_QUANTITY UInt8, - LO_EXTENDEDPRICE UInt32, - LO_ORDTOTALPRICE UInt32, - LO_DISCOUNT UInt8, - LO_REVENUE UInt32, - LO_SUPPLYCOST UInt32, - LO_TAX UInt8, - LO_COMMITDATE Date, - LO_SHIPMODE LowCardinality(String) - ) - ENGINE = MergeTree PARTITION BY toYear(LO_ORDERDATE) ORDER BY (LO_ORDERDATE, LO_ORDERKEY); - - CREATE TABLE part - ( - P_PARTKEY UInt32, - P_NAME String, - P_MFGR LowCardinality(String), - P_CATEGORY LowCardinality(String), - P_BRAND LowCardinality(String), - P_COLOR LowCardinality(String), - P_TYPE LowCardinality(String), - P_SIZE UInt8, - P_CONTAINER LowCardinality(String) - ) - ENGINE = MergeTree ORDER BY P_PARTKEY; - - CREATE TABLE supplier - ( - S_SUPPKEY UInt32, - S_NAME String, - S_ADDRESS String, - S_CITY LowCardinality(String), - S_NATION LowCardinality(String), - S_REGION LowCardinality(String), - S_PHONE String - ) - ENGINE = MergeTree ORDER BY S_SUPPKEY; -" -supplier_table=" - CREATE TABLE supplier - ( - S_SUPPKEY UInt32, - S_NAME String, - S_ADDRESS String, - S_CITY LowCardinality(String), - S_NATION LowCardinality(String), - S_REGION LowCardinality(String), - S_PHONE String - ) - ENGINE = MergeTree ORDER BY S_SUPPKEY; -" -part_table=" - CREATE TABLE part - ( - P_PARTKEY UInt32, - P_NAME String, - P_MFGR LowCardinality(String), - P_CATEGORY LowCardinality(String), - P_BRAND LowCardinality(String), - P_COLOR LowCardinality(String), - P_TYPE LowCardinality(String), - P_SIZE UInt8, - P_CONTAINER LowCardinality(String) - ) - ENGINE = MergeTree ORDER BY P_PARTKEY; -" -lineorder_table=" - CREATE TABLE lineorder - ( - LO_ORDERKEY UInt32, - LO_LINENUMBER UInt8, - LO_CUSTKEY UInt32, - LO_PARTKEY UInt32, - LO_SUPPKEY UInt32, - LO_ORDERDATE Date, - LO_ORDERPRIORITY LowCardinality(String), - LO_SHIPPRIORITY UInt8, - LO_QUANTITY UInt8, - LO_EXTENDEDPRICE UInt32, - LO_ORDTOTALPRICE UInt32, - LO_DISCOUNT UInt8, - LO_REVENUE UInt32, - LO_SUPPLYCOST UInt32, - LO_TAX UInt8, - LO_COMMITDATE Date, - LO_SHIPMODE LowCardinality(String) - ) - ENGINE = MergeTree PARTITION BY toYear(LO_ORDERDATE) ORDER BY (LO_ORDERDATE, LO_ORDERKEY); -" -customer_table=" - CREATE TABLE customer - ( - C_CUSTKEY UInt32, - C_NAME String, - C_ADDRESS String, - C_CITY LowCardinality(String), - C_NATION LowCardinality(String), - C_REGION LowCardinality(String), - C_PHONE String, - C_MKTSEGMENT LowCardinality(String) - ) - ENGINE = MergeTree ORDER BY (C_CUSTKEY); -" - -lineorder_flat_table=" - SET max_memory_usage = 20000000000; - CREATE TABLE lineorder_flat - ENGINE = MergeTree - PARTITION BY toYear(LO_ORDERDATE) - ORDER BY (LO_ORDERDATE, LO_ORDERKEY) AS - SELECT - l.LO_ORDERKEY AS LO_ORDERKEY, - l.LO_LINENUMBER AS LO_LINENUMBER, - l.LO_CUSTKEY AS LO_CUSTKEY, - l.LO_PARTKEY AS LO_PARTKEY, - l.LO_SUPPKEY AS LO_SUPPKEY, - l.LO_ORDERDATE AS LO_ORDERDATE, - l.LO_ORDERPRIORITY AS LO_ORDERPRIORITY, - l.LO_SHIPPRIORITY AS LO_SHIPPRIORITY, - l.LO_QUANTITY AS LO_QUANTITY, - 
l.LO_EXTENDEDPRICE AS LO_EXTENDEDPRICE, - l.LO_ORDTOTALPRICE AS LO_ORDTOTALPRICE, - l.LO_DISCOUNT AS LO_DISCOUNT, - l.LO_REVENUE AS LO_REVENUE, - l.LO_SUPPLYCOST AS LO_SUPPLYCOST, - l.LO_TAX AS LO_TAX, - l.LO_COMMITDATE AS LO_COMMITDATE, - l.LO_SHIPMODE AS LO_SHIPMODE, - c.C_NAME AS C_NAME, - c.C_ADDRESS AS C_ADDRESS, - c.C_CITY AS C_CITY, - c.C_NATION AS C_NATION, - c.C_REGION AS C_REGION, - c.C_PHONE AS C_PHONE, - c.C_MKTSEGMENT AS C_MKTSEGMENT, - s.S_NAME AS S_NAME, - s.S_ADDRESS AS S_ADDRESS, - s.S_CITY AS S_CITY, - s.S_NATION AS S_NATION, - s.S_REGION AS S_REGION, - s.S_PHONE AS S_PHONE, - p.P_NAME AS P_NAME, - p.P_MFGR AS P_MFGR, - p.P_CATEGORY AS P_CATEGORY, - p.P_BRAND AS P_BRAND, - p.P_COLOR AS P_COLOR, - p.P_TYPE AS P_TYPE, - p.P_SIZE AS P_SIZE, - p.P_CONTAINER AS P_CONTAINER - FROM lineorder AS l - INNER JOIN customer AS c ON c.C_CUSTKEY = l.LO_CUSTKEY - INNER JOIN supplier AS s ON s.S_SUPPKEY = l.LO_SUPPKEY - INNER JOIN part AS p ON p.P_PARTKEY = l.LO_PARTKEY; - show settings ilike 'max_memory_usage'; -" - -function insert_data(){ - echo "insert_data:$1" - create_table_prefix="clickhouse client --host ${ckhost} --port $2 --multiquery -q" - insert_data_prefix="clickhouse client --query " - case $1 in - all) - clickhouse client --host ${ckhost} --port $2 --multiquery -q"$ckreadSql" && { - ${insert_data_prefix} "INSERT INTO customer FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/customer.tbl --port=$2 - ${insert_data_prefix} "INSERT INTO part FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/part.tbl --port=$2 - ${insert_data_prefix} "INSERT INTO supplier FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/supplier.tbl --port=$2 - ${insert_data_prefix} "INSERT INTO lineorder FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/lineorder.tbl --port=$2 - } - ${create_table_prefix}"${lineorder_flat_table}" - ;; - customer) - echo ${create_table_prefix}\"${customer_table}\" - ${create_table_prefix}"${customer_table}" && { - echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2" - ${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2 - } - ;; - part) - echo ${create_table_prefix}\"${part_table}\" - ${create_table_prefix}"${part_table}" && { - echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2" - ${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2 - } - ;; - supplier) - echo ${create_table_prefix}"${supplier_table}" - ${create_table_prefix}"${supplier_table}" && { - echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2" - ${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2 - } - ;; - lineorder) - echo ${create_table_prefix}"${lineorder_table}" - ${create_table_prefix}"${lineorder_table}" && { - echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2" - ${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2 - } - ;; - lineorder_flat) - echo ${create_table_prefix}"${lineorder_flat_table}" - ${create_table_prefix}"${lineorder_flat_table}" - return 0 - ;; - *) - exit 0 - ;; - - esac -} - -function check_sql(){ - select_sql="select * from "$1" limit 1" - clickhouse client --host ${ckhost} --port $2 --multiquery -q"${select_sql}" -} - -function check_table(){ - checknum=0 - source_tables="customer part supplier lineorder lineorder_flat" - test_tables=${1:-${source_tables}} - echo 
"Checking table data required in server..." - for i in $(seq 0 $[inst_num-1]) - do - for j in `echo ${test_tables}` - do - check_sql $j ${ckport[i]} &> /dev/null || { - let checknum+=1 && insert_data "$j" ${ckport[i]} - } - done - done - - for i in $(seq 0 $[inst_num-1]) - do - echo "clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q\"select count() from ${TABLE_NAME};\"" - var=$(clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q"select count() from ${TABLE_NAME};") - if [ $var -eq $TALBE_ROWS ];then - echo "Instance_${i} Table data integrity check OK -> Rows:$var" - else - echo "Instance_${i} Table data integrity check Failed -> Rows:$var" - exit 1 - fi - done - if [ $checknum -gt 0 ];then - echo "Need sleep 10s after first table data insertion...$checknum" - sleep 10 - fi -} - -function check_instance(){ -instance_alive=0 -for i in {1..10} -do - sleep 1 - netstat -nltp | grep ${1} > /dev/null - if [ $? -ne 1 ];then - instance_alive=1 - break - fi - -done - -if [ $instance_alive -eq 0 ];then - echo "check_instance -> clickhouse server instance faild to launch due to 10s timeout!" - exit 1 -else - echo "check_instance -> clickhouse server instance launch successfully!" -fi -} - -function start_clickhouse_for_insertion(){ - echo "start_clickhouse_for_insertion" - for i in $(seq 0 $[inst_num-1]) - do - echo "cd ${database_dir}/$1${dir_server[i]}" - echo "${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&${LOG_DIR}/${1}_${i}_server_log& > /dev/null" - - cd ${database_dir}/$1${dir_server[i]} - ${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&${LOG_DIR}/${1}_${i}_server_log& > /dev/null - check_instance ${ckport[i]} - done -} - -function start_clickhouse_for_stressing(){ - echo "start_clickhouse_for_stressing" - for i in $(seq 0 $[inst_num-1]) - do - echo "cd ${database_dir}/$1${dir_server[i]}" - echo "${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&/dev/null&" - - cd ${database_dir}/$1${dir_server[i]} - ${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&/dev/null& - check_instance ${ckport[i]} - done -} -yum -y install git make gcc sudo net-tools &> /dev/null -pip3 install clickhouse_driver numpy &> /dev/null -test -d ${RAWDATA_DIR}/ssb-dbgen || git clone https://github.com/vadimtk/ssb-dbgen.git ${RAWDATA_DIR}/ssb-dbgen && cd ${RAWDATA_DIR}/ssb-dbgen - -if [ ! 
-f ${RAWDATA_DIR}/ssb-dbgen/dbgen ];then - make && { - test -f ${RAWDATA_DIR}/ssb-dbgen/customer.tbl || echo y |./dbgen -s ${SSB_GEN_FACTOR} -T c - test -f ${RAWDATA_DIR}/ssb-dbgen/part.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T p - test -f ${RAWDATA_DIR}/ssb-dbgen/supplier.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T s - test -f ${RAWDATA_DIR}/ssb-dbgen/date.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T d - test -f ${RAWDATA_DIR}/ssb-dbgen/lineorder.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T l - } -else - test -f ${RAWDATA_DIR}/ssb-dbgen/customer.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T c - test -f ${RAWDATA_DIR}/ssb-dbgen/part.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T p - test -f ${RAWDATA_DIR}/ssb-dbgen/supplier.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T s - test -f ${RAWDATA_DIR}/ssb-dbgen/date.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T d - test -f ${RAWDATA_DIR}/ssb-dbgen/lineorder.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T l - -fi - -filenum=`find ${RAWDATA_DIR}/ssb-dbgen/ -name "*.tbl" | wc -l` - -if [ $filenum -ne 5 ];then - echo "generate ssb data file *.tbl faild" - exit 1 -fi - -function kill_instance(){ -instance_alive=1 -for i in {1..2} -do - pkill clickhouse && sleep 5 - instance_alive=0 - for i in $(seq 0 $[inst_num-1]) - do - netstat -nltp | grep ${ckport[i]} > /dev/null - if [ $? -ne 1 ];then - instance_alive=1 - break; - fi - done - if [ $instance_alive -eq 0 ];then - break; - fi -done -if [ $instance_alive -eq 0 ];then - echo "kill_instance OK!" -else - echo "kill_instance Failed -> clickhouse server instance still alive due to 10s timeout" - exit 1 -fi -} - -function run_test(){ -is_xml=0 -for i in $(seq 0 $[inst_num-1]) -do - if [ -f ${database_dir}/${1}${dir_server[i]}/config_${1}${dir_server[i]}.xml ]; then - is_xml=$[is_xml+1] - fi -done -if [ $is_xml -eq $inst_num ];then - echo "Benchmark with $inst_num instance" - start_clickhouse_for_insertion ${1} - - for i in $(seq 0 $[inst_num-1]) - do - clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q"show databases;" >/dev/null - done - - if [ $? -eq 0 ];then - check_table - fi - kill_instance - - if [ $1 == "deflate" ];then - test -f ${LOG_DIR}/${1}_server_log && deflatemsg=`cat ${LOG_DIR}/${1}_server_log | grep DeflateJobHWPool` - if [ -n "$deflatemsg" ];then - echo ------------------------------------------------------ - echo $deflatemsg - echo ------------------------------------------------------ - fi - fi - echo "Check table data required in server_${1} -> Done! " - - start_clickhouse_for_stressing ${1} - for i in $(seq 0 $[inst_num-1]) - do - clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q"show databases;" >/dev/null - done - if [ $? -eq 0 ];then - test -d ${CLIENT_SCRIPTS_DIR} && cd ${CLIENT_SCRIPTS_DIR} - echo "Client stressing... " - echo "${CLIENT_BIND_CMD} python3 client_stressing_test.py ${QUERY_FILE} $inst_num &> ${LOG_DIR}/${1}.log" - ${CLIENT_BIND_CMD} python3 client_stressing_test.py ${QUERY_FILE} $inst_num &> ${LOG_DIR}/${1}.log - echo "Completed client stressing, checking log... 
" - finish_log=`grep "Finished" ${LOG_DIR}/${1}.log | wc -l` - if [ $finish_log -eq 1 ] ;then - kill_instance - test -f ${LOG_DIR}/${1}.log && echo "${1}.log ===> ${LOG_DIR}/${1}.log" - else - kill_instance - echo "No find 'Finished' in client log -> Performance test may fail" - exit 1 - - fi - - else - echo "${1} clickhouse server start fail" - exit 1 - fi -else - echo "clickhouse server start fail -> Please check xml files required in ${database_dir} for each instance" - exit 1 - -fi -} -function clear_log(){ - if [ -d "$LOG_DIR" ]; then - cd ${LOG_DIR} && rm -rf * - fi -} - -function gather_log_for_codec(){ - cd ${OUTPUT_DIR} && mkdir -p ${LOG_PACK_FILE}/${1} - cp -rf ${LOG_DIR} ${OUTPUT_DIR}/${LOG_PACK_FILE}/${1} -} - -function pack_log(){ - if [ -e "${OUTPUT_DIR}/run.log" ]; then - cp ${OUTPUT_DIR}/run.log ${OUTPUT_DIR}/${LOG_PACK_FILE}/ - fi - echo "Please check all log information in ${OUTPUT_DIR}/${LOG_PACK_FILE}" -} - -function setup_check(){ - - iax_dev_num=`accel-config list | grep iax | wc -l` - if [ $iax_dev_num -eq 0 ] ;then - iax_dev_num=`accel-config list | grep iax | wc -l` - if [ $iax_dev_num -eq 0 ] ;then - echo "No IAA devices available -> Please check IAA hardware setup manually!" - exit 1 - else - echo "IAA enabled devices number:$iax_dev_num" - fi - else - echo "IAA enabled devices number:$iax_dev_num" - fi - libaccel_version=`accel-config -v` - clickhouser_version=`clickhouse server --version` - kernel_dxd_log=`dmesg | grep dxd` - echo "libaccel_version:$libaccel_version" - echo "clickhouser_version:$clickhouser_version" - echo -e "idxd section in kernel log:\n$kernel_dxd_log" -} - -setup_check -export CLICKHOUSE_WATCHDOG_ENABLE=0 -for i in ${CODEC_CONFIG[@]} -do - clear_log - codec=${i} - echo "run test------------$codec" - run_test $codec - gather_log_for_codec $codec -done - -pack_log -echo "Done." 
\ No newline at end of file diff --git a/contrib/qpl-cmake/benchmark_sample/client_scripts/client_stressing_test.py b/contrib/qpl-cmake/benchmark_sample/client_scripts/client_stressing_test.py deleted file mode 100644 index f12381a198c..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/client_scripts/client_stressing_test.py +++ /dev/null @@ -1,278 +0,0 @@ -from operator import eq -import os -import random -import time -import sys -from clickhouse_driver import Client -import numpy as np -import subprocess -import multiprocessing -from multiprocessing import Manager - -warmup_runs = 10 -calculated_runs = 10 -seconds = 30 -max_instances_number = 8 -retest_number = 3 -retest_tolerance = 10 - - -def checkInt(str): - try: - int(str) - return True - except ValueError: - return False - - -def setup_client(index): - if index < 4: - port_idx = index - else: - port_idx = index + 4 - client = Client( - host="localhost", - database="default", - user="default", - password="", - port="900%d" % port_idx, - ) - union_mode_query = "SET union_default_mode='DISTINCT'" - client.execute(union_mode_query) - return client - - -def warm_client(clientN, clientL, query, loop): - for c_idx in range(clientN): - for _ in range(loop): - clientL[c_idx].execute(query) - - -def read_queries(queries_list): - queries = list() - queries_id = list() - with open(queries_list, "r") as f: - for line in f: - line = line.rstrip() - line = line.split("$") - queries_id.append(line[0]) - queries.append(line[1]) - return queries_id, queries - - -def run_task(client, cname, query, loop, query_latency): - start_time = time.time() - for i in range(loop): - client.execute(query) - query_latency.append(client.last_query.elapsed) - - end_time = time.time() - p95 = np.percentile(query_latency, 95) - print( - "CLIENT: {0} end. 
-> P95: %f, qps: %f".format(cname) - % (p95, loop / (end_time - start_time)) - ) - - -def run_multi_clients(clientN, clientList, query, loop): - client_pids = {} - start_time = time.time() - manager = multiprocessing.Manager() - query_latency_list0 = manager.list() - query_latency_list1 = manager.list() - query_latency_list2 = manager.list() - query_latency_list3 = manager.list() - query_latency_list4 = manager.list() - query_latency_list5 = manager.list() - query_latency_list6 = manager.list() - query_latency_list7 = manager.list() - - for c_idx in range(clientN): - client_name = "Role_%d" % c_idx - if c_idx == 0: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list0), - ) - elif c_idx == 1: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list1), - ) - elif c_idx == 2: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list2), - ) - elif c_idx == 3: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list3), - ) - elif c_idx == 4: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list4), - ) - elif c_idx == 5: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list5), - ) - elif c_idx == 6: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list6), - ) - elif c_idx == 7: - client_pids[c_idx] = multiprocessing.Process( - target=run_task, - args=(clientList[c_idx], client_name, query, loop, query_latency_list7), - ) - else: - print("ERROR: CLIENT number dismatch!!") - exit() - print("CLIENT: %s start" % client_name) - client_pids[c_idx].start() - - for c_idx in range(clientN): - client_pids[c_idx].join() - end_time = time.time() - totalT = end_time - start_time - - query_latencyTotal = list() - for item in query_latency_list0: - query_latencyTotal.append(item) - for item in query_latency_list1: - query_latencyTotal.append(item) - for item in query_latency_list2: - query_latencyTotal.append(item) - for item in query_latency_list3: - query_latencyTotal.append(item) - for item in query_latency_list4: - query_latencyTotal.append(item) - for item in query_latency_list5: - query_latencyTotal.append(item) - for item in query_latency_list6: - query_latencyTotal.append(item) - for item in query_latency_list7: - query_latencyTotal.append(item) - - totalP95 = np.percentile(query_latencyTotal, 95) * 1000 - return totalT, totalP95 - - -def run_task_caculated(client, cname, query, loop): - query_latency = list() - start_time = time.time() - for i in range(loop): - client.execute(query) - query_latency.append(client.last_query.elapsed) - end_time = time.time() - p95 = np.percentile(query_latency, 95) - - -def run_multi_clients_caculated(clientN, clientList, query, loop): - client_pids = {} - start_time = time.time() - for c_idx in range(clientN): - client_name = "Role_%d" % c_idx - client_pids[c_idx] = multiprocessing.Process( - target=run_task_caculated, - args=(clientList[c_idx], client_name, query, loop), - ) - client_pids[c_idx].start() - for c_idx in range(clientN): - client_pids[c_idx].join() - end_time = time.time() - 
totalT = end_time - start_time - return totalT - - -if __name__ == "__main__": - client_number = 1 - queries = list() - queries_id = list() - - if len(sys.argv) != 3: - print( - "usage: python3 client_stressing_test.py [queries_file_path] [client_number]" - ) - sys.exit() - else: - queries_list = sys.argv[1] - client_number = int(sys.argv[2]) - print( - "queries_file_path: %s, client_number: %d" % (queries_list, client_number) - ) - if not os.path.isfile(queries_list) or not os.access(queries_list, os.R_OK): - print("please check the right path for queries file") - sys.exit() - if ( - not checkInt(sys.argv[2]) - or int(sys.argv[2]) > max_instances_number - or int(sys.argv[2]) < 1 - ): - print("client_number should be in [1~%d]" % max_instances_number) - sys.exit() - - client_list = {} - queries_id, queries = read_queries(queries_list) - - for c_idx in range(client_number): - client_list[c_idx] = setup_client(c_idx) - # clear cache - os.system("sync; echo 3 > /proc/sys/vm/drop_caches") - - print("###Polit Run Begin") - for i in queries: - warm_client(client_number, client_list, i, 1) - print("###Polit Run End -> Start stressing....") - - query_index = 0 - for q in queries: - print( - "\n###START -> Index: %d, ID: %s, Query: %s" - % (query_index, queries_id[query_index], q) - ) - warm_client(client_number, client_list, q, warmup_runs) - print("###Warm Done!") - for j in range(0, retest_number): - totalT = run_multi_clients_caculated( - client_number, client_list, q, calculated_runs - ) - curr_loop = int(seconds * calculated_runs / totalT) + 1 - print( - "###Calculation Done! -> loopN: %d, expected seconds:%d" - % (curr_loop, seconds) - ) - - print("###Stress Running! -> %d iterations......" % curr_loop) - - totalT, totalP95 = run_multi_clients( - client_number, client_list, q, curr_loop - ) - - if totalT > (seconds - retest_tolerance) and totalT < ( - seconds + retest_tolerance - ): - break - else: - print( - "###totalT:%d is far way from expected seconds:%d. Run again ->j:%d!" - % (totalT, seconds, j) - ) - - print( - "###Completed! 
-> ID: %s, clientN: %d, totalT: %.2f s, latencyAVG: %.2f ms, P95: %.2f ms, QPS_Final: %.2f" - % ( - queries_id[query_index], - client_number, - totalT, - totalT * 1000 / (curr_loop * client_number), - totalP95, - ((curr_loop * client_number) / totalT), - ) - ) - query_index += 1 - print("###Finished!") diff --git a/contrib/qpl-cmake/benchmark_sample/client_scripts/queries_ssb.sql b/contrib/qpl-cmake/benchmark_sample/client_scripts/queries_ssb.sql deleted file mode 100644 index abf2df6503a..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/client_scripts/queries_ssb.sql +++ /dev/null @@ -1,10 +0,0 @@ -Q1.1$SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue FROM lineorder_flat WHERE toYear(LO_ORDERDATE) = 1993 AND LO_DISCOUNT BETWEEN 1 AND 3 AND LO_QUANTITY < 25; -Q2.1$SELECT sum(LO_REVENUE),toYear(LO_ORDERDATE) AS year,P_BRAND FROM lineorder_flat WHERE P_CATEGORY = 'MFGR#12' AND S_REGION = 'AMERICA' GROUP BY year,P_BRAND ORDER BY year,P_BRAND; -Q2.2$SELECT sum(LO_REVENUE),toYear(LO_ORDERDATE) AS year,P_BRAND FROM lineorder_flat WHERE P_BRAND >= 'MFGR#2221' AND P_BRAND <= 'MFGR#2228' AND S_REGION = 'ASIA' GROUP BY year,P_BRAND ORDER BY year,P_BRAND; -Q2.3$SELECT sum(LO_REVENUE),toYear(LO_ORDERDATE) AS year,P_BRAND FROM lineorder_flat WHERE P_BRAND = 'MFGR#2239' AND S_REGION = 'EUROPE' GROUP BY year,P_BRAND ORDER BY year,P_BRAND; -Q3.1$SELECT C_NATION,S_NATION,toYear(LO_ORDERDATE) AS year,sum(LO_REVENUE) AS revenue FROM lineorder_flat WHERE C_REGION = 'ASIA' AND S_REGION = 'ASIA' AND year >= 1992 AND year <= 1997 GROUP BY C_NATION,S_NATION,year ORDER BY year ASC,revenue DESC; -Q3.2$SELECT C_CITY,S_CITY,toYear(LO_ORDERDATE) AS year,sum(LO_REVENUE) AS revenue FROM lineorder_flat WHERE C_NATION = 'UNITED STATES' AND S_NATION = 'UNITED STATES' AND year >= 1992 AND year <= 1997 GROUP BY C_CITY,S_CITY,year ORDER BY year ASC,revenue DESC; -Q3.3$SELECT C_CITY,S_CITY,toYear(LO_ORDERDATE) AS year,sum(LO_REVENUE) AS revenue FROM lineorder_flat WHERE (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') AND year >= 1992 AND year <= 1997 GROUP BY C_CITY,S_CITY,year ORDER BY year ASC,revenue DESC; -Q4.1$SELECT toYear(LO_ORDERDATE) AS year,C_NATION,sum(LO_REVENUE - LO_SUPPLYCOST) AS profit FROM lineorder_flat WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') GROUP BY year,C_NATION ORDER BY year ASC,C_NATION ASC; -Q4.2$SELECT toYear(LO_ORDERDATE) AS year,S_NATION,P_CATEGORY,sum(LO_REVENUE - LO_SUPPLYCOST) AS profit FROM lineorder_flat WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (year = 1997 OR year = 1998) AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') GROUP BY year,S_NATION,P_CATEGORY ORDER BY year ASC,S_NATION ASC,P_CATEGORY ASC; -Q4.3$SELECT toYear(LO_ORDERDATE) AS year,S_CITY,P_BRAND,sum(LO_REVENUE - LO_SUPPLYCOST) AS profit FROM lineorder_flat WHERE S_NATION = 'UNITED STATES' AND (year = 1997 OR year = 1998) AND P_CATEGORY = 'MFGR#14' GROUP BY year,S_CITY,P_BRAND ORDER BY year ASC,S_CITY ASC,P_BRAND ASC; diff --git a/contrib/qpl-cmake/benchmark_sample/client_scripts/run_ssb.sh b/contrib/qpl-cmake/benchmark_sample/client_scripts/run_ssb.sh deleted file mode 100644 index 6067b1058f2..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/client_scripts/run_ssb.sh +++ /dev/null @@ -1,6 +0,0 @@ -WORKING_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.." -if [ ! 
-d "${WORKING_DIR}/output" ]; then -mkdir ${WORKING_DIR}/output -fi -bash allin1_ssb.sh 2 > ${WORKING_DIR}/output/run.log -echo "Please check log in: ${WORKING_DIR}/output/run.log" \ No newline at end of file diff --git a/contrib/qpl-cmake/benchmark_sample/database_dir/deflate/config_deflate.xml b/contrib/qpl-cmake/benchmark_sample/database_dir/deflate/config_deflate.xml deleted file mode 100644 index ab77a9cdcbe..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/database_dir/deflate/config_deflate.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - trace - true - - - 8123 - 9000 - 9004 - - ./ - - 8589934592 - 5368709120 - true - - - - deflate_qpl - - - - - - - - - ::/0 - - - default - default - 1 - - - - - - - - - - - diff --git a/contrib/qpl-cmake/benchmark_sample/database_dir/deflate_s2/config_deflate_s2.xml b/contrib/qpl-cmake/benchmark_sample/database_dir/deflate_s2/config_deflate_s2.xml deleted file mode 100644 index b71456486f5..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/database_dir/deflate_s2/config_deflate_s2.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - trace - true - - - 8124 - 9001 - 9005 - - ./ - - 8589934592 - 5368709120 - true - - - - deflate_qpl - - - - - - - - - ::/0 - - - default - default - 1 - - - - - - - - - - - diff --git a/contrib/qpl-cmake/benchmark_sample/database_dir/lz4/config_lz4.xml b/contrib/qpl-cmake/benchmark_sample/database_dir/lz4/config_lz4.xml deleted file mode 100644 index f4dc59b60aa..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/database_dir/lz4/config_lz4.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - trace - true - - - 8123 - 9000 - 9004 - - ./ - - 8589934592 - 5368709120 - true - - - - lz4 - - - - - - - - - ::/0 - - - default - default - 1 - - - - - - - - - - - diff --git a/contrib/qpl-cmake/benchmark_sample/database_dir/lz4_s2/config_lz4_s2.xml b/contrib/qpl-cmake/benchmark_sample/database_dir/lz4_s2/config_lz4_s2.xml deleted file mode 100644 index 357db8942d7..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/database_dir/lz4_s2/config_lz4_s2.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - trace - true - - - 8124 - 9001 - 9005 - - ./ - - 8589934592 - 5368709120 - true - - - - lz4 - - - - - - - - - ::/0 - - - default - default - 1 - - - - - - - - - - - diff --git a/contrib/qpl-cmake/benchmark_sample/database_dir/zstd/config_zstd.xml b/contrib/qpl-cmake/benchmark_sample/database_dir/zstd/config_zstd.xml deleted file mode 100644 index 1c4c738edaf..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/database_dir/zstd/config_zstd.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - trace - true - - - 8123 - 9000 - 9004 - - ./ - - 8589934592 - 5368709120 - true - - - - zstd - - - - - - - - - ::/0 - - - default - default - 1 - - - - - - - - - - - diff --git a/contrib/qpl-cmake/benchmark_sample/database_dir/zstd_s2/config_zstd_s2.xml b/contrib/qpl-cmake/benchmark_sample/database_dir/zstd_s2/config_zstd_s2.xml deleted file mode 100644 index f3db01b7739..00000000000 --- a/contrib/qpl-cmake/benchmark_sample/database_dir/zstd_s2/config_zstd_s2.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - trace - true - - - 8124 - 9001 - 9005 - - ./ - - 8589934592 - 5368709120 - true - - - - zstd - - - - - - - - - ::/0 - - - default - default - 1 - - - - - - - - - - - diff --git a/contrib/re2-cmake/CMakeLists.txt b/contrib/re2-cmake/CMakeLists.txt index e72b5e1fca8..f773bc65a69 100644 --- a/contrib/re2-cmake/CMakeLists.txt +++ b/contrib/re2-cmake/CMakeLists.txt @@ -27,6 +27,17 @@ set(RE2_SOURCES add_library(_re2 ${RE2_SOURCES}) target_include_directories(_re2 PUBLIC 
"${SRC_DIR}") -target_link_libraries(_re2 ch_contrib::abseil_str_format) +target_link_libraries(_re2 PRIVATE + absl::base + absl::core_headers + absl::fixed_array + absl::flat_hash_map + absl::flat_hash_set + absl::inlined_vector + absl::strings + absl::str_format + absl::synchronization + absl::optional + absl::span) add_library(ch_contrib::re2 ALIAS _re2) diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index 466adf6aff0..7d7666dff87 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -76,7 +76,6 @@ else() endif() endif() -include(CheckCCompilerFlag) if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") if(POWER9) set(HAS_POWER9 1) @@ -88,26 +87,15 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64") - CHECK_C_COMPILER_FLAG("-march=armv8-a+crc+crypto" HAS_ARMV8_CRC) - if(HAS_ARMV8_CRC) - message(STATUS " HAS_ARMV8_CRC yes") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function") - endif(HAS_ARMV8_CRC) + set(HAS_ARMV8_CRC 1) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function") endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64") -include(CheckCXXSourceCompiles) -if(NOT MSVC) - set(CMAKE_REQUIRED_FLAGS "-msse4.2 -mpclmul") -endif() - -unset(CMAKE_REQUIRED_FLAGS) -if(HAVE_SSE42) +if(ENABLE_AVX2 AND ENABLE_PCLMULQDQ) add_definitions(-DHAVE_SSE42) add_definitions(-DHAVE_PCLMUL) -elseif(FORCE_SSE42) - message(FATAL_ERROR "FORCE_SSE42=ON but unable to compile with SSE4.2 enabled") endif() set (HAVE_THREAD_LOCAL 1) @@ -121,75 +109,18 @@ elseif(CMAKE_SYSTEM_NAME MATCHES "Linux") add_definitions(-DOS_LINUX) elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS") add_definitions(-DOS_SOLARIS) -elseif(CMAKE_SYSTEM_NAME MATCHES "kFreeBSD") - add_definitions(-DOS_GNU_KFREEBSD) elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD") add_definitions(-DOS_FREEBSD) -elseif(CMAKE_SYSTEM_NAME MATCHES "NetBSD") - add_definitions(-DOS_NETBSD) -elseif(CMAKE_SYSTEM_NAME MATCHES "OpenBSD") - add_definitions(-DOS_OPENBSD) -elseif(CMAKE_SYSTEM_NAME MATCHES "DragonFly") - add_definitions(-DOS_DRAGONFLYBSD) elseif(CMAKE_SYSTEM_NAME MATCHES "Android") add_definitions(-DOS_ANDROID) -elseif(CMAKE_SYSTEM_NAME MATCHES "Windows") - add_definitions(-DWIN32 -DOS_WIN -D_MBCS -DWIN64 -DNOMINMAX) - if(MINGW) - add_definitions(-D_WIN32_WINNT=_WIN32_WINNT_VISTA) - endif() endif() -if(NOT WIN32) - add_definitions(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX) -endif() +add_definitions(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX) -option(WITH_FALLOCATE "build with fallocate" ON) -if(WITH_FALLOCATE) - CHECK_C_SOURCE_COMPILES(" -#include -#include -int main() { - int fd = open(\"/dev/null\", 0); - fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1024); -} -" HAVE_FALLOCATE) - if(HAVE_FALLOCATE) - add_definitions(-DROCKSDB_FALLOCATE_PRESENT) - endif() -endif() - -CHECK_C_SOURCE_COMPILES(" -#include -int main() { - int fd = open(\"/dev/null\", 0); - sync_file_range(fd, 0, 1024, SYNC_FILE_RANGE_WRITE); -} -" HAVE_SYNC_FILE_RANGE_WRITE) -if(HAVE_SYNC_FILE_RANGE_WRITE) - add_definitions(-DROCKSDB_RANGESYNC_PRESENT) -endif() - -CHECK_C_SOURCE_COMPILES(" -#include -int main() { - (void) 
PTHREAD_MUTEX_ADAPTIVE_NP; -} -" HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) -if(HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) +if (OS_LINUX OR OS_FREEBSD) add_definitions(-DROCKSDB_PTHREAD_ADAPTIVE_MUTEX) endif() -include(CheckCXXSymbolExists) -if (OS_FREEBSD) - check_cxx_symbol_exists(malloc_usable_size "${ROCKSDB_SOURCE_DIR}/malloc_np.h" HAVE_MALLOC_USABLE_SIZE) -else() - check_cxx_symbol_exists(malloc_usable_size "${ROCKSDB_SOURCE_DIR}/malloc.h" HAVE_MALLOC_USABLE_SIZE) -endif() -if(HAVE_MALLOC_USABLE_SIZE) - add_definitions(-DROCKSDB_MALLOC_USABLE_SIZE) -endif() - if (OS_LINUX) add_definitions(-DROCKSDB_SCHED_GETCPU_PRESENT) add_definitions(-DROCKSDB_AUXV_SYSAUXV_PRESENT) @@ -204,7 +135,6 @@ include_directories("${ROCKSDB_SOURCE_DIR}/include") if(WITH_FOLLY_DISTRIBUTED_MUTEX) include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly") endif() -find_package(Threads REQUIRED) # Main library source code @@ -497,7 +427,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc rocksdb_build_version.cc) -if(HAVE_SSE42 AND NOT MSVC) +if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ) set_source_files_properties( "${ROCKSDB_SOURCE_DIR}/util/crc32c.cc" PROPERTIES COMPILE_FLAGS "-msse4.2 -mpclmul") diff --git a/contrib/thrift-cmake/CMakeLists.txt b/contrib/thrift-cmake/CMakeLists.txt index d6aa6b9e5f2..89a444cfb83 100644 --- a/contrib/thrift-cmake/CMakeLists.txt +++ b/contrib/thrift-cmake/CMakeLists.txt @@ -47,8 +47,6 @@ set(thriftcpp_threads_SOURCES "${LIBRARY_DIR}/src/thrift/concurrency/Mutex.cpp" ) -include("${ClickHouse_SOURCE_DIR}/contrib/thrift/build/cmake/ConfigureChecks.cmake") # makes config.h - set (HAVE_ARPA_INET_H 1) set (HAVE_FCNTL_H 1) set (HAVE_GETOPT_H 1) @@ -81,10 +79,6 @@ if (OS_LINUX AND NOT USE_MUSL) set (STRERROR_R_CHAR_P 1) endif () -#set(PACKAGE ${PACKAGE_NAME}) -#set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}") -#set(VERSION ${thrift_VERSION}) - # generate a config.h file configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build/cmake/config.h.in" "${CMAKE_CURRENT_BINARY_DIR}/thrift/config.h") diff --git a/contrib/update-submodules.sh b/contrib/update-submodules.sh index b612d25352b..b12f3f924dc 100755 --- a/contrib/update-submodules.sh +++ b/contrib/update-submodules.sh @@ -9,4 +9,16 @@ cd $GIT_DIR contrib/sparse-checkout/setup-sparse-checkout.sh git submodule init git submodule sync -git config --file .gitmodules --get-regexp .*path | sed 's/[^ ]* //' | xargs -I _ --max-procs 64 git submodule update --depth=1 --single-branch _ +# NOTE: do not use --remote for the `git submodule update`[1] command, since the submodule references a specific commit SHA1 in the subproject. +# It may cause unexpected behavior. Instead, you need to commit a new SHA1 for the submodule. +# +# [1] - https://git-scm.com/book/en/v2/Git-Tools-Submodules +git config --file .gitmodules --get-regexp '.*path' | sed 's/[^ ]* //' | xargs -I _ --max-procs 64 git submodule update --depth=1 --single-branch _ + +# We don't want to depend on any third-party CMake files. +# To check it, find and delete them. 
+grep -o -P '"contrib/[^"]+"' .gitmodules | + grep -v -P 'contrib/(llvm-project|google-protobuf|grpc|abseil-cpp|corrosion)' | + xargs -I@ find @ \ + -'(' -name 'CMakeLists.txt' -or -name '*.cmake' -')' -and -not -name '*.h.cmake' \ + -delete diff --git a/contrib/xz-cmake/CMakeLists.txt b/contrib/xz-cmake/CMakeLists.txt index c3a8203c83e..c73433d9863 100644 --- a/contrib/xz-cmake/CMakeLists.txt +++ b/contrib/xz-cmake/CMakeLists.txt @@ -98,8 +98,6 @@ if (ARCH_S390X) add_compile_definitions(WORDS_BIGENDIAN) endif () -find_package(Threads REQUIRED) - add_library(_liblzma ${SRC_DIR}/src/common/mythread.h diff --git a/docker/docs/builder/Dockerfile b/docker/docs/builder/Dockerfile index 3ca2bdafcb3..b7b706a8a5c 100644 --- a/docker/docs/builder/Dockerfile +++ b/docker/docs/builder/Dockerfile @@ -4,8 +4,8 @@ FROM node:16-alpine RUN apk add --no-cache git openssh bash -# At this point we want to really update /opt/clickhouse-docs -# despite the cached images +# At this point we want to really update /opt/clickhouse-docs directory +# So we reset the cache ARG CACHE_INVALIDATOR=0 RUN git clone https://github.com/ClickHouse/clickhouse-docs.git \ diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index 1f4fd39bc26..b174dfde675 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="23.10.3.5" +ARG VERSION="23.10.5.20" ARG PACKAGES="clickhouse-keeper" # user/group precreated explicitly with fixed uid/gid on purpose. diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index fb033e28959..20fb97c80bb 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -6,29 +6,27 @@ FROM clickhouse/test-util:latest AS cctools ENV CC=clang-${LLVM_VERSION} ENV CXX=clang++-${LLVM_VERSION} # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -# DO NOT PUT ANYTHING BEFORE THREE NEXT `RUN` DIRECTIVES +# DO NOT PUT ANYTHING BEFORE THE NEXT TWO `RUN` DIRECTIVES # THE MOST HEAVY OPERATION MUST BE THE FIRST IN THE CACHE # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # libtapi is required to support .tbh format from recent MacOS SDKs -RUN git clone --depth 1 https://github.com/tpoechtrager/apple-libtapi.git \ +RUN git clone https://github.com/tpoechtrager/apple-libtapi.git \ && cd apple-libtapi \ + && git checkout 15dfc2a8c9a2a89d06ff227560a69f5265b692f9 \ && INSTALLPREFIX=/cctools ./build.sh \ && ./install.sh \ && cd .. \ && rm -rf apple-libtapi # Build and install tools for cross-linking to Darwin (x86-64) -RUN git clone --depth 1 https://github.com/tpoechtrager/cctools-port.git \ +# Build and install tools for cross-linking to Darwin (aarch64) +RUN git clone https://github.com/tpoechtrager/cctools-port.git \ && cd cctools-port/cctools \ + && git checkout 2a3e1c2a6ff54a30f898b70cfb9ba1692a55fad7 \ && ./configure --prefix=/cctools --with-libtapi=/cctools \ --target=x86_64-apple-darwin \ && make install -j$(nproc) \ - && cd ../.. 
\ - && rm -rf cctools-port - -# Build and install tools for cross-linking to Darwin (aarch64) -RUN git clone --depth 1 https://github.com/tpoechtrager/cctools-port.git \ - && cd cctools-port/cctools \ + && make clean \ && ./configure --prefix=/cctools --with-libtapi=/cctools \ --target=aarch64-apple-darwin \ && make install -j$(nproc) \ @@ -62,19 +60,12 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ rustup target add aarch64-unknown-linux-musl && \ rustup target add riscv64gc-unknown-linux-gnu -# NOTE: Seems like gcc-11 is too new for ubuntu20 repository # A cross-linker for RISC-V 64 (we need it, because LLVM's LLD does not work): RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \ && apt-get update \ && apt-get install --yes \ binutils-riscv64-linux-gnu \ build-essential \ - g++-11 \ - gcc-11 \ - gcc-aarch64-linux-gnu \ - libc6 \ - libc6-dev \ - libc6-dev-arm64-cross \ python3-boto3 \ yasm \ zstd \ diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index 150ce1ab385..fd9bfcaabb2 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -22,6 +22,7 @@ if [ "$EXTRACT_TOOLCHAIN_DARWIN" = "1" ]; then fi fi + # Uncomment to debug ccache. Don't put ccache log in /output right away, or it # will be confusingly packed into the "performance" package. # export CCACHE_LOGFILE=/build/ccache.log @@ -32,6 +33,7 @@ mkdir -p /build/build_docker cd /build/build_docker rm -f CMakeCache.txt + if [ -n "$MAKE_DEB" ]; then rm -rf /build/packages/root # NOTE: this is for backward compatibility with previous releases, @@ -177,11 +179,12 @@ then tar c -C /build/ --exclude='.git/modules/**' .git | tar x -C "$PERF_OUTPUT"/ch # Create branch pr and origin/master to have them for the following performance comparison git -C "$PERF_OUTPUT"/ch branch pr - git -C "$PERF_OUTPUT"/ch fetch --no-tags --depth 50 origin master:origin/master + git -C "$PERF_OUTPUT"/ch fetch --no-tags --no-recurse-submodules --depth 50 origin master:origin/master # Clean remote, to not have it stale git -C "$PERF_OUTPUT"/ch remote | xargs -n1 git -C "$PERF_OUTPUT"/ch remote remove # And clean all tags - git -C "$PERF_OUTPUT"/ch tag | xargs git -C "$PERF_OUTPUT"/ch tag -d + echo "Deleting $(git -C "$PERF_OUTPUT"/ch tag | wc -l) tags" + git -C "$PERF_OUTPUT"/ch tag | xargs git -C "$PERF_OUTPUT"/ch tag -d >/dev/null git -C "$PERF_OUTPUT"/ch reset --soft pr git -C "$PERF_OUTPUT"/ch log -5 ( diff --git a/docker/packager/packager b/docker/packager/packager index e63a4912e7c..b5bcbada1da 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -236,16 +236,14 @@ def parse_env_variables( cc = compiler result.append("DEB_ARCH=amd64") - cxx = cc.replace("gcc", "g++").replace("clang", "clang++") + cxx = cc.replace("clang", "clang++") if package_type == "deb": - # NOTE: This are the env for packages/build script + # NOTE: This is the env for packages/build script result.append("MAKE_DEB=true") cmake_flags.append("-DENABLE_TESTS=0") cmake_flags.append("-DENABLE_UTILS=0") - cmake_flags.append("-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON") cmake_flags.append("-DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON") - cmake_flags.append("-DCMAKE_AUTOGEN_VERBOSE=ON") cmake_flags.append("-DCMAKE_INSTALL_PREFIX=/usr") cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc") cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var") @@ -265,12 +263,7 @@ def parse_env_variables( elif package_type == "fuzzers": cmake_flags.append("-DENABLE_FUZZING=1") cmake_flags.append("-DENABLE_PROTOBUF=1") - 
cmake_flags.append("-DUSE_INTERNAL_PROTOBUF_LIBRARY=1") cmake_flags.append("-DWITH_COVERAGE=1") - cmake_flags.append("-DCMAKE_AUTOGEN_VERBOSE=ON") - # cmake_flags.append("-DCMAKE_INSTALL_PREFIX=/usr") - # cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc") - # cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var") # Reduce linking and building time by avoid *install/all dependencies cmake_flags.append("-DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON") diff --git a/docker/server/.dockerignore b/docker/server/.dockerignore deleted file mode 100644 index d360712c18f..00000000000 --- a/docker/server/.dockerignore +++ /dev/null @@ -1,8 +0,0 @@ -# post / preinstall scripts (not needed, we do it in Dockerfile) -alpine-root/install/* - -# docs (looks useless) -alpine-root/usr/share/doc/* - -# packages, etc. (used by alpine-build.sh) -tgz-packages/* diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 41be7e611a3..d4498abda6a 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="23.10.3.5" +ARG VERSION="23.10.5.20" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # user/group precreated explicitly with fixed uid/gid on purpose. diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 0ff6ae2e227..08e95cd535b 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -30,7 +30,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="23.10.3.5" +ARG VERSION="23.10.5.20" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image diff --git a/docker/test/base/setup_export_logs.sh b/docker/test/base/setup_export_logs.sh index ec24b237752..6e3721956c0 100755 --- a/docker/test/base/setup_export_logs.sh +++ b/docker/test/base/setup_export_logs.sh @@ -126,6 +126,9 @@ function setup_logs_replication # It's doesn't make sense to try creating tables if SYNC fails echo "SYSTEM SYNC DATABASE REPLICA default" | clickhouse-client "${CONNECTION_ARGS[@]}" || return 0 + debug_or_sanitizer_build=$(clickhouse-client -q "WITH ((SELECT value FROM system.build_options WHERE name='BUILD_TYPE') AS build, (SELECT value FROM system.build_options WHERE name='CXX_FLAGS') as flags) SELECT build='Debug' OR flags LIKE '%fsanitize%'") + echo "Build is debug or sanitizer: $debug_or_sanitizer_build" + # For each system log table: echo 'Create %_log tables' clickhouse-client --query "SHOW TABLES FROM system LIKE '%\\_log'" | while read -r table @@ -133,7 +136,14 @@ function setup_logs_replication if [[ "$table" = "trace_log" ]] then EXTRA_COLUMNS_FOR_TABLE="${EXTRA_COLUMNS_TRACE_LOG}" - EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION_TRACE_LOG}" + # Do not try to resolve stack traces in case of debug/sanitizers + # build, since it is too slow (flushing of trace_log can take ~1min + # with such MV attached) + if [[ "$debug_or_sanitizer_build" = 1 ]]; then + EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION}" + else + EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION_TRACE_LOG}" + fi else 
EXTRA_COLUMNS_FOR_TABLE="${EXTRA_COLUMNS}" EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION}" @@ -182,3 +192,13 @@ function setup_logs_replication " || continue done ) + +function stop_logs_replication +{ + echo "Detach all logs replication" + clickhouse-client --query "select database||'.'||table from system.tables where database = 'system' and (table like '%_sender' or table like '%_watcher')" | { + tee /dev/stderr + } | { + xargs -n1 -r -i clickhouse-client --query "drop table {}" + } +} diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 1b72dab5e3c..d3695ba2613 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -206,7 +206,7 @@ function build ( cd "$FASTTEST_BUILD" TIMEFORMAT=$'\nreal\t%3R\nuser\t%3U\nsys\t%3S' - ( time ninja clickhouse-bundle) |& ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/build_log.txt" + ( time ninja clickhouse-bundle clickhouse-stripped) |& ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/build_log.txt" BUILD_SECONDS_ELAPSED=$(awk '/^....-..-.. ..:..:.. real\t[0-9]/ {print $4}' < "$FASTTEST_OUTPUT/build_log.txt") echo "build_clickhouse_fasttest_binary: [ OK ] $BUILD_SECONDS_ELAPSED sec." \ | ts '%Y-%m-%d %H:%M:%S' \ @@ -215,7 +215,6 @@ function build mkdir -p "$FASTTEST_OUTPUT/binaries/" cp programs/clickhouse "$FASTTEST_OUTPUT/binaries/clickhouse" - strip programs/clickhouse -o programs/clickhouse-stripped zstd --threads=0 programs/clickhouse-stripped -o "$FASTTEST_OUTPUT/binaries/clickhouse-stripped.zst" fi ccache_status diff --git a/docker/test/fuzzer/generate-test-j2.py b/docker/test/fuzzer/generate-test-j2.py index 11525163ed8..6fd37d6bd02 100755 --- a/docker/test/fuzzer/generate-test-j2.py +++ b/docker/test/fuzzer/generate-test-j2.py @@ -3,6 +3,7 @@ from argparse import ArgumentParser import os import jinja2 +import itertools def removesuffix(text, suffix): @@ -47,6 +48,7 @@ def main(args): loader=jinja2.FileSystemLoader(suite_dir), keep_trailing_newline=True, ) + j2env.globals.update(product=itertools.product) test_names = os.listdir(suite_dir) for test_name in test_names: diff --git a/docker/test/fuzzer/query-fuzzer-tweaks-users.xml b/docker/test/fuzzer/query-fuzzer-tweaks-users.xml index ecd7aae2e4a..023f257253a 100644 --- a/docker/test/fuzzer/query-fuzzer-tweaks-users.xml +++ b/docker/test/fuzzer/query-fuzzer-tweaks-users.xml @@ -23,11 +23,6 @@ 10G - - - - - 200 diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index af1ce0c4dd4..8aeb06ec27b 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -212,11 +212,11 @@ quit gdb -batch -command script.gdb -p $server_pid & sleep 5 - # gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s) + # gdb will send SIGSTOP, spend some time loading debug info, and then send SIGCONT, wait for it (up to send_timeout, 300s) time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||: # Check connectivity after we attach gdb, because it might cause the server - # to freeze and the fuzzer will fail. In debug build it can take a lot of time. + # to freeze, and the fuzzer will fail. In debug build, it can take a lot of time. for _ in {1..180} do if clickhouse-client --query "select 1" @@ -226,14 +226,15 @@ quit sleep 1 done kill -0 $server_pid # This checks that it is our server that is started and not some other one - echo 'Server started and responded' + echo 'Server started and responded.' 
setup_logs_replication # SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric. - # SC2046: Quote this to prevent word splitting. Actually I need word splitting. + # SC2046: Quote this to prevent word splitting. Actually, I need word splitting. # shellcheck disable=SC2012,SC2046 timeout -s TERM --preserve-status 30m clickhouse-client \ + --max_memory_usage_in_client=1000000000 \ --receive_timeout=10 \ --receive_data_timeout_ms=10000 \ --stacktrace \ @@ -253,10 +254,10 @@ quit wait "$fuzzer_pid" || fuzzer_exit_code=$? echo "Fuzzer exit code is $fuzzer_exit_code" - # If the server dies, most often the fuzzer returns code 210: connetion + # If the server dies, most often the fuzzer returns Code 210: Connection refused, and sometimes also code 32: attempt to read after eof. For - simplicity, check again whether the server is accepting connections, using - clickhouse-client. We don't check for existence of server process, because + simplicity, check again whether the server is accepting connections using + clickhouse-client. We don't check for the existence of the server process, because # the process is still present while the server is terminating and not # accepting the connections anymore. diff --git a/docker/test/integration/mysql_java_client/MySQLJavaClientTest.java b/docker/test/integration/mysql_java_client/MySQLJavaClientTest.java index 1ac21ffe4b4..445e384ba1a 100644 --- a/docker/test/integration/mysql_java_client/MySQLJavaClientTest.java +++ b/docker/test/integration/mysql_java_client/MySQLJavaClientTest.java @@ -39,8 +39,7 @@ public class MySQLJavaClientTest { // useServerPrepStmts=true -> COM_STMT_PREPARE + COM_STMT_EXECUTE -> binary // useServerPrepStmts=false -> COM_QUERY -> text - String jdbcUrl = String.format("jdbc:mysql://%s:%s/%s?useSSL=false&useServerPrepStmts=%s", - host, port, database, binary); + String jdbcUrl = String.format("jdbc:mysql://%s:%s/%s?useSSL=false&useServerPrepStmts=%s", host, port, database, binary); try { Class.forName("com.mysql.cj.jdbc.Driver"); @@ -67,21 +66,21 @@ public class MySQLJavaClientTest { int rowNum = 1; while (rs.next()) { System.out.printf("Row #%d\n", rowNum++); - System.out.printf("%s, value: %d\n", getMysqlType(rs, "i8"), rs.getInt("i8")); - System.out.printf("%s, value: %d\n", getMysqlType(rs, "i16"), rs.getInt("i16")); - System.out.printf("%s, value: %d\n", getMysqlType(rs, "i32"), rs.getInt("i32")); - System.out.printf("%s, value: %d\n", getMysqlType(rs, "i64"), rs.getLong("i64")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "i128"), rs.getString("i128")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "i256"), rs.getString("i256")); - System.out.printf("%s, value: %d\n", getMysqlType(rs, "ui8"), rs.getInt("ui8")); - System.out.printf("%s, value: %d\n", getMysqlType(rs, "ui16"), rs.getInt("ui16")); - System.out.printf("%s, value: %d\n", getMysqlType(rs, "ui32"), rs.getLong("ui32")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "ui64"), rs.getString("ui64")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "ui128"), rs.getString("ui128")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "ui256"), rs.getString("ui256")); - System.out.printf("%s, value: %f\n", getMysqlType(rs, "f32"), rs.getFloat("f32")); - System.out.printf("%s, value: %f\n", getMysqlType(rs, "f64"), rs.getFloat("f64")); - System.out.printf("%s, value: %b\n", getMysqlType(rs, "b"), rs.getBoolean("b")); + System.out.printf("%s, value: %d, wasNull: %b\n", 
getMysqlType(rs, "i8"), rs.getInt("i8"), rs.wasNull()); + System.out.printf("%s, value: %d, wasNull: %b\n", getMysqlType(rs, "i16"), rs.getInt("i16"), rs.wasNull()); + System.out.printf("%s, value: %d, wasNull: %b\n", getMysqlType(rs, "i32"), rs.getInt("i32"), rs.wasNull()); + System.out.printf("%s, value: %d, wasNull: %b\n", getMysqlType(rs, "i64"), rs.getLong("i64"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "i128"), rs.getString("i128"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "i256"), rs.getString("i256"), rs.wasNull()); + System.out.printf("%s, value: %d, wasNull: %b\n", getMysqlType(rs, "ui8"), rs.getInt("ui8"), rs.wasNull()); + System.out.printf("%s, value: %d, wasNull: %b\n", getMysqlType(rs, "ui16"), rs.getInt("ui16"), rs.wasNull()); + System.out.printf("%s, value: %d, wasNull: %b\n", getMysqlType(rs, "ui32"), rs.getLong("ui32"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "ui64"), rs.getString("ui64"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "ui128"), rs.getString("ui128"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "ui256"), rs.getString("ui256"), rs.wasNull()); + System.out.printf("%s, value: %f, wasNull: %b\n", getMysqlType(rs, "f32"), rs.getFloat("f32"), rs.wasNull()); + System.out.printf("%s, value: %f, wasNull: %b\n", getMysqlType(rs, "f64"), rs.getFloat("f64"), rs.wasNull()); + System.out.printf("%s, value: %b, wasNull: %b\n", getMysqlType(rs, "b"), rs.getBoolean("b"), rs.wasNull()); } System.out.println(); } @@ -92,10 +91,10 @@ public class MySQLJavaClientTest { int rowNum = 1; while (rs.next()) { System.out.printf("Row #%d\n", rowNum++); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "s"), rs.getString("s")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "sn"), rs.getString("sn")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "lc"), rs.getString("lc")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "nlc"), rs.getString("nlc")); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "s"), rs.getString("s"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "sn"), rs.getString("sn"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "lc"), rs.getString("lc"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "nlc"), rs.getString("nlc"), rs.wasNull()); } System.out.println(); } @@ -106,10 +105,10 @@ public class MySQLJavaClientTest { int rowNum = 1; while (rs.next()) { System.out.printf("Row #%d\n", rowNum++); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "ilc"), rs.getInt("ilc")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dlc"), rs.getDate("dlc")); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "ilc"), rs.getInt("ilc"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dlc"), rs.getDate("dlc"), rs.wasNull()); // NULL int is represented as zero - System.out.printf("%s, value: %s\n", getMysqlType(rs, "ni"), rs.getInt("ni")); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "ni"), rs.getInt("ni"), rs.wasNull()); } System.out.println(); } @@ -120,12 +119,11 @@ public class MySQLJavaClientTest { int rowNum = 1; while (rs.next()) { System.out.printf("Row #%d\n", rowNum++); - System.out.printf("%s, value: %s\n", 
getMysqlType(rs, "d32"), rs.getBigDecimal("d32").toPlainString()); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "d64"), rs.getBigDecimal("d64").toPlainString()); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "d128_native"), - rs.getBigDecimal("d128_native").toPlainString()); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "d128_text"), rs.getString("d128_text")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "d256"), rs.getString("d256")); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d32"), rs.getBigDecimal("d32").toPlainString(), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d64"), rs.getBigDecimal("d64").toPlainString(), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d128_native"), rs.getBigDecimal("d128_native").toPlainString(), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d128_text"), rs.getString("d128_text"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d256"), rs.getString("d256"), rs.wasNull()); } System.out.println(); } @@ -136,12 +134,12 @@ public class MySQLJavaClientTest { int rowNum = 1; while (rs.next()) { System.out.printf("Row #%d\n", rowNum++); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "d"), rs.getDate("d")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "d32"), rs.getDate("d32")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt"), rs.getTimestamp("dt")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_3"), rs.getTimestamp("dt64_3")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_6"), rs.getTimestamp("dt64_6")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_9"), rs.getTimestamp("dt64_9")); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d"), rs.getDate("d"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d32"), rs.getDate("d32"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt"), rs.getTimestamp("dt"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_3"), rs.getTimestamp("dt64_3"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_6"), rs.getTimestamp("dt64_6"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_9"), rs.getTimestamp("dt64_9"), rs.wasNull()); } System.out.println(); } @@ -152,13 +150,13 @@ public class MySQLJavaClientTest { int rowNum = 1; while (rs.next()) { System.out.printf("Row #%d\n", rowNum++); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_0"), rs.getTimestamp("dt64_0")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_1"), rs.getTimestamp("dt64_1")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_2"), rs.getTimestamp("dt64_2")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_4"), rs.getTimestamp("dt64_4")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_5"), rs.getTimestamp("dt64_5")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_7"), rs.getTimestamp("dt64_7")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_8"), rs.getTimestamp("dt64_8")); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_0"), rs.getTimestamp("dt64_0"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, 
"dt64_1"), rs.getTimestamp("dt64_1"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_2"), rs.getTimestamp("dt64_2"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_4"), rs.getTimestamp("dt64_4"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_5"), rs.getTimestamp("dt64_5"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_7"), rs.getTimestamp("dt64_7"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_8"), rs.getTimestamp("dt64_8"), rs.wasNull()); } System.out.println(); } @@ -169,8 +167,8 @@ public class MySQLJavaClientTest { int rowNum = 1; while (rs.next()) { System.out.printf("Row #%d\n", rowNum++); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt"), rs.getTimestamp("dt")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_3"), rs.getTimestamp("dt64_3")); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt"), rs.getTimestamp("dt"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_3"), rs.getTimestamp("dt64_3"), rs.wasNull()); } System.out.println(); } @@ -181,10 +179,10 @@ public class MySQLJavaClientTest { int rowNum = 1; while (rs.next()) { System.out.printf("Row #%d\n", rowNum++); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "a"), rs.getString("a")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "u"), rs.getString("u")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "t"), rs.getString("t")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "m"), rs.getString("m")); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "a"), rs.getString("a"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "u"), rs.getString("u"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "t"), rs.getString("t"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "m"), rs.getString("m"), rs.wasNull()); } System.out.println(); } @@ -196,17 +194,15 @@ public class MySQLJavaClientTest { int rowNum = 1; while (rs.next()) { System.out.printf("Row #%d\n", rowNum++); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "f"), rs.getFloat("f")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "d"), rs.getDate("d")); - System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt"), rs.getTimestamp("dt")); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "f"), rs.getFloat("f"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d"), rs.getDate("d"), rs.wasNull()); + System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt"), rs.getTimestamp("dt"), rs.wasNull()); } System.out.println(); } private static String getMysqlType(ResultSet rs, String columnLabel) throws SQLException { ResultSetMetaData meta = rs.getMetaData(); - return String.format("%s type is %s", columnLabel, - MysqlType.getByJdbcType(meta.getColumnType(rs.findColumn(columnLabel)))); + return String.format("%s type is %s", columnLabel, MysqlType.getByJdbcType(meta.getColumnType(rs.findColumn(columnLabel)))); } - } diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile index 8345e3d5791..458ca2b1da8 100644 --- a/docker/test/integration/runner/Dockerfile +++ 
b/docker/test/integration/runner/Dockerfile @@ -68,6 +68,7 @@ RUN python3 -m pip install --no-cache-dir \ asyncio \ avro==1.10.2 \ azure-storage-blob \ + boto3 \ cassandra-driver \ confluent-kafka==1.9.2 \ delta-spark==2.3.0 \ diff --git a/docker/test/integration/runner/compose/docker_compose_rabbitmq.yml b/docker/test/integration/runner/compose/docker_compose_rabbitmq.yml index 2db9fb589d2..61b21e0e3d9 100644 --- a/docker/test/integration/runner/compose/docker_compose_rabbitmq.yml +++ b/docker/test/integration/runner/compose/docker_compose_rabbitmq.yml @@ -6,9 +6,13 @@ services: hostname: rabbitmq1 expose: - ${RABBITMQ_PORT:-5672} + - ${RABBITMQ_SECURE_PORT:-5671} volumes: - type: ${RABBITMQ_LOGS_FS:-tmpfs} source: ${RABBITMQ_LOGS:-} target: /rabbitmq_logs/ - "${RABBITMQ_COOKIE_FILE}:/var/lib/rabbitmq/.erlang.cookie" - - /misc/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf \ No newline at end of file + - /misc/rabbitmq/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf + - /misc/rabbitmq/ca-cert.pem:/etc/rabbitmq/ca-cert.pem + - /misc/rabbitmq/server-cert.pem:/etc/rabbitmq/server-cert.pem + - /misc/rabbitmq/server-key.pem:/etc/rabbitmq/server-key.pem diff --git a/docker/test/integration/runner/misc/rabbitmq.conf b/docker/test/integration/runner/misc/rabbitmq.conf deleted file mode 100644 index 3527c83880b..00000000000 --- a/docker/test/integration/runner/misc/rabbitmq.conf +++ /dev/null @@ -1,8 +0,0 @@ -loopback_users.guest = false -listeners.tcp.default = 5672 -default_pass = clickhouse -default_user = root -management.tcp.port = 15672 - -log.file = /rabbitmq_logs/rabbit.log -log.file.level = debug diff --git a/docker/test/integration/runner/misc/rabbitmq/ca-cert.pem b/docker/test/integration/runner/misc/rabbitmq/ca-cert.pem new file mode 100644 index 00000000000..4a7b88f7936 --- /dev/null +++ b/docker/test/integration/runner/misc/rabbitmq/ca-cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFhTCCA22gAwIBAgIUWhfjFfbwannH3KIqITDtgcvSItMwDQYJKoZIhvcNAQEL +BQAwUjELMAkGA1UEBhMCUlUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDELMAkGA1UEAwwCY2EwHhcNMjMxMTE0 +MTgyODI2WhcNMzMxMTExMTgyODI2WjBSMQswCQYDVQQGEwJSVTETMBEGA1UECAwK +U29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQsw +CQYDVQQDDAJjYTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJfJegdC +gavNGYzSdva+5QMxGvqyLwZzjophMeyEzlW/Di4KFGPho+fVlVMB/EwaTRoBRLEu +SQusQwoFg71mGvUTOpgHzlsUz4vcVVFOiL4bJdzCWQKzdC8M8rUFoks9FMboVeSx +jhAnKAm/NpCLpm9VYnRjEq2KEbJp7VkPAHgZEXR7VABwCFvmDcztrfcWfmXxm6IH +o+AkF/nqdphLu7Q1yDQiF8Q8TuszuhqgQ7/1PrRcaSADrF15jJjQb05sILpGCT3e +lxJYId5RF0+fgTIqy03bAKB53+8V8cAkowI4rvPTmcFXhcG3rkDO6lyZixHhlpKi +PmXEzHh0kfsRjzkNBP0CKqPnu3D2iymROiPAH2cteaYe6jdD2HIjuVLk/TjX1ZFy +DlZCrJIwj0l8A2xAfLq8Gw5RSr0a9k5TiMD5nZtfd12Vd0K82vO32vmcjO2Igddc +VWccDDwUY/ZWV3uznkusOBrB8wba3ZsXA5hjJzs0KlTvQKPjX0y4lFMmZGbelwjt +pR5dRNLi5XTdMPzV0mAnvJhDTFEmME19Bh6AEsjuAz3gHUdwNTbSxUS3mF/hTL9k +v2wh5udUAOwqD1uEzqPJyG4JCJQozIDOEEZVixWqQ60b9wUHN8meqO4y9fxTdmHW +Vo5BAF1xEJhJJb0QY/O6GahPtWqb/Mr1rtPJAgMBAAGjUzBRMB0GA1UdDgQWBBSw +fQcOabXwX/v9F1hd2cmuIug56jAfBgNVHSMEGDAWgBSwfQcOabXwX/v9F1hd2cmu +Iug56jAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQAms8y6RVxl +mKSUbsU8JscYwOzcRUQJWETeIr4rtZvMHH+3vkdBU0yKxGpEm7U8J3+5oVTYPhbs +11ZAL+DvIZ6gT6pjDvECyVox1OkjNogz843fTMbNqjuuehjSKXwpMTy5/kmT2aLj +//nBi5UX1xo3RQ9vtmBwzZ3VFK99DFXraDOPS/yk43WV2uqdWsXCNvyEyCHmM1IB +9FQe2EFcO6s4/N+TarhIZ8Udhj5bl8d4eDd1yEckmTD4aHJBgMII2uEwrAxR5CT1 +tCqUKutvNrkXI5PIULvmy+Lwm7PJAC7grPtUHK6anSugpljd7bFj18fHH9APiC45 
+Ou4OOK1BUZogCEo7rD36UlanxQO0GEzgDCVEoEdoe0WRdc6T9b4fM8vpQqwBdf9t +nkPB8oLCKerqqYwCiMuWm4BcRmExA7ypIkUCcluGO9/kTmdps3NqOvET9oLTjXuA +z5TPmaK5a3poKLoxBfv6WfRTgisOnMNTsjL1R8+xuhEn5hSlE2r3wAi8Cys9Z9PV +LhTj0SRTXILd2NW3lO8QfO0pGdjgk90GqkyUY9YjuiMVPvdUAFQsHm+0GEZEXjOD +Bw7tLSJQ4IKhfactg/Puxd15ahcWAxeelyED+w/zVGdHYblqbvfdtiGj370KVhoj +DL5HkdPa0IhTPqMBnmoVQ4C/WzKofXBjQQ== +-----END CERTIFICATE----- diff --git a/docker/test/integration/runner/misc/rabbitmq/generate_certs.sh b/docker/test/integration/runner/misc/rabbitmq/generate_certs.sh new file mode 100755 index 00000000000..442d2fe004f --- /dev/null +++ b/docker/test/integration/runner/misc/rabbitmq/generate_certs.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# 1. Generate CA's private key and self-signed certificate +openssl req -newkey rsa:4096 -x509 -days 3650 -nodes -batch -keyout ca-key.pem -out ca-cert.pem -subj "/C=RU/ST=Some-State/O=Internet Widgits Pty Ltd/CN=ca" + +# 2. Generate server's private key and certificate signing request (CSR) +openssl req -newkey rsa:4096 -nodes -batch -keyout server-key.pem -out server-req.pem -subj "/C=RU/ST=Some-State/O=Internet Widgits Pty Ltd/CN=server" + +# 3. Use CA's private key to sign server's CSR and get back the signed certificate +openssl x509 -req -days 3650 -in server-req.pem -CA ca-cert.pem -CAkey ca-key.pem -CAcreateserial -extfile server-ext.cnf -out server-cert.pem diff --git a/docker/test/integration/runner/misc/rabbitmq/rabbitmq.conf b/docker/test/integration/runner/misc/rabbitmq/rabbitmq.conf new file mode 100644 index 00000000000..258a282907a --- /dev/null +++ b/docker/test/integration/runner/misc/rabbitmq/rabbitmq.conf @@ -0,0 +1,15 @@ +loopback_users.guest = false +listeners.tcp.default = 5672 +default_pass = clickhouse +default_user = root +management.tcp.port = 15672 + +log.file = /rabbitmq_logs/rabbit.log +log.file.level = debug + +listeners.ssl.default = 5671 +ssl_options.verify = verify_none +ssl_options.fail_if_no_peer_cert = false +ssl_options.cacertfile = /etc/rabbitmq/ca-cert.pem +ssl_options.certfile = /etc/rabbitmq/server-cert.pem +ssl_options.keyfile = /etc/rabbitmq/server-key.pem diff --git a/docker/test/integration/runner/misc/rabbitmq/server-cert.pem b/docker/test/integration/runner/misc/rabbitmq/server-cert.pem new file mode 100644 index 00000000000..338de91aa0f --- /dev/null +++ b/docker/test/integration/runner/misc/rabbitmq/server-cert.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFpTCCA42gAwIBAgIUJvQslezZO09XgFGQCxOM6orIsWowDQYJKoZIhvcNAQEL +BQAwUjELMAkGA1UEBhMCUlUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDELMAkGA1UEAwwCY2EwHhcNMjMxMTE0 +MTgyODI5WhcNMzMxMTExMTgyODI5WjBWMQswCQYDVQQGEwJSVTETMBEGA1UECAwK +U29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQ8w +DQYDVQQDDAZzZXJ2ZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCe +o/K71WdKpVpdDvhaZy6wBVhFlu7j7DhfTSYvcPpAJfExmzO8JK3vh5/yGyAO1t79 +gAjqyXLMCZKw7ajM2rez9YnGYqaFi70BlTcU2KQ8LbFEYRc3cYNDmmWIKBpwpSri +We5SQrRLnDXqAn6T8FG5ejQ/t+1IUMrtZENB4lp8fBmEOJb5yr1TE++6EhiDBQho +cLDWWWP8b55kyZhqP/VgmId4lvboGMRKxbiRJ6/SPr/i/pteBD8jTYfbJr6ceXov +/p5yxIp61z5ry1anU7W3B8jTl/gj7SqtFdSnRajZ0DGJJAUKpiiJSCSlp5YB5Ub2 +eBBMHmdA5R1MuiU9TOA35nUW5wkhEOJXnBR/WCsYioVmn/+5dm6JPYiwp/TefYnr +x9iLbb/Tyx7MnXzeyvKg781SwmnvS6Blhtr0zhAW9szZz8cVHPBqFs6PzGs/5mwE +C+tM3Zp85aHd28nIT4NQLHdMDwVmGwmPdy4uavtYWMDhsuIyEU8hCZymiHhPnuHU +VbmfZ8GOTIzUgQAvZb0fL1Xow2Tf6XuARnvuU9weRttg9jSOqPuUENRsFXv0mU8M +EpQjrxry88Wfz7bBEjN5JHC16PB/Nu7zTGJ4/slThbxNv0bIONzvTBPbXrKnxw7Z 
+d9WhGJI+LQxRqLTynQe6yzDwIuW9LRdBNTp7CtQRwQIDAQABo28wbTArBgNVHREE +JDAigiBpbnRlZ3JhdGlvbi10ZXN0cy5jbGlja2hvdXNlLmNvbTAdBgNVHQ4EFgQU +54GvBUYWvMADpTz/zglwMlaJuskwHwYDVR0jBBgwFoAUsH0HDmm18F/7/RdYXdnJ +riLoOeowDQYJKoZIhvcNAQELBQADggIBADfNH6O6ay+xg0XmV6sR0n4j6PwL9Cnc +VjuCmHQbpFXfMvgCdfHvbtT0Y/pG7IoeKmrrm0JPvKa2E9Ht0j6ZnowQ2m9mJk8U +5Fd/PbC1I4KgVCw6HRSOcwqANJxOGe7RyN9PTZZ8fxzmzIR3FiQ2bXfr+LaotZOK +aVS8F8xCOzoMvL9LFls2YpEn20p/1EATIf2MFX3j9vKfcJVOyDJV4i5BMImStFLM +g3sdC96de/59yxt9khM0PNucU1ldNFs/kZVEcNSwGOAIgQEPwULJtDY+ZSWeROpX +EpWndN6zQsv1pdNvLtXsDXfi4YoH9QVaA/k4aFFJ08CjSZfMYmwyPOGsf/wqT65i +ADID2yb1A/FIIe/fM+d2gXHBVFBDmydJ1JCdCoYrEJgfWj1LO/0jLi34ZZ17Hu7F +D33fLARF9nlLzlUiWjcQlOjNoCM48AgG/3wHk4eiSfc/3PIJDuDGDa0NdtDeKKhH +XkP2ll4cMUH6EQ9KO1jHPmf5RokX4QJgH+ofO4U5XQFwc3lOyJzEQnED+wame7do +R7TE4F/OXhxLqA6DFkzXe89/kSCoAF9bjzmUn/ilrg8NXKKgprgHg4DJHgvCQVVC +34ab7Xj7msUm4D9vI+GAeUbUqnqCaWxDF6vCMT0Qq7iSVDxa/SV8TX8Vp2Zh+PSh +4m23Did+KjLq +-----END CERTIFICATE----- diff --git a/docker/test/integration/runner/misc/rabbitmq/server-ext.cnf b/docker/test/integration/runner/misc/rabbitmq/server-ext.cnf new file mode 100644 index 00000000000..49859873222 --- /dev/null +++ b/docker/test/integration/runner/misc/rabbitmq/server-ext.cnf @@ -0,0 +1 @@ +subjectAltName=DNS:integration-tests.clickhouse.com diff --git a/docker/test/integration/runner/misc/rabbitmq/server-key.pem b/docker/test/integration/runner/misc/rabbitmq/server-key.pem new file mode 100644 index 00000000000..92e93e8fba5 --- /dev/null +++ b/docker/test/integration/runner/misc/rabbitmq/server-key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCeo/K71WdKpVpd +DvhaZy6wBVhFlu7j7DhfTSYvcPpAJfExmzO8JK3vh5/yGyAO1t79gAjqyXLMCZKw +7ajM2rez9YnGYqaFi70BlTcU2KQ8LbFEYRc3cYNDmmWIKBpwpSriWe5SQrRLnDXq +An6T8FG5ejQ/t+1IUMrtZENB4lp8fBmEOJb5yr1TE++6EhiDBQhocLDWWWP8b55k +yZhqP/VgmId4lvboGMRKxbiRJ6/SPr/i/pteBD8jTYfbJr6ceXov/p5yxIp61z5r +y1anU7W3B8jTl/gj7SqtFdSnRajZ0DGJJAUKpiiJSCSlp5YB5Ub2eBBMHmdA5R1M +uiU9TOA35nUW5wkhEOJXnBR/WCsYioVmn/+5dm6JPYiwp/TefYnrx9iLbb/Tyx7M +nXzeyvKg781SwmnvS6Blhtr0zhAW9szZz8cVHPBqFs6PzGs/5mwEC+tM3Zp85aHd +28nIT4NQLHdMDwVmGwmPdy4uavtYWMDhsuIyEU8hCZymiHhPnuHUVbmfZ8GOTIzU +gQAvZb0fL1Xow2Tf6XuARnvuU9weRttg9jSOqPuUENRsFXv0mU8MEpQjrxry88Wf +z7bBEjN5JHC16PB/Nu7zTGJ4/slThbxNv0bIONzvTBPbXrKnxw7Zd9WhGJI+LQxR +qLTynQe6yzDwIuW9LRdBNTp7CtQRwQIDAQABAoICAA0lev0T3z5xW36wueYL/PN7 +TehebKeYsMc9BngR/bsJKea5fN0PkRZzf865brusFMifLp3+WbQM6wocd8uaKHUS +WPuGu1P/04bpDap9lYajJriK7ziaAI2+osFYyXAiT954I2bPvk8xv8oHsOOjm7Iq +LWBGZrSCdX6cu3IfRu5f/mFVqzVCFtRmp4wc6ckZxquZAx6QQ9fsjAzAJBBSAoyh +t0BICmgLfWDQ582no0tiBdbS0J9G7NCJIUQI/uzKqFSH3iuWm/84DSUzsZemOT3U +uFDInDil885qK7g87pQ2S5SY1o4eXOebgeX0cFrx3CKaqocUUewv0HDGUEW3NDFs +KhUvlJZIFgk6bMend16U6kfRCUsjLA22Rfxzanl53cGVywCeIMirnLYuEu0TsxyK +CblBvyhcpjrGi7FQskzR+J9LpZPnmtn6TAb7JCAALRVHcAGKhGeh613SjPUfkWb0 +KpDps08x8MWGEAALuHbOK0nMLFm+PuMt7+krqCeJET+XM44GT+6ZstrDv0RufxUN ++pkLW7AsVZoXcFvaOWjuyBvX/f6UHCSfueo0mB3H80WoftDIfdhM+AI7/oBTYCBx +Z8BtW+g7Eq3pOUg/Um7S7Z2bybBWE14kpi95gRf3upEYPqHJUpJPdu20lk24iAt9 +LCXF4AjZBIdAuyJrYOJBAoIBAQDd/Bm14WvmBOablGLn6hmohi6M75D+/eQanlg9 +eJhXJUVd8FzOTjKi70EHWvkqswenNDbe/WGtImqG+9G+N/ol2qhi5xVSQ2XQmcVQ +U+k15Bzm9xKM0OqsStFvRgP1Cy6Ms3/jxr5JEEwUepmjvWTDGTlhTQASA/D7Uh2q +5HpPiHEVm4g5eTAYWeAbI6cGwVS0L4y6xkFGde37Kh2P8ZodWB+d3fglVu4Ok9Nf +wE2f8MK2ewQ0SbF/Nj2WjlVomvOvOJG/2CDLuiH/vc4YUvLAm8pNwvsmgtSh1Okt +E/HfXegrlPPEgw6owqoQFt+aGUITgEhiwEVAcYS0pXzzkQX5AoIBAQC28wJ8ueKr +fINpJM2pSc7WRDFduP5yGsRreSLBXLKMbvOlIVb3PaWp11Cg3+X5O90bPXYJ9mBI 
+WGR0g14/VD8edxs2D5TUZcP4/vKXGHaWRY9Z4A3jVpjzAxAaviNDHJ08tLXEMXZQ +lbA7dX8z6lpoQfwnPzjBwB01mVegwXPeIwIIfT/FmAiGzvSnAMXBGSGWRRdzof0M +/vPFbgllcQmM4AnEGcErCgFRpwcssO87T2jnvf6QVE5JCcnUcGIli1ThxCU9TRZM +5s6R7Nvk3/UjwcpRcqMtnGpTT2QXSnRwvWUfM+bKTwaxz4PjqKpgIc11kwJAjlxk +4CxYf1mDGLwJAoIBAGFJRTNS8ejDKRXyOE6PaGNVOz2FGLTILJoF34JBQfKfYQFE +gEfiOYry9Dr3AdBW2fnLhmi//3jTZoB2CHwnKDhC1h1STSPaadq8KZ+ExuZZbNlE +WxrfzJlpyNPNiZpxJht/54K57Vc0D0PCX2dFb82ZVm5wQqGinJBocpwcugX1NCpW +GaOmmw9xBCigvWjWffriA/kvPhhVQtEaqg4Vwoctwd18FG645Gf7HV4Pd3WrHIrA +6xzHV0T7To6XHpNTpYybbDT50ZW3o4LjellqsPz8yfK+izdbizjJiM+6t/w+uauw +Ag2Tqm8HsWSPwbtVaoIFbLPqs+8EUTaieFp+qnECggEAVuaTdd9uFfrtCNKchh8z +CoAV2uj2pAim6E3//k0j2qURQozVnFdCC6zk9aWkvYB8BGZrXUwUbAjgnp+P8xD3 +cmctG77G+STls66WWMMcAUFFWHGe5y/JMxVvXuSWJ1i+L4m/FVRRWPHhZjznkSdu +jjtZpOLY+N9igIU4JHn/qbKDUrj7w8X1tuMzPuiVBqYDWDe1bg2x/6xS6qLb/71z +xeDdgrKhGOqFud1XARmCaW/M6tdKxg/lp7fokOpZFHBcf2kGL1ogj6LK2HHj+ZGQ +Bc4VZh7H9/BmaPA7IP0S1kKAeBPVOp/TFD737Pm/BC7KQ2DzHusAZEI/jkHfqO/k +0QKCAQEAuiYLn9iLgk4uQO9oaSBGWKrJsR2L2dqI7IWU0X9xJlsQrJKcEeWg4LXt +djLsz0HrxZV/c+Pnh79hmFlBoEmH+hz32D/xd+/qrwwAcMkHAwMbznJu0IIuW2O9 +Uzma++7SvVmr9H0DkUwXFP3jn1A2n3uuI4czqtQ8N7GiH0UAWR5CsIP7azHvZTSj +s4Fzf8rTE6pNqVgQXjrVbI9H/h0uPP4alJbhnPba9mgB1cGmfBEnPkKgYNqSZse+ +95G2TlcK74sKBUSdBKqYBZ4ZUeTXV974Nva9guE9vzDQt1Cj6k0HWISVPUshPzIh +qrdHdxcM6yhA0Z0Gu6zj+Zsy4lU8gA== +-----END PRIVATE KEY----- diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 7d6de732489..f10236b7135 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -189,6 +189,8 @@ function run_tests test_prefix=right/performance fi + run_only_changed_tests=0 + # Determine which tests to run. if [ -v CHPC_TEST_GREP ] then @@ -203,6 +205,7 @@ function run_tests # tests. The lists of changed files are prepared in entrypoint.sh because # it has the repository. test_files=($(sed "s/tests\/performance/${test_prefix//\//\\/}/" changed-test-definitions.txt)) + run_only_changed_tests=1 else # The default -- run all tests found in the test dir. test_files=($(ls "$test_prefix"/*.xml)) @@ -226,6 +229,13 @@ function run_tests test_files=("${test_files[@]}") fi + if [ "$run_only_changed_tests" -ne 0 ]; then + if [ ${#test_files[@]} -eq 0 ]; then + time "$script_dir/report.py" --no-tests-run > report.html + exit 0 + fi + fi + # For PRs w/o changes in test definitons, test only a subset of queries, # and run them less times. If the corresponding environment variables are # already set, keep those values. 
diff --git a/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml b/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml index cb591f1a184..e780a99ecde 100644 --- a/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml +++ b/docker/test/performance-comparison/config/users.d/perf-comparison-tweaks-users.xml @@ -34,9 +34,4 @@ 0 - - - 1 - - diff --git a/docker/test/performance-comparison/report.py b/docker/test/performance-comparison/report.py index 7da30ba7a08..c2bc773bd54 100755 --- a/docker/test/performance-comparison/report.py +++ b/docker/test/performance-comparison/report.py @@ -19,6 +19,7 @@ parser.add_argument( choices=["main", "all-queries"], help="Which report to build", ) +parser.add_argument("--no-tests-run", action="store_true", default=False) args = parser.parse_args() tables = [] @@ -354,6 +355,36 @@ if args.report == "main": add_tested_commits() + def print_status(status, message): + print( + ( + """ + + + """.format( + status=status, message=message + ) + ) + ) + + if args.no_tests_run: + for t in tables: + print(t) + print( + "

<p>No tests to run. Only changed tests were run, but all changed tests are from another batch.</p>

" + ) + print( + f""" + + {os.getenv("CHPC_ADD_REPORT_LINKS") or ''} + + + """ + ) + # Why failure? Because otherwise we will not notice if we have a bug that leads to 0 tests being run + print_status("failure", "No tests changed, nothing to run") + exit(0) + run_error_rows = tsvRows("run-errors.tsv") error_tests += len(run_error_rows) addSimpleTable("Run Errors", ["Test", "Error"], run_error_rows) @@ -646,16 +677,7 @@ if args.report == "main": status = "failure" message = "Errors while building the report." - print( - ( - """ - - - """.format( - status=status, message=message - ) - ) - ) + print_status(status, message) elif args.report == "all-queries": print((header_template.format())) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 9951d79d6ac..07b40ea3b3d 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -217,6 +217,9 @@ ls -la / clickhouse-client -q "system flush logs" ||: +# stop logs replication to make it possible to dump logs tables via clickhouse-local +stop_logs_replication + # Stop server so we can safely read data with clickhouse-local. # Why do we read data with clickhouse-local? # Because it's the simplest way to read it when server has crashed. diff --git a/docker/test/stateless/stress_tests.lib b/docker/test/stateless/stress_tests.lib index 2309e307324..8f89c1b80dd 100644 --- a/docker/test/stateless/stress_tests.lib +++ b/docker/test/stateless/stress_tests.lib @@ -53,7 +53,7 @@ function configure() > /etc/clickhouse-server/config.d/keeper_port.xml.tmp sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml - function randomize_keeper_config_boolean_value { + function randomize_config_boolean_value { value=$(($RANDOM % 2)) sudo cat /etc/clickhouse-server/config.d/$2.xml \ | sed "s|<$1>[01]|<$1>$value|" \ @@ -72,9 +72,11 @@ function configure() sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml - randomize_config_boolean_value use_compression zookeeper - - randomize_config_boolean_value allow_experimental_block_number_column merge_tree_settings + if [[ -n "$ZOOKEEPER_FAULT_INJECTION" ]] && [[ "$ZOOKEEPER_FAULT_INJECTION" -eq 1 ]]; then + randomize_config_boolean_value use_compression zookeeper_fault_injection + else + randomize_config_boolean_value use_compression zookeeper + fi # for clickhouse-server (via service) echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment @@ -138,21 +140,6 @@ EOL --> $PWD -EOL - - # Analyzer is not yet ready for testing - cat > /etc/clickhouse-server/users.d/no_analyzer.xml < - - - - - - - - - - EOL } diff --git a/docker/test/upgrade/run.sh b/docker/test/upgrade/run.sh index 356e3f27728..57b683a16c3 100644 --- a/docker/test/upgrade/run.sh +++ b/docker/test/upgrade/run.sh @@ -78,6 +78,7 @@ remove_keeper_config "create_if_not_exists" "[01]" rm /etc/clickhouse-server/config.d/merge_tree.xml rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml rm /etc/clickhouse-server/users.d/nonconst_timezone.xml +rm /etc/clickhouse-server/users.d/s3_cache_new.xml start stop @@ -114,6 +115,7 @@ sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_defau rm /etc/clickhouse-server/config.d/merge_tree.xml rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml rm /etc/clickhouse-server/users.d/nonconst_timezone.xml +rm 
/etc/clickhouse-server/users.d/s3_cache_new.xml start diff --git a/docs/_includes/install/universal.sh b/docs/_includes/install/universal.sh index 0ae77f464eb..d474aa98e76 100755 --- a/docs/_includes/install/universal.sh +++ b/docs/_includes/install/universal.sh @@ -20,9 +20,9 @@ then fi elif [ "${ARCH}" = "aarch64" -o "${ARCH}" = "arm64" ] then - # If the system has >=ARMv8.2 (https://en.wikipedia.org/wiki/AArch64), choose the corresponding build, else fall back to a v8.0 - # compat build. Unfortunately, the ARM ISA level cannot be read directly, we need to guess from the "features" in /proc/cpuinfo. - # Also, the flags in /proc/cpuinfo are named differently than the flags passed to the compiler (cmake/cpu_features.cmake). + # Dispatch between standard and compatibility builds, see cmake/cpu_features.cmake for details. Unfortunately, (1) the ARM ISA level + # cannot be read directly, we need to guess from the "features" in /proc/cpuinfo, and (2) the flags in /proc/cpuinfo are named + # differently than the flags passed to the compiler in cpu_features.cmake. HAS_ARMV82=$(grep -m 1 'Features' /proc/cpuinfo | awk '/asimd/ && /sha1/ && /aes/ && /atomics/ && /lrcpc/') if [ "${HAS_ARMV82}" ] then diff --git a/docs/changelogs/v23.10.4.25-stable.md b/docs/changelogs/v23.10.4.25-stable.md new file mode 100644 index 00000000000..2d7d2a38e04 --- /dev/null +++ b/docs/changelogs/v23.10.4.25-stable.md @@ -0,0 +1,28 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.10.4.25-stable (330fd687d41) FIXME as compared to v23.10.3.5-stable (b2ba7637a41) + +#### Build/Testing/Packaging Improvement +* Backported in [#56633](https://github.com/ClickHouse/ClickHouse/issues/56633): In [#54043](https://github.com/ClickHouse/ClickHouse/issues/54043) the setup plan started to appear in the logs. It should be only in the `runner_get_all_tests.log` only. As well, send the failed infrastructure event to CI db. [#56214](https://github.com/ClickHouse/ClickHouse/pull/56214) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#56737](https://github.com/ClickHouse/ClickHouse/issues/56737): Do not fetch changed submodules in the builder container. [#56689](https://github.com/ClickHouse/ClickHouse/pull/56689) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Select from system tables when table based on table function. [#55540](https://github.com/ClickHouse/ClickHouse/pull/55540) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* Fix restore from backup with `flatten_nested` and `data_type_default_nullable` [#56306](https://github.com/ClickHouse/ClickHouse/pull/56306) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix segfault during Kerberos initialization [#56401](https://github.com/ClickHouse/ClickHouse/pull/56401) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix: RabbitMQ OpenSSL dynamic loading issue [#56703](https://github.com/ClickHouse/ClickHouse/pull/56703) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix crash in GCD codec in case when zeros present in data [#56704](https://github.com/ClickHouse/ClickHouse/pull/56704) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix crash in FPC codec [#56795](https://github.com/ClickHouse/ClickHouse/pull/56795) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
+ +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Rewrite jobs to use callable workflow [#56385](https://github.com/ClickHouse/ClickHouse/pull/56385) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Continue rewriting workflows to reusable tests [#56501](https://github.com/ClickHouse/ClickHouse/pull/56501) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Better exception messages [#56854](https://github.com/ClickHouse/ClickHouse/pull/56854) ([Antonio Andelic](https://github.com/antonio2368)). + diff --git a/docs/changelogs/v23.10.5.20-stable.md b/docs/changelogs/v23.10.5.20-stable.md new file mode 100644 index 00000000000..03e8c47481b --- /dev/null +++ b/docs/changelogs/v23.10.5.20-stable.md @@ -0,0 +1,28 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.10.5.20-stable (e84001e5c61) FIXME as compared to v23.10.4.25-stable (330fd687d41) + +#### Improvement +* Backported in [#56924](https://github.com/ClickHouse/ClickHouse/issues/56924): There was a potential vulnerability in previous ClickHouse versions: if a user has connected and unsuccessfully tried to authenticate with the "interserver secret" method, the server didn't terminate the connection immediately but continued to receive and ignore the leftover packets from the client. While these packets are ignored, they are still parsed, and if they use a compression method with another known vulnerability, it will lead to exploitation of it without authentication. This issue was found with [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by https://twitter.com/malacupa. [#56794](https://github.com/ClickHouse/ClickHouse/pull/56794) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Build/Testing/Packaging Improvement +* Backported in [#57023](https://github.com/ClickHouse/ClickHouse/issues/57023): There was an attempt to have the proper listing in [#44311](https://github.com/ClickHouse/ClickHouse/issues/44311), but the fix itself was in the wrong place, so it's still broken. See an [example](https://github.com/ClickHouse/ClickHouse/actions/runs/6897342568/job/18781001022#step:8:25). [#56989](https://github.com/ClickHouse/ClickHouse/pull/56989) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix ON CLUSTER queries without database on initial node [#56484](https://github.com/ClickHouse/ClickHouse/pull/56484) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix buffer overflow in Gorilla codec [#57107](https://github.com/ClickHouse/ClickHouse/pull/57107) ([Nikolay Degterinsky](https://github.com/evillique)). +* Close interserver connection on any exception before authentication [#57142](https://github.com/ClickHouse/ClickHouse/pull/57142) ([Antonio Andelic](https://github.com/antonio2368)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Fix client suggestions for user without grants [#56234](https://github.com/ClickHouse/ClickHouse/pull/56234) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix pygithub [#56778](https://github.com/ClickHouse/ClickHouse/pull/56778) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Avoid dependencies with no fixed versions [#56914](https://github.com/ClickHouse/ClickHouse/pull/56914) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Tiny improvement security [#57171](https://github.com/ClickHouse/ClickHouse/pull/57171) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+ diff --git a/docs/changelogs/v23.3.17.13-lts.md b/docs/changelogs/v23.3.17.13-lts.md new file mode 100644 index 00000000000..a18ced70d46 --- /dev/null +++ b/docs/changelogs/v23.3.17.13-lts.md @@ -0,0 +1,23 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.3.17.13-lts (e867d59020f) FIXME as compared to v23.3.16.7-lts (fb4125cc92a) + +#### Build/Testing/Packaging Improvement +* Backported in [#56731](https://github.com/ClickHouse/ClickHouse/issues/56731): Do not fetch changed submodules in the builder container. [#56689](https://github.com/ClickHouse/ClickHouse/pull/56689) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix segfault during Kerberos initialization [#56401](https://github.com/ClickHouse/ClickHouse/pull/56401) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix crash in FPC codec [#56795](https://github.com/ClickHouse/ClickHouse/pull/56795) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Rewrite jobs to use callable workflow [#56385](https://github.com/ClickHouse/ClickHouse/pull/56385) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Continue rewriting workflows to reusable tests [#56501](https://github.com/ClickHouse/ClickHouse/pull/56501) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Better exception messages [#56854](https://github.com/ClickHouse/ClickHouse/pull/56854) ([Antonio Andelic](https://github.com/antonio2368)). + diff --git a/docs/changelogs/v23.3.18.15-lts.md b/docs/changelogs/v23.3.18.15-lts.md new file mode 100644 index 00000000000..3bf993a0960 --- /dev/null +++ b/docs/changelogs/v23.3.18.15-lts.md @@ -0,0 +1,26 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.3.18.15-lts (7228475d77a) FIXME as compared to v23.3.17.13-lts (e867d59020f) + +#### Improvement +* Backported in [#56928](https://github.com/ClickHouse/ClickHouse/issues/56928): There was a potential vulnerability in previous ClickHouse versions: if a user has connected and unsuccessfully tried to authenticate with the "interserver secret" method, the server didn't terminate the connection immediately but continued to receive and ignore the leftover packets from the client. While these packets are ignored, they are still parsed, and if they use a compression method with another known vulnerability, it will lead to exploitation of it without authentication. This issue was found with [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by https://twitter.com/malacupa. [#56794](https://github.com/ClickHouse/ClickHouse/pull/56794) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Build/Testing/Packaging Improvement +* Backported in [#57019](https://github.com/ClickHouse/ClickHouse/issues/57019): There was an attempt to have the proper listing in [#44311](https://github.com/ClickHouse/ClickHouse/issues/44311), but the fix itself was in the wrong place, so it's still broken. See an [example](https://github.com/ClickHouse/ClickHouse/actions/runs/6897342568/job/18781001022#step:8:25). [#56989](https://github.com/ClickHouse/ClickHouse/pull/56989) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+ +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix buffer overflow in Gorilla codec [#57107](https://github.com/ClickHouse/ClickHouse/pull/57107) ([Nikolay Degterinsky](https://github.com/evillique)). +* Close interserver connection on any exception before authentication [#57142](https://github.com/ClickHouse/ClickHouse/pull/57142) ([Antonio Andelic](https://github.com/antonio2368)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Fix pygithub [#56778](https://github.com/ClickHouse/ClickHouse/pull/56778) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Avoid dependencies with no fixed versions [#56914](https://github.com/ClickHouse/ClickHouse/pull/56914) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Tiny improvement security [#57171](https://github.com/ClickHouse/ClickHouse/pull/57171) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + diff --git a/docs/changelogs/v23.8.7.24-lts.md b/docs/changelogs/v23.8.7.24-lts.md new file mode 100644 index 00000000000..37862c17315 --- /dev/null +++ b/docs/changelogs/v23.8.7.24-lts.md @@ -0,0 +1,31 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.8.7.24-lts (812b95e14ba) FIXME as compared to v23.8.6.16-lts (077df679bed) + +#### Build/Testing/Packaging Improvement +* Backported in [#56733](https://github.com/ClickHouse/ClickHouse/issues/56733): Do not fetch changed submodules in the builder container. [#56689](https://github.com/ClickHouse/ClickHouse/pull/56689) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Select from system tables when table based on table function. [#55540](https://github.com/ClickHouse/ClickHouse/pull/55540) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* Fix incomplete query result for UNION in view() function. [#56274](https://github.com/ClickHouse/ClickHouse/pull/56274) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix crash in case of adding a column with type Object(JSON) [#56307](https://github.com/ClickHouse/ClickHouse/pull/56307) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix segfault during Kerberos initialization [#56401](https://github.com/ClickHouse/ClickHouse/pull/56401) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix: RabbitMQ OpenSSL dynamic loading issue [#56703](https://github.com/ClickHouse/ClickHouse/pull/56703) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix crash in FPC codec [#56795](https://github.com/ClickHouse/ClickHouse/pull/56795) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NO CL CATEGORY + +* Backported in [#56601](https://github.com/ClickHouse/ClickHouse/issues/56601):. [#56598](https://github.com/ClickHouse/ClickHouse/pull/56598) ([Maksim Kita](https://github.com/kitaisreal)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Rewrite jobs to use callable workflow [#56385](https://github.com/ClickHouse/ClickHouse/pull/56385) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Continue rewriting workflows to reusable tests [#56501](https://github.com/ClickHouse/ClickHouse/pull/56501) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Better exception messages [#56854](https://github.com/ClickHouse/ClickHouse/pull/56854) ([Antonio Andelic](https://github.com/antonio2368)). 
+ diff --git a/docs/changelogs/v23.8.8.20-lts.md b/docs/changelogs/v23.8.8.20-lts.md new file mode 100644 index 00000000000..345cfcccf17 --- /dev/null +++ b/docs/changelogs/v23.8.8.20-lts.md @@ -0,0 +1,28 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.8.8.20-lts (5e012a03bf2) FIXME as compared to v23.8.7.24-lts (812b95e14ba) + +#### Improvement +* Backported in [#56509](https://github.com/ClickHouse/ClickHouse/issues/56509): Allow backup of materialized view with dropped inner table instead of failing the backup. [#56387](https://github.com/ClickHouse/ClickHouse/pull/56387) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#56929](https://github.com/ClickHouse/ClickHouse/issues/56929): There was a potential vulnerability in previous ClickHouse versions: if a user has connected and unsuccessfully tried to authenticate with the "interserver secret" method, the server didn't terminate the connection immediately but continued to receive and ignore the leftover packets from the client. While these packets are ignored, they are still parsed, and if they use a compression method with another known vulnerability, it will lead to exploitation of it without authentication. This issue was found with [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by https://twitter.com/malacupa. [#56794](https://github.com/ClickHouse/ClickHouse/pull/56794) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Build/Testing/Packaging Improvement +* Backported in [#57020](https://github.com/ClickHouse/ClickHouse/issues/57020): There was an attempt to have the proper listing in [#44311](https://github.com/ClickHouse/ClickHouse/issues/44311), but the fix itself was in the wrong place, so it's still broken. See an [example](https://github.com/ClickHouse/ClickHouse/actions/runs/6897342568/job/18781001022#step:8:25). [#56989](https://github.com/ClickHouse/ClickHouse/pull/56989) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix ON CLUSTER queries without database on initial node [#56484](https://github.com/ClickHouse/ClickHouse/pull/56484) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix buffer overflow in Gorilla codec [#57107](https://github.com/ClickHouse/ClickHouse/pull/57107) ([Nikolay Degterinsky](https://github.com/evillique)). +* Close interserver connection on any exception before authentication [#57142](https://github.com/ClickHouse/ClickHouse/pull/57142) ([Antonio Andelic](https://github.com/antonio2368)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Fix pygithub [#56778](https://github.com/ClickHouse/ClickHouse/pull/56778) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Avoid dependencies with no fixed versions [#56914](https://github.com/ClickHouse/ClickHouse/pull/56914) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Tiny improvement security [#57171](https://github.com/ClickHouse/ClickHouse/pull/57171) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+ diff --git a/docs/changelogs/v23.9.5.29-stable.md b/docs/changelogs/v23.9.5.29-stable.md new file mode 100644 index 00000000000..02572d0e562 --- /dev/null +++ b/docs/changelogs/v23.9.5.29-stable.md @@ -0,0 +1,34 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.9.5.29-stable (f8554c1a1ff) FIXME as compared to v23.9.4.11-stable (74c1f49dd6a) + +#### Build/Testing/Packaging Improvement +* Backported in [#56631](https://github.com/ClickHouse/ClickHouse/issues/56631): In [#54043](https://github.com/ClickHouse/ClickHouse/issues/54043) the setup plan started to appear in the logs. It should be only in the `runner_get_all_tests.log` only. As well, send the failed infrastructure event to CI db. [#56214](https://github.com/ClickHouse/ClickHouse/pull/56214) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#56735](https://github.com/ClickHouse/ClickHouse/issues/56735): Do not fetch changed submodules in the builder container. [#56689](https://github.com/ClickHouse/ClickHouse/pull/56689) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Select from system tables when table based on table function. [#55540](https://github.com/ClickHouse/ClickHouse/pull/55540) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* Fix incomplete query result for UNION in view() function. [#56274](https://github.com/ClickHouse/ClickHouse/pull/56274) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix crash in case of adding a column with type Object(JSON) [#56307](https://github.com/ClickHouse/ClickHouse/pull/56307) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix segfault during Kerberos initialization [#56401](https://github.com/ClickHouse/ClickHouse/pull/56401) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix: RabbitMQ OpenSSL dynamic loading issue [#56703](https://github.com/ClickHouse/ClickHouse/pull/56703) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix crash in GCD codec in case when zeros present in data [#56704](https://github.com/ClickHouse/ClickHouse/pull/56704) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix crash in FPC codec [#56795](https://github.com/ClickHouse/ClickHouse/pull/56795) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NO CL CATEGORY + +* Backported in [#56603](https://github.com/ClickHouse/ClickHouse/issues/56603):. [#56598](https://github.com/ClickHouse/ClickHouse/pull/56598) ([Maksim Kita](https://github.com/kitaisreal)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Improve enrich image [#55793](https://github.com/ClickHouse/ClickHouse/pull/55793) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Rewrite jobs to use callable workflow [#56385](https://github.com/ClickHouse/ClickHouse/pull/56385) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Continue rewriting workflows to reusable tests [#56501](https://github.com/ClickHouse/ClickHouse/pull/56501) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Better exception messages [#56854](https://github.com/ClickHouse/ClickHouse/pull/56854) ([Antonio Andelic](https://github.com/antonio2368)). 
+ diff --git a/docs/changelogs/v23.9.6.20-stable.md b/docs/changelogs/v23.9.6.20-stable.md new file mode 100644 index 00000000000..b4aed625fea --- /dev/null +++ b/docs/changelogs/v23.9.6.20-stable.md @@ -0,0 +1,28 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.9.6.20-stable (cf7e84bb8cf) FIXME as compared to v23.9.5.29-stable (f8554c1a1ff) + +#### Improvement +* Backported in [#56930](https://github.com/ClickHouse/ClickHouse/issues/56930): There was a potential vulnerability in previous ClickHouse versions: if a user has connected and unsuccessfully tried to authenticate with the "interserver secret" method, the server didn't terminate the connection immediately but continued to receive and ignore the leftover packets from the client. While these packets are ignored, they are still parsed, and if they use a compression method with another known vulnerability, it will lead to exploitation of it without authentication. This issue was found with [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by https://twitter.com/malacupa. [#56794](https://github.com/ClickHouse/ClickHouse/pull/56794) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Build/Testing/Packaging Improvement +* Backported in [#57022](https://github.com/ClickHouse/ClickHouse/issues/57022): There was an attempt to have the proper listing in [#44311](https://github.com/ClickHouse/ClickHouse/issues/44311), but the fix itself was in the wrong place, so it's still broken. See an [example](https://github.com/ClickHouse/ClickHouse/actions/runs/6897342568/job/18781001022#step:8:25). [#56989](https://github.com/ClickHouse/ClickHouse/pull/56989) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix ON CLUSTER queries without database on initial node [#56484](https://github.com/ClickHouse/ClickHouse/pull/56484) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix buffer overflow in Gorilla codec [#57107](https://github.com/ClickHouse/ClickHouse/pull/57107) ([Nikolay Degterinsky](https://github.com/evillique)). +* Close interserver connection on any exception before authentication [#57142](https://github.com/ClickHouse/ClickHouse/pull/57142) ([Antonio Andelic](https://github.com/antonio2368)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Fix client suggestions for user without grants [#56234](https://github.com/ClickHouse/ClickHouse/pull/56234) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix pygithub [#56778](https://github.com/ClickHouse/ClickHouse/pull/56778) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Avoid dependencies with no fixed versions [#56914](https://github.com/ClickHouse/ClickHouse/pull/56914) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Tiny improvement security [#57171](https://github.com/ClickHouse/ClickHouse/pull/57171) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + diff --git a/docs/en/engines/database-engines/materialized-mysql.md b/docs/en/engines/database-engines/materialized-mysql.md index b7e567c7b6c..f32698f84f6 100644 --- a/docs/en/engines/database-engines/materialized-mysql.md +++ b/docs/en/engines/database-engines/materialized-mysql.md @@ -7,7 +7,10 @@ sidebar_position: 70 # [experimental] MaterializedMySQL :::note -This is an experimental feature that should not be used in production. +This database engine is experimental. 
To use it, set `allow_experimental_database_materialized_mysql` to 1 in your configuration files or by using the `SET` command: +```sql +SET allow_experimental_database_materialized_mysql=1 +``` ::: Creates a ClickHouse database with all the tables existing in MySQL, and all the data in those tables. The ClickHouse server works as MySQL replica. It reads `binlog` and performs DDL and DML queries. diff --git a/docs/en/engines/database-engines/materialized-postgresql.md b/docs/en/engines/database-engines/materialized-postgresql.md index 4e978947e36..3aa6dd01ea3 100644 --- a/docs/en/engines/database-engines/materialized-postgresql.md +++ b/docs/en/engines/database-engines/materialized-postgresql.md @@ -8,7 +8,7 @@ sidebar_position: 60 Creates a ClickHouse database with tables from PostgreSQL database. Firstly, database with engine `MaterializedPostgreSQL` creates a snapshot of PostgreSQL database and loads required tables. Required tables can include any subset of tables from any subset of schemas from specified database. Along with the snapshot database engine acquires LSN and once initial dump of tables is performed - it starts pulling updates from WAL. After database is created, newly added tables to PostgreSQL database are not automatically added to replication. They have to be added manually with `ATTACH TABLE db.table` query. -Replication is implemented with PostgreSQL Logical Replication Protocol, which does not allow to replicate DDL, but allows to know whether replication breaking changes happened (column type changes, adding/removing columns). Such changes are detected and according tables stop receiving updates. In this case you should use `ATTACH`/ `DETACH` queries to reload table completely. If DDL does not break replication (for example, renaming a column) table will still receive updates (insertion is done by position). +Replication is implemented with PostgreSQL Logical Replication Protocol, which does not allow to replicate DDL, but allows to know whether replication breaking changes happened (column type changes, adding/removing columns). Such changes are detected and according tables stop receiving updates. In this case you should use `ATTACH`/ `DETACH PERMANENTLY` queries to reload table completely. If DDL does not break replication (for example, renaming a column) table will still receive updates (insertion is done by position). :::note This database engine is experimental. To use it, set `allow_experimental_database_materialized_postgresql` to 1 in your configuration files or by using the `SET` command: @@ -63,7 +63,7 @@ Before version 22.1, adding a table to replication left a non-removed temporary It is possible to remove specific tables from replication: ``` sql -DETACH TABLE postgres_database.table_to_remove; +DETACH TABLE postgres_database.table_to_remove PERMANENTLY; ``` ## PostgreSQL schema {#schema} diff --git a/docs/en/engines/table-engines/integrations/azureBlobStorage.md b/docs/en/engines/table-engines/integrations/azureBlobStorage.md index 3df08ee2ffb..c6525121667 100644 --- a/docs/en/engines/table-engines/integrations/azureBlobStorage.md +++ b/docs/en/engines/table-engines/integrations/azureBlobStorage.md @@ -47,6 +47,12 @@ SELECT * FROM test_table; └──────┴───────┘ ``` +## Virtual columns {#virtual-columns} + +- `_path` — Path to the file. Type: `LowCardinalty(String)`. +- `_file` — Name of the file. Type: `LowCardinalty(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. 
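For the newly documented `_path`, `_file`, and `_size` virtual columns of the AzureBlobStorage engine, a short query makes the section more concrete. This is only a sketch: it assumes the `test_table` from the engine example earlier on that page, and the column list and grouping are illustrative rather than part of the original change.

```sql
-- Sketch: combine regular columns with the AzureBlobStorage virtual columns.
-- `test_table` is the table from the example above; any readable table would do.
SELECT
    _path,            -- path of the blob the row was read from
    _file,            -- blob name only
    _size,            -- blob size in bytes, NULL if unknown
    count() AS rows
FROM test_table
GROUP BY _path, _file, _size
ORDER BY _path;
```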
+ ## See also [Azure Blob Storage Table Function](/docs/en/sql-reference/table-functions/azureBlobStorage) diff --git a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md index 23ab89e1983..9af857b0835 100644 --- a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md +++ b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md @@ -85,6 +85,10 @@ You can also change any [rocksdb options](https://github.com/facebook/rocksdb/wi ``` +By default trivial approximate count optimization is turned off, which might affect the performance `count()` queries. To enable this +optimization set up `optimize_trivial_approximate_count_query = 1`. Also, this setting affects `system.tables` for EmbeddedRocksDB engine, +turn on the settings to see approximate values for `total_rows` and `total_bytes`. + ## Supported operations {#supported-operations} ### Inserts diff --git a/docs/en/engines/table-engines/integrations/hdfs.md b/docs/en/engines/table-engines/integrations/hdfs.md index c677123a8d0..19221c256f9 100644 --- a/docs/en/engines/table-engines/integrations/hdfs.md +++ b/docs/en/engines/table-engines/integrations/hdfs.md @@ -230,8 +230,9 @@ libhdfs3 support HDFS namenode HA. ## Virtual Columns {#virtual-columns} -- `_path` — Path to the file. -- `_file` — Name of the file. +- `_path` — Path to the file. Type: `LowCardinalty(String)`. +- `_file` — Name of the file. Type: `LowCardinalty(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. ## Storage Settings {#storage-settings} diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index f16f9692bb6..de1a090d491 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -238,19 +238,19 @@ Example: ## Virtual Columns {#virtual-columns} -- `_topic` — Kafka topic. -- `_key` — Key of the message. -- `_offset` — Offset of the message. -- `_timestamp` — Timestamp of the message. -- `_timestamp_ms` — Timestamp in milliseconds of the message. -- `_partition` — Partition of Kafka topic. -- `_headers.name` — Array of message's headers keys. -- `_headers.value` — Array of message's headers values. +- `_topic` — Kafka topic. Data type: `LowCardinality(String)`. +- `_key` — Key of the message. Data type: `String`. +- `_offset` — Offset of the message. Data type: `UInt64`. +- `_timestamp` — Timestamp of the message Data type: `Nullable(DateTime)`. +- `_timestamp_ms` — Timestamp in milliseconds of the message. Data type: `Nullable(DateTime64(3))`. +- `_partition` — Partition of Kafka topic. Data type: `UInt64`. +- `_headers.name` — Array of message's headers keys. Data type: `Array(String)`. +- `_headers.value` — Array of message's headers values. Data type: `Array(String)`. Additional virtual columns when `kafka_handle_error_mode='stream'`: -- `_raw_message` - Raw message that couldn't be parsed successfully. -- `_error` - Exception message happened during failed parsing. +- `_raw_message` - Raw message that couldn't be parsed successfully. Data type: `String`. +- `_error` - Exception message happened during failed parsing. Data type: `String`. Note: `_raw_message` and `_error` virtual columns are filled only in case of exception during parsing, they are always empty when message was parsed successfully. 
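Since the Kafka section above now documents the data types of `_raw_message` and `_error`, a hedged sketch of the `kafka_handle_error_mode='stream'` pattern may help readers see where those columns are used. The broker address, topic, and table names below are placeholders invented for the example, not values from the original page.

```sql
-- Sketch: capture messages that fail to parse when the error mode is 'stream'.
CREATE TABLE queue
(
    payload String
)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'localhost:9092',   -- placeholder broker
         kafka_topic_list = 'events',            -- placeholder topic
         kafka_group_name = 'clickhouse-errors',
         kafka_format = 'JSONEachRow',
         kafka_handle_error_mode = 'stream';

-- Rows that failed to parse keep the raw payload and the exception text.
CREATE MATERIALIZED VIEW failed_messages
ENGINE = MergeTree
ORDER BY (topic, partition, offset)
AS SELECT
    _topic       AS topic,
    _partition   AS partition,
    _offset      AS offset,
    _raw_message AS raw,
    _error       AS error
FROM queue
WHERE length(_error) > 0;
```

Successfully parsed messages leave `_raw_message` and `_error` empty, so the `WHERE length(_error) > 0` filter keeps only the broken ones.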
diff --git a/docs/en/engines/table-engines/integrations/materialized-postgresql.md b/docs/en/engines/table-engines/integrations/materialized-postgresql.md index 02afec5cfd6..4d83ca79d5c 100644 --- a/docs/en/engines/table-engines/integrations/materialized-postgresql.md +++ b/docs/en/engines/table-engines/integrations/materialized-postgresql.md @@ -8,6 +8,14 @@ sidebar_label: MaterializedPostgreSQL Creates ClickHouse table with an initial data dump of PostgreSQL table and starts replication process, i.e. executes background job to apply new changes as they happen on PostgreSQL table in the remote PostgreSQL database. +:::note +This table engine is experimental. To use it, set `allow_experimental_materialized_postgresql_table` to 1 in your configuration files or by using the `SET` command: +```sql +SET allow_experimental_materialized_postgresql_table=1 +``` +::: + + If more than one table is required, it is highly recommended to use the [MaterializedPostgreSQL](../../../engines/database-engines/materialized-postgresql.md) database engine instead of the table engine and use the `materialized_postgresql_tables_list` setting, which specifies the tables to be replicated (will also be possible to add database `schema`). It will be much better in terms of CPU, fewer connections and fewer replication slots inside the remote PostgreSQL database. ## Creating a Table {#creating-a-table} diff --git a/docs/en/engines/table-engines/integrations/nats.md b/docs/en/engines/table-engines/integrations/nats.md index 5819a8e95c8..37a41159fab 100644 --- a/docs/en/engines/table-engines/integrations/nats.md +++ b/docs/en/engines/table-engines/integrations/nats.md @@ -163,14 +163,14 @@ If you want to change the target table by using `ALTER`, we recommend disabling ## Virtual Columns {#virtual-columns} -- `_subject` - NATS message subject. +- `_subject` - NATS message subject. Data type: `String`. Additional virtual columns when `kafka_handle_error_mode='stream'`: -- `_raw_message` - Raw message that couldn't be parsed successfully. -- `_error` - Exception message happened during failed parsing. +- `_raw_message` - Raw message that couldn't be parsed successfully. Data type: `Nullable(String)`. +- `_error` - Exception message happened during failed parsing. Data type: `Nullable(String)`. -Note: `_raw_message` and `_error` virtual columns are filled only in case of exception during parsing, they are always empty when message was parsed successfully. +Note: `_raw_message` and `_error` virtual columns are filled only in case of exception during parsing, they are always `NULL` when message was parsed successfully. ## Data formats support {#data-formats-support} diff --git a/docs/en/engines/table-engines/integrations/rabbitmq.md b/docs/en/engines/table-engines/integrations/rabbitmq.md index 10e7146ff85..53c6e089a70 100644 --- a/docs/en/engines/table-engines/integrations/rabbitmq.md +++ b/docs/en/engines/table-engines/integrations/rabbitmq.md @@ -184,19 +184,19 @@ Example: ## Virtual Columns {#virtual-columns} -- `_exchange_name` - RabbitMQ exchange name. -- `_channel_id` - ChannelID, on which consumer, who received the message, was declared. -- `_delivery_tag` - DeliveryTag of the received message. Scoped per channel. -- `_redelivered` - `redelivered` flag of the message. -- `_message_id` - messageID of the received message; non-empty if was set, when message was published. -- `_timestamp` - timestamp of the received message; non-empty if was set, when message was published. +- `_exchange_name` - RabbitMQ exchange name. 
Data type: `String`. +- `_channel_id` - ChannelID, on which consumer, who received the message, was declared. Data type: `String`. +- `_delivery_tag` - DeliveryTag of the received message. Scoped per channel. Data type: `UInt64`. +- `_redelivered` - `redelivered` flag of the message. Data type: `UInt8`. +- `_message_id` - messageID of the received message; non-empty if was set, when message was published. Data type: `String`. +- `_timestamp` - timestamp of the received message; non-empty if was set, when message was published. Data type: `UInt64`. Additional virtual columns when `kafka_handle_error_mode='stream'`: -- `_raw_message` - Raw message that couldn't be parsed successfully. -- `_error` - Exception message happened during failed parsing. +- `_raw_message` - Raw message that couldn't be parsed successfully. Data type: `Nullable(String)`. +- `_error` - Exception message happened during failed parsing. Data type: `Nullable(String)`. -Note: `_raw_message` and `_error` virtual columns are filled only in case of exception during parsing, they are always empty when message was parsed successfully. +Note: `_raw_message` and `_error` virtual columns are filled only in case of exception during parsing, they are always `NULL` when message was parsed successfully. ## Data formats support {#data-formats-support} diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md index 2967a15494c..3144bdd32fa 100644 --- a/docs/en/engines/table-engines/integrations/s3.md +++ b/docs/en/engines/table-engines/integrations/s3.md @@ -142,8 +142,9 @@ Code: 48. DB::Exception: Received from localhost:9000. DB::Exception: Reading fr ## Virtual columns {#virtual-columns} -- `_path` — Path to the file. -- `_file` — Name of the file. +- `_path` — Path to the file. Type: `LowCardinalty(String)`. +- `_file` — Name of the file. Type: `LowCardinalty(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. For more information about virtual columns see [here](../../../engines/table-engines/index.md#table_engines-virtual_columns). diff --git a/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md b/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md index 7e564b23676..97d37e476ae 100644 --- a/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md +++ b/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md @@ -14,7 +14,7 @@ You should never use too granular of partitioning. Don't partition your data by Partitioning is available for the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family tables (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables). [Materialized views](../../../engines/table-engines/special/materializedview.md#materializedview) based on MergeTree tables support partitioning, as well. -A partition is a logical combination of records in a table by a specified criterion. You can set a partition by an arbitrary criterion, such as by month, by day, or by event type. Each partition is stored separately to simplify manipulations of this data. When accessing the data, ClickHouse uses the smallest subset of partitions possible. +A partition is a logical combination of records in a table by a specified criterion. You can set a partition by an arbitrary criterion, such as by month, by day, or by event type. 
Each partition is stored separately to simplify manipulations of this data. When accessing the data, ClickHouse uses the smallest subset of partitions possible. Partitions improve performance for queries containing a partitioning key because ClickHouse will filter for that partition before selecting the parts and granules within the partition. The partition is specified in the `PARTITION BY expr` clause when [creating a table](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table). The partition key can be any expression from the table columns. For example, to specify partitioning by month, use the expression `toYYYYMM(date_column)`: diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index e615c9ad9d3..9cbb48ef847 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -6,7 +6,7 @@ sidebar_label: MergeTree # MergeTree -The `MergeTree` engine and other engines of this family (`*MergeTree`) are the most robust ClickHouse table engines. +The `MergeTree` engine and other engines of this family (`*MergeTree`) are the most commonly used and most robust ClickHouse table engines. Engines in the `MergeTree` family are designed for inserting a very large amount of data into a table. The data is quickly written to the table part by part, then rules are applied for merging the parts in the background. This method is much more efficient than continually rewriting the data in storage during insert. @@ -32,6 +32,8 @@ Main features: The [Merge](/docs/en/engines/table-engines/special/merge.md/#merge) engine does not belong to the `*MergeTree` family. ::: +If you need to update rows frequently, we recommend using the [`ReplacingMergeTree`](/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md) table engine. Using `ALTER TABLE my_table UPDATE` to update rows triggers a mutation, which causes parts to be re-written and uses IO/resources. With `ReplacingMergeTree`, you can simply insert the updated rows and the old rows will be replaced according to the table sorting key. + ## Creating a Table {#table_engine-mergetree-creating-a-table} ``` sql @@ -502,8 +504,8 @@ Indexes of type `set` can be utilized by all functions. 
The other index types ar | Function (operator) / Index | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | inverted | |------------------------------------------------------------------------------------------------------------|-------------|--------|------------|------------|--------------|----------| -| [equals (=, ==)](/docs/en/sql-reference/functions/comparison-functions.md/#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notEquals(!=, <>)](/docs/en/sql-reference/functions/comparison-functions.md/#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | +| [equals (=, ==)](/docs/en/sql-reference/functions/comparison-functions.md/#equals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notEquals(!=, <>)](/docs/en/sql-reference/functions/comparison-functions.md/#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | | [like](/docs/en/sql-reference/functions/string-search-functions.md/#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ | | [notLike](/docs/en/sql-reference/functions/string-search-functions.md/#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ | | [startsWith](/docs/en/sql-reference/functions/string-functions.md/#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ | @@ -511,10 +513,10 @@ Indexes of type `set` can be utilized by all functions. The other index types ar | [multiSearchAny](/docs/en/sql-reference/functions/string-search-functions.md/#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | ✔ | | [in](/docs/en/sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | | [notIn](/docs/en/sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | -| [less (<)](/docs/en/sql-reference/functions/comparison-functions.md/#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | -| [greater (>)](/docs/en/sql-reference/functions/comparison-functions.md/#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | -| [lessOrEquals (<=)](/docs/en/sql-reference/functions/comparison-functions.md/#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | -| [greaterOrEquals (>=)](/docs/en/sql-reference/functions/comparison-functions.md/#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | +| [less (<)](/docs/en/sql-reference/functions/comparison-functions.md/#less) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | +| [greater (>)](/docs/en/sql-reference/functions/comparison-functions.md/#greater) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | +| [lessOrEquals (<=)](/docs/en/sql-reference/functions/comparison-functions.md/#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | +| [greaterOrEquals (>=)](/docs/en/sql-reference/functions/comparison-functions.md/#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | | [empty](/docs/en/sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | | [notEmpty](/docs/en/sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ | | [has](/docs/en/sql-reference/functions/array-functions#function-has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ | diff --git a/docs/en/engines/table-engines/special/file.md b/docs/en/engines/table-engines/special/file.md index 27945b30c03..6e3897398a5 100644 --- a/docs/en/engines/table-engines/special/file.md +++ b/docs/en/engines/table-engines/special/file.md @@ -87,12 +87,18 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64 - Indices - Replication -## PARTITION BY +## PARTITION BY {#partition-by} `PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. 
Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression). For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format. +## Virtual Columns {#virtual-columns} + +- `_path` — Path to the file. Type: `LowCardinalty(String)`. +- `_file` — Name of the file. Type: `LowCardinalty(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. + ## Settings {#settings} - [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default. diff --git a/docs/en/engines/table-engines/special/filelog.md b/docs/en/engines/table-engines/special/filelog.md index 0c2a2601fc9..eef9a17444e 100644 --- a/docs/en/engines/table-engines/special/filelog.md +++ b/docs/en/engines/table-engines/special/filelog.md @@ -94,12 +94,12 @@ If you want to change the target table by using `ALTER`, we recommend disabling ## Virtual Columns {#virtual-columns} -- `_filename` - Name of the log file. -- `_offset` - Offset in the log file. +- `_filename` - Name of the log file. Data type: `LowCardinality(String)`. +- `_offset` - Offset in the log file. Data type: `UInt64`. Additional virtual columns when `kafka_handle_error_mode='stream'`: -- `_raw_record` - Raw record that couldn't be parsed successfully. -- `_error` - Exception message happened during failed parsing. +- `_raw_record` - Raw record that couldn't be parsed successfully. Data type: `Nullable(String)`. +- `_error` - Exception message happened during failed parsing. Data type: `Nullable(String)`. -Note: `_raw_record` and `_error` virtual columns are filled only in case of exception during parsing, they are always empty when message was parsed successfully. +Note: `_raw_record` and `_error` virtual columns are filled only in case of exception during parsing, they are always `NULL` when message was parsed successfully. diff --git a/docs/en/engines/table-engines/special/url.md b/docs/en/engines/table-engines/special/url.md index 5a5e1564180..f6183a779ae 100644 --- a/docs/en/engines/table-engines/special/url.md +++ b/docs/en/engines/table-engines/special/url.md @@ -103,6 +103,12 @@ SELECT * FROM url_engine_table For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format. +## Virtual Columns {#virtual-columns} + +- `_path` — Path to the `URL`. Type: `LowCardinalty(String)`. +- `_file` — Resource name of the `URL`. Type: `LowCardinalty(String)`. +- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. + ## Storage Settings {#storage-settings} - [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default. 
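The new `PARTITION BY` anchor and Virtual Columns section for the `File` engine could use a small end-to-end illustration. A minimal sketch, assuming a `File(CSV)` table with a monthly partition key behaves as described above; the table and column names are invented for the example.

```sql
-- Sketch: one file per month, then inspect which file each row came from.
CREATE TABLE file_events
(
    event_date Date,
    user_id    UInt64
)
ENGINE = File(CSV)
PARTITION BY toYYYYMM(event_date);

INSERT INTO file_events VALUES ('2023-11-01', 1), ('2023-12-01', 2);

SELECT _path, _file, _size, count() AS rows
FROM file_events
GROUP BY _path, _file, _size;
```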
diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 155ae316890..57de0555bf6 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -2469,6 +2469,7 @@ This function is designed to load a NumPy array from a .npy file into ClickHouse | u2 | UInt16 | | u4 | UInt32 | | u8 | UInt64 | +| f2 | Float32 | | f4 | Float32 | | f8 | Float64 | | S | String | diff --git a/docs/en/operations/_troubleshooting.md b/docs/en/operations/_troubleshooting.md index dbb0dad7976..b3846643e7a 100644 --- a/docs/en/operations/_troubleshooting.md +++ b/docs/en/operations/_troubleshooting.md @@ -17,12 +17,8 @@ - The issue may be happened when the GPG key is changed. -Please use the following scripts to resolve the issue: +Please use the manual from the [setup](../getting-started/install.md#setup-the-debian-repository) page to update the repository configuration. -```bash -sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754 -sudo apt-get update -``` ### You Get Different Warnings with `apt-get update` {#you-get-different-warnings-with-apt-get-update} diff --git a/docs/en/operations/query-cache.md b/docs/en/operations/query-cache.md index 665ae6cdfdc..def0f48b968 100644 --- a/docs/en/operations/query-cache.md +++ b/docs/en/operations/query-cache.md @@ -169,7 +169,12 @@ Also, results of queries with non-deterministic functions are not cached by defa [`getMacro()`](../sql-reference/functions/other-functions.md#getMacro) etc. To force caching of results of queries with non-deterministic functions regardless, use setting -[query_cache_store_results_of_queries_with_nondeterministic_functions](settings/settings.md#query-cache-store-results-of-queries-with-nondeterministic-functions). +[query_cache_nondeterministic_function_handling](settings/settings.md#query-cache-nondeterministic-function-handling). + +:::note +Prior to ClickHouse v23.11, setting 'query_cache_store_results_of_queries_with_nondeterministic_functions = 0 / 1' controlled whether +results of queries with non-deterministic results were cached. In newer ClickHouse versions, this setting is obsolete and has no effect. +::: Finally, entries in the query cache are not shared between users due to security reasons. For example, user A must not be able to bypass a row policy on a table by running the same query as another user B for whom no such policy exists. However, if necessary, cache entries can diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 98636a653fb..2b73c4ec624 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -74,7 +74,7 @@ The maximum number of threads that will be used for fetching data parts from ano Type: UInt64 -Default: 8 +Default: 16 ## background_merges_mutations_concurrency_ratio @@ -136,7 +136,7 @@ The maximum number of threads that will be used for constantly executing some li Type: UInt64 -Default: 128 +Default: 512 ## backup_threads @@ -961,9 +961,11 @@ See also “[Executable User Defined Functions](../../sql-reference/functions/in Lazy loading of dictionaries. -If `true`, then each dictionary is created on first use. If dictionary creation failed, the function that was using the dictionary throws an exception. +If `true`, then each dictionary is loaded on the first use. If the loading is failed, the function that was using the dictionary throws an exception. 
-If `false`, all dictionaries are created when the server starts, if the dictionary or dictionaries are created too long or are created with errors, then the server boots without of these dictionaries and continues to try to create these dictionaries. +If `false`, then the server loads all dictionaries at startup. +The server will wait at startup until all the dictionaries finish their loading before receiving any connections +(exception: if `wait_dictionaries_load_at_startup` is set to `false` - see below). The default is `true`. @@ -2391,6 +2393,28 @@ Path to the file that contains: users.xml ``` +## wait_dictionaries_load_at_startup {#wait_dictionaries_load_at_startup} + +This setting allows to specify behavior if `dictionaries_lazy_load` is `false`. +(If `dictionaries_lazy_load` is `true` this setting doesn't affect anything.) + +If `wait_dictionaries_load_at_startup` is `false`, then the server +will start loading all the dictionaries at startup and it will receive connections in parallel with that loading. +When a dictionary is used in a query for the first time then the query will wait until the dictionary is loaded if it's not loaded yet. +Setting `wait_dictionaries_load_at_startup` to `false` can make ClickHouse start faster, however some queries can be executed slower +(because they will have to wait for some dictionaries to be loaded). + +If `wait_dictionaries_load_at_startup` is `true`, then the server will wait at startup +until all the dictionaries finish their loading (successfully or not) before receiving any connections. + +The default is `true`. + +**Example** + +``` xml +true +``` + ## zookeeper {#server-settings_zookeeper} Contains settings that allow ClickHouse to interact with a [ZooKeeper](http://zookeeper.apache.org/) cluster. @@ -2718,7 +2742,7 @@ ClickHouse will use it to form the proxy URI using the following template: `{pro 10 - + http://resolver:8080/hostname diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 2f3805e8e55..663572d91c8 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -731,11 +731,13 @@ Default value: LZ4. ## max_block_size {#setting-max_block_size} -In ClickHouse, data is processed by blocks (sets of column parts). The internal processing cycles for a single block are efficient enough, but there are noticeable expenditures on each block. The `max_block_size` setting is a recommendation for what size of the block (in a count of rows) to load from tables. The block size shouldn’t be too small, so that the expenditures on each block are still noticeable, but not too large so that the query with LIMIT that is completed after the first block is processed quickly. The goal is to avoid consuming too much memory when extracting a large number of columns in multiple threads and to preserve at least some cache locality. +In ClickHouse, data is processed by blocks, which are sets of column parts. The internal processing cycles for a single block are efficient but there are noticeable costs when processing each block. -Default value: 65,536. +The `max_block_size` setting indicates the recommended maximum number of rows to include in a single block when loading data from tables. Blocks the size of `max_block_size` are not always loaded from the table: if ClickHouse determines that less data needs to be retrieved, a smaller block is processed. -Blocks the size of `max_block_size` are not always loaded from the table. 
If it is obvious that less data needs to be retrieved, a smaller block is processed. +The block size should not be too small to avoid noticeable costs when processing each block. It should also not be too large to ensure that queries with a LIMIT clause execute quickly after processing the first block. When setting `max_block_size`, the goal should be to avoid consuming too much memory when extracting a large number of columns in multiple threads and to preserve at least some cache locality. + +Default value: `65,409` ## preferred_block_size_bytes {#preferred-block-size-bytes} @@ -1657,16 +1659,17 @@ Possible values: Default value: `1`. -## query_cache_store_results_of_queries_with_nondeterministic_functions {#query-cache-store-results-of-queries-with-nondeterministic-functions} +## query_cache_nondeterministic_function_handling {#query-cache-nondeterministic-function-handling} -If turned on, then results of `SELECT` queries with non-deterministic functions (e.g. `rand()`, `now()`) can be cached in the [query cache](../query-cache.md). +Controls how the [query cache](../query-cache.md) handles `SELECT` queries with non-deterministic functions like `rand()` or `now()`. Possible values: -- 0 - Disabled -- 1 - Enabled +- `'throw'` - Throw an exception and don't cache the query result. +- `'save'` - Cache the query result. +- `'ignore'` - Don't cache the query result and don't throw an exception. -Default value: `0`. +Default value: `throw`. ## query_cache_min_query_runs {#query-cache-min-query-runs} @@ -2713,6 +2716,10 @@ Default value: `0`. - [Distributed Table Engine](../../engines/table-engines/special/distributed.md/#distributed) - [Managing Distributed Tables](../../sql-reference/statements/system.md/#query-language-system-distributed) +## insert_distributed_sync {#insert_distributed_sync} + +Alias for [`distributed_foreground_insert`](#distributed_foreground_insert). + ## insert_shard_id {#insert_shard_id} If not `0`, specifies the shard of [Distributed](../../engines/table-engines/special/distributed.md/#distributed) table into which the data will be inserted synchronously. @@ -4798,6 +4805,243 @@ a Tuple( If a table has a space-filling curve in its index, e.g. `ORDER BY mortonEncode(x, y)`, and the query has conditions on its arguments, e.g. `x >= 10 AND x <= 20 AND y >= 20 AND y <= 30`, use the space-filling curve for index analysis. +## query_plan_enable_optimizations {#query_plan_enable_optimizations} + +Toggles query optimization at the query plan level. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable all optimizations at the query plan level +- 1 - Enable optimizations at the query plan level (but individual optimizations may still be disabled via their individual settings) + +Default value: `1`. + +## query_plan_max_optimizations_to_apply + +Limits the total number of optimizations applied to query plan, see setting [query_plan_enable_optimizations](#query_plan_enable_optimizations). +Useful to avoid long optimization times for complex queries. +If the actual number of optimizations exceeds this setting, an exception is thrown. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Type: [UInt64](../../sql-reference/data-types/int-uint.md). 
+ +Default value: '10000' + +## query_plan_lift_up_array_join + +Toggles a query-plan-level optimization which moves ARRAY JOINs up in the execution plan. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_push_down_limit + +Toggles a query-plan-level optimization which moves LIMITs down in the execution plan. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_split_filter + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Toggles a query-plan-level optimization which splits filters into expressions. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_merge_expressions + +Toggles a query-plan-level optimization which merges consecutive filters. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_filter_push_down + +Toggles a query-plan-level optimization which moves filters down in the execution plan. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_execute_functions_after_sorting + +Toggles a query-plan-level optimization which moves expressions after sorting steps. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_reuse_storage_ordering_for_window_functions + +Toggles a query-plan-level optimization which uses storage sorting when sorting for window functions. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. 
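To make the effect of these query-plan settings tangible, a hedged sketch: the same query is explained twice, once with one of the optimizations enabled and once with it disabled, and the two plans can be compared. The query itself is arbitrary; only the `SETTINGS` clause matters here.

```sql
-- Plan with filter push-down enabled (the default).
EXPLAIN PLAN
SELECT sum(number)
FROM (SELECT number FROM numbers(1000000))
WHERE number % 10 = 0
SETTINGS query_plan_filter_push_down = 1;

-- Plan with filter push-down disabled; the filter is expected to stay above the subquery.
EXPLAIN PLAN
SELECT sum(number)
FROM (SELECT number FROM numbers(1000000))
WHERE number % 10 = 0
SETTINGS query_plan_filter_push_down = 0;
```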
+ +## query_plan_lift_up_union + +Toggles a query-plan-level optimization which moves larger subtrees of the query plan into union to enable further optimizations. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_distinct_in_order + +Toggles the distinct in-order optimization query-plan-level optimization. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_read_in_order + +Toggles the read in-order optimization query-plan-level optimization. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_aggregation_in_order + +Toggles the aggregation in-order query-plan-level optimization. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `0`. + +## query_plan_remove_redundant_sorting + +Toggles a query-plan-level optimization which removes redundant sorting steps, e.g. in subqueries. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + +## query_plan_remove_redundant_distinct + +Toggles a query-plan-level optimization which removes redundant DISTINCT steps. +Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1. + +:::note +This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed. +::: + +Possible values: + +- 0 - Disable +- 1 - Enable + +Default value: `1`. + ## dictionary_use_async_executor {#dictionary_use_async_executor} Execute a pipeline for reading dictionary source in several threads. It's supported only by dictionaries with local CLICKHOUSE source. @@ -4819,3 +5063,10 @@ When set to `true` the metadata files are written with `VERSION_FULL_OBJECT_KEY` When set to `false` the metadata files are written with the previous format version, `VERSION_INLINE_DATA`. With that format only suffixes of object storage key names are are written to the metadata files. The prefix for all of object storage key names is set in configurations files at `storage_configuration.disks` section. 
Default value: `false`. + +## s3_use_adaptive_timeouts {#s3_use_adaptive_timeouts} + +When set to `true`, the first two attempts of each S3 request are made with low send and receive timeouts. +When set to `false`, all attempts are made with identical timeouts. + +Default value: `true`. diff --git a/docs/en/operations/system-tables/blob_storage_log.md b/docs/en/operations/system-tables/blob_storage_log.md new file mode 100644 index 00000000000..2328f7f0346 --- /dev/null +++ b/docs/en/operations/system-tables/blob_storage_log.md @@ -0,0 +1,59 @@ +--- +slug: /en/operations/system-tables/blob_storage_log +--- +# blob_storage_log + +Contains logging entries with information about various blob storage operations such as uploads and deletes. + +Columns: + +- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Date of the event. +- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time of the event. +- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Time of the event with microseconds precision. +- `event_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Type of the event. Possible values: + - `'Upload'` + - `'Delete'` + - `'MultiPartUploadCreate'` + - `'MultiPartUploadWrite'` + - `'MultiPartUploadComplete'` + - `'MultiPartUploadAbort'` +- `query_id` ([String](../../sql-reference/data-types/string.md)) — Identifier of the query associated with the event, if any. +- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Identifier of the thread performing the operation. +- `thread_name` ([String](../../sql-reference/data-types/string.md)) — Name of the thread performing the operation. +- `disk_name` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) — Name of the associated disk. +- `bucket` ([String](../../sql-reference/data-types/string.md)) — Name of the bucket. +- `remote_path` ([String](../../sql-reference/data-types/string.md)) — Path to the remote resource. +- `local_path` ([String](../../sql-reference/data-types/string.md)) — Path to the metadata file on the local system, which references the remote resource. +- `data_size` ([UInt32](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Size of the data involved in the upload event. +- `error` ([String](../../sql-reference/data-types/string.md)) — Error message associated with the event, if any. + +**Example** + +Suppose a blob storage operation uploads a file, and an event is logged: + +```sql +SELECT * FROM system.blob_storage_log WHERE query_id = '7afe0450-504d-4e4b-9a80-cd9826047972' ORDER BY event_date, event_time_microseconds \G +``` + +```text +Row 1: +────── +event_date: 2023-10-31 +event_time: 2023-10-31 16:03:40 +event_time_microseconds: 2023-10-31 16:03:40.481437 +event_type: Upload +query_id: 7afe0450-504d-4e4b-9a80-cd9826047972 +thread_id: 2381740 +disk_name: disk_s3 +bucket: bucket1 +remote_path: rrr/kxo/tbnqtrghgtnxkzgtcrlutwuslgawe +local_path: store/654/6549e8b3-d753-4447-8047-d462df6e6dbe/tmp_insert_all_1_1_0/checksums.txt +data_size: 259 +error: +``` + +In this example, the upload operation was associated with the `INSERT` query with ID `7afe0450-504d-4e4b-9a80-cd9826047972`. The local metadata file `store/654/6549e8b3-d753-4447-8047-d462df6e6dbe/tmp_insert_all_1_1_0/checksums.txt` refers to remote path `rrr/kxo/tbnqtrghgtnxkzgtcrlutwuslgawe` in bucket `bucket1` on disk `disk_s3`, with a size of 259 bytes.
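Beyond looking up a single query, the same table can be aggregated for a quick health overview. A sketch, using only the columns defined above:

```sql
-- Count today's blob storage operations per type and how many of them reported an error.
SELECT
    event_type,
    count() AS total,
    countIf(error != '') AS failed
FROM system.blob_storage_log
WHERE event_date = today()
GROUP BY event_type
ORDER BY total DESC;
```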
+ +**See Also** + +- [External Disks for Storing Data](../../operations/storing-data.md) diff --git a/docs/en/operations/system-tables/dashboards.md b/docs/en/operations/system-tables/dashboards.md new file mode 100644 index 00000000000..1d6876b9f8d --- /dev/null +++ b/docs/en/operations/system-tables/dashboards.md @@ -0,0 +1,68 @@ +--- +slug: /en/operations/system-tables/dashboards +--- +# dashboards + +Contains queries used by `/dashboard` page accessible though [HTTP interface](/docs/en/interfaces/http.md). +This table can be useful for monitoring and troubleshooting. The table contains a row for every chart in a dashboard. + +:::note +`/dashboard` page can render queries not only from `system.dashboards`, but from any table with the same schema. +This can be useful to create custom dashboards. +::: + +Example: + +``` sql +SELECT * +FROM system.dashboards +WHERE title ILIKE '%CPU%' +``` + +``` text +Row 1: +────── +dashboard: overview +title: CPU Usage (cores) +query: SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_OSCPUVirtualTimeMicroseconds) / 1000000 +FROM system.metric_log +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t +ORDER BY t WITH FILL STEP {rounding:UInt32} + +Row 2: +────── +dashboard: overview +title: CPU Wait +query: SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_OSCPUWaitMicroseconds) / 1000000 +FROM system.metric_log +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} +GROUP BY t +ORDER BY t WITH FILL STEP {rounding:UInt32} + +Row 3: +────── +dashboard: overview +title: OS CPU Usage (Userspace) +query: SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(value) +FROM system.asynchronous_metric_log +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'OSUserTimeNormalized' +GROUP BY t +ORDER BY t WITH FILL STEP {rounding:UInt32} + +Row 4: +────── +dashboard: overview +title: OS CPU Usage (Kernel) +query: SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(value) +FROM system.asynchronous_metric_log +WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} AND metric = 'OSSystemTimeNormalized' +GROUP BY t +ORDER BY t WITH FILL STEP {rounding:UInt32} +``` + +Columns: + +- `dashboard` (`String`) - The dashboard name. +- `title` (`String`) - The title of a chart. +- `query` (`String`) - The query to obtain data to be displayed. diff --git a/docs/en/operations/system-tables/databases.md b/docs/en/operations/system-tables/databases.md index f3d3d388c36..e3b0ded96e8 100644 --- a/docs/en/operations/system-tables/databases.md +++ b/docs/en/operations/system-tables/databases.md @@ -14,6 +14,7 @@ Columns: - `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Database UUID. - `comment` ([String](../../sql-reference/data-types/enum.md)) — Database comment. - `engine_full` ([String](../../sql-reference/data-types/enum.md)) — Parameters of the database engine. +- `database` ([String](../../sql-reference/data-types/string.md)) – Alias for `name`. The `name` column from this system table is used for implementing the `SHOW DATABASES` query. 
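A small illustration of the `database` alias column added above; the output naturally depends on the databases present on the local server:

```sql
-- `database` is an alias for `name`, so both columns return the same values.
SELECT name, database, engine
FROM system.databases
WHERE name = 'system';
```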
diff --git a/docs/en/operations/utilities/backupview.md b/docs/en/operations/utilities/backupview.md new file mode 100644 index 00000000000..c4f1cc0ae71 --- /dev/null +++ b/docs/en/operations/utilities/backupview.md @@ -0,0 +1,50 @@ +--- +slug: /en/operations/utilities/backupview +title: clickhouse_backupview +--- + +# clickhouse_backupview {#clickhouse_backupview} + +A Python module to help analyze backups made by the [BACKUP](https://clickhouse.com/docs/en/operations/backup) command. +The main motivation was to allow getting some information from a backup without actually restoring it. + +This module provides functions to +- enumerate files contained in a backup +- read files from a backup +- get useful information in readable form about databases, tables, parts contained in a backup +- check integrity of a backup + +## Example: + +```python +from clickhouse_backupview import open_backup, S3, FileInfo + +# Open a backup. We could also use a local path: +# backup = open_backup("/backups/my_backup_1/") +backup = open_backup(S3("uri", "access_key_id", "secret_access_key")) + +# Get a list of databases inside the backup. +print(backup.get_databases()) + +# Get a list of tables inside the backup, +# and for each table its create query and a list of parts and partitions. +for db in backup.get_databases(): + for tbl in backup.get_tables(database=db): + print(backup.get_create_query(database=db, table=tbl)) + print(backup.get_partitions(database=db, table=tbl)) + print(backup.get_parts(database=db, table=tbl)) + +# Extract everything from the backup. +backup.extract_all(table="mydb.mytable", out='/tmp/my_backup_1/all/') + +# Extract the data of a specific table. +backup.extract_table_data(table="mydb.mytable", out='/tmp/my_backup_1/mytable/') + +# Extract a single partition. +backup.extract_table_data(table="mydb.mytable", partition="202201", out='/tmp/my_backup_1/202201/') + +# Extract a single part. +backup.extract_table_data(table="mydb.mytable", part="202201_100_200_3", out='/tmp/my_backup_1/202201_100_200_3/') +``` + +For more examples see the [test](https://github.com/ClickHouse/ClickHouse/blob/master/utils/backupview/test/test.py). diff --git a/docs/en/operations/utilities/index.md b/docs/en/operations/utilities/index.md index 5667f99b6fa..8959073d00e 100644 --- a/docs/en/operations/utilities/index.md +++ b/docs/en/operations/utilities/index.md @@ -16,3 +16,4 @@ pagination_next: 'en/operations/utilities/clickhouse-copier' - [clickhouse-disks](../../operations/utilities/clickhouse-disks.md) -- Provides filesystem-like operations on files among different ClickHouse disks. - [clickhouse-odbc-bridge](../../operations/utilities/odbc-bridge.md) — A proxy server for ODBC driver. +- [clickhouse_backupview](../../operations/utilities/backupview.md) — A Python module to analyze ClickHouse backups. diff --git a/docs/en/sql-reference/aggregate-functions/reference/count.md b/docs/en/sql-reference/aggregate-functions/reference/count.md index a98c8e50174..a40108a331a 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/count.md +++ b/docs/en/sql-reference/aggregate-functions/reference/count.md @@ -34,6 +34,10 @@ The `SELECT count() FROM table` query is optimized by default using metadata fro However `SELECT count(nullable_column) FROM table` query can be optimized by enabling the [optimize_functions_to_subcolumns](../../../operations/settings/settings.md#optimize-functions-to-subcolumns) setting.
With `optimize_functions_to_subcolumns = 1` the function reads only [null](../../../sql-reference/data-types/nullable.md#finding-null) subcolumn instead of reading and processing the whole column data. The query `SELECT count(n) FROM table` transforms to `SELECT sum(NOT n.null) FROM table`. +**Improving COUNT(DISTINCT expr) performance** + +If your `COUNT(DISTINCT expr)` query is slow, consider adding a [`GROUP BY`](../../../sql-reference/statements/select/group-by.md) clause as this improves parallelization. You can also use a [projection](../../../sql-reference/statements/alter/projection.md) to create an index on the target column used with `COUNT(DISTINCT target_col)`. + **Examples** Example 1: diff --git a/docs/en/sql-reference/functions/comparison-functions.md b/docs/en/sql-reference/functions/comparison-functions.md index 297d84eb8a5..abe923adeb3 100644 --- a/docs/en/sql-reference/functions/comparison-functions.md +++ b/docs/en/sql-reference/functions/comparison-functions.md @@ -20,7 +20,7 @@ Strings are compared byte-by-byte. Note that this may lead to unexpected results A string S1 which has another string S2 as prefix is considered longer than S2. -## equals, `=`, `==` operators +## equals, `=`, `==` operators {#equals} **Syntax** @@ -32,7 +32,7 @@ Alias: - `a = b` (operator) - `a == b` (operator) -## notEquals, `!=`, `<>` operators +## notEquals, `!=`, `<>` operators {#notequals} **Syntax** @@ -44,7 +44,7 @@ Alias: - `a != b` (operator) - `a <> b` (operator) -## less, `<` operator +## less, `<` operator {#less} **Syntax** @@ -55,7 +55,7 @@ less(a, b) Alias: - `a < b` (operator) -## greater, `>` operator +## greater, `>` operator {#greater} **Syntax** @@ -66,7 +66,7 @@ greater(a, b) Alias: - `a > b` (operator) -## lessOrEquals, `<=` operator +## lessOrEquals, `<=` operator {#lessorequals} **Syntax** @@ -77,7 +77,7 @@ lessOrEquals(a, b) Alias: - `a <= b` (operator) -## greaterOrEquals, `>=` operator +## greaterOrEquals, `>=` operator {#greaterorequals} **Syntax** diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 55d09be7847..43f7c9cc61e 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -1381,7 +1381,7 @@ toStartOfFifteenMinutes(toDateTime('2023-04-21 10:20:00')): 2023-04-21 10:15:00 toStartOfFifteenMinutes(toDateTime('2023-04-21 10:23:00')): 2023-04-21 10:15:00 ``` -## toStartOfInterval(time_or_data, INTERVAL x unit \[, time_zone\]) +## toStartOfInterval(date_or_date_with_time, INTERVAL x unit \[, time_zone\]) This function generalizes other `toStartOf*()` functions. For example, - `toStartOfInterval(t, INTERVAL 1 year)` returns the same as `toStartOfYear(t)`, diff --git a/docs/en/sql-reference/functions/math-functions.md b/docs/en/sql-reference/functions/math-functions.md index 9eab2274210..b27668caf0c 100644 --- a/docs/en/sql-reference/functions/math-functions.md +++ b/docs/en/sql-reference/functions/math-functions.md @@ -6,11 +6,9 @@ sidebar_label: Mathematical # Mathematical Functions -All the functions return a Float64 number. Results are generally as close to the actual result as possible, but in some cases less precise than the machine-representable number. - ## e -Returns e. +Returns e ([Euler's constant](https://en.wikipedia.org/wiki/Euler%27s_constant)) **Syntax** @@ -18,15 +16,22 @@ Returns e. e() ``` +**Returned value** + +Type: [Float64](../../sql-reference/data-types/float.md). + ## pi -Returns π. 
+Returns π ([Pi](https://en.wikipedia.org/wiki/Pi)). **Syntax** ```sql pi() ``` +**Returned value** + +Type: [Float64](../../sql-reference/data-types/float.md). ## exp @@ -38,6 +43,14 @@ Returns e to the power of the given argument. exp(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## log Returns the natural logarithm of the argument. @@ -50,6 +63,14 @@ log(x) Alias: `ln(x)` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## exp2 Returns 2 to the power of the given argument @@ -60,6 +81,14 @@ Returns 2 to the power of the given argument exp2(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## intExp2 Like `exp` but returns a UInt64. @@ -80,6 +109,14 @@ Returns the binary logarithm of the argument. log2(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## exp10 Returns 10 to the power of the given argument. @@ -90,6 +127,14 @@ Returns 10 to the power of the given argument. exp10(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## intExp10 Like `exp10` but returns a UInt64. @@ -110,6 +155,14 @@ Returns the decimal logarithm of the argument. log10(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## sqrt Returns the square root of the argument. @@ -118,6 +171,14 @@ Returns the square root of the argument. sqrt(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## cbrt Returns the cubic root of the argument. @@ -126,6 +187,14 @@ Returns the cubic root of the argument. cbrt(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## erf If `x` is non-negative, then `erf(x / σ√2)` is the probability that a random variable having a normal distribution with standard deviation `σ` takes the value that is separated from the expected value by more than `x`. 
@@ -136,6 +205,14 @@ If `x` is non-negative, then `erf(x / σ√2)` is the probability that a random erf(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + **Example** (three sigma rule) @@ -160,6 +237,14 @@ Returns a number close to `1 - erf(x)` without loss of precision for large ‘x erfc(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## lgamma Returns the logarithm of the gamma function. @@ -170,6 +255,14 @@ Returns the logarithm of the gamma function. lgamma(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## tgamma Returns the gamma function. @@ -180,6 +273,14 @@ Returns the gamma function. gamma(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## sin Returns the sine of the argument @@ -190,6 +291,14 @@ Returns the sine of the argument sin(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## cos Returns the cosine of the argument. @@ -200,6 +309,14 @@ Returns the cosine of the argument. cos(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## tan Returns the tangent of the argument. @@ -210,6 +327,14 @@ Returns the tangent of the argument. tan(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## asin Returns the arc sine of the argument. @@ -220,6 +345,14 @@ Returns the arc sine of the argument. asin(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## acos Returns the arc cosine of the argument. @@ -230,6 +363,14 @@ Returns the arc cosine of the argument. acos(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## atan Returns the arc tangent of the argument. 
@@ -240,6 +381,14 @@ Returns the arc tangent of the argument. atan(x) ``` +**Arguments** + +- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md) + +**Returned value** + +Type: [Float*](../../sql-reference/data-types/float.md). + ## pow Returns `x` to the power of `y`. @@ -252,6 +401,15 @@ pow(x, y) Alias: `power(x, y)` +**Arguments** + +- `x` - [(U)Int8/16/32/64](../../sql-reference/data-types/int-uint.md) or [Float*](../../sql-reference/data-types/float.md) +- `y` - [(U)Int8/16/32/64](../../sql-reference/data-types/int-uint.md) or [Float*](../../sql-reference/data-types/float.md) + +**Returned value** + +Type: [Float64](../../sql-reference/data-types/float.md). + ## cosh Returns the [hyperbolic cosine](https://in.mathworks.com/help/matlab/ref/cosh.html) of the argument. diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 35fd5089bf0..4c103274f43 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -1556,7 +1556,7 @@ initializeAggregation (aggregate_function, arg1, arg2, ..., argN) - Result of aggregation for every row passed to the function. -The return type is the same as the return type of function, that `initializeAgregation` takes as first argument. +The return type is the same as the return type of function, that `initializeAggregation` takes as first argument. **Example** diff --git a/docs/en/sql-reference/functions/random-functions.md b/docs/en/sql-reference/functions/random-functions.md index 13c29329f41..6fd31e8d25c 100644 --- a/docs/en/sql-reference/functions/random-functions.md +++ b/docs/en/sql-reference/functions/random-functions.md @@ -6,9 +6,9 @@ sidebar_label: Random Numbers # Functions for Generating Random Numbers -All functions in this section accept zero or one arguments. The only use of the argument (if provided) is to prevent prevent [common subexpression -elimination](../../sql-reference/functions/index.md#common-subexpression-elimination) such that two different execution of the same random -function in a query return different random values. +All functions in this section accept zero or one arguments. The only use of the argument (if provided) is to prevent [common subexpression +elimination](../../sql-reference/functions/index.md#common-subexpression-elimination) such that two different executions within a row of the same random +function return different random values. Related content - Blog: [Generating random data in ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse) diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md index 4df987b5e2a..1940993ce0b 100644 --- a/docs/en/sql-reference/functions/string-functions.md +++ b/docs/en/sql-reference/functions/string-functions.md @@ -429,7 +429,7 @@ SELECT format('{} {}', 'Hello', 'World') ## concat -Concatenates the strings listed in the arguments without separator. +Concatenates the given arguments. **Syntax** @@ -439,7 +439,9 @@ concat(s1, s2, ...) **Arguments** -Values of type String or FixedString. +At least one value of arbitrary type. + +Arguments which are not of types [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md) are converted to strings using their default serialization. 
As this decreases performance, it is not recommended to use non-String/FixedString arguments. **Returned values** @@ -449,6 +451,8 @@ If any of arguments is `NULL`, the function returns `NULL`. **Example** +Query: + ``` sql SELECT concat('Hello, ', 'World!'); ``` @@ -461,6 +465,20 @@ Result: └─────────────────────────────┘ ``` +Query: + +```sql +SELECT concat(42, 144); +``` + +Result: + +```result +┌─concat(42, 144)─┐ +│ 42144 │ +└─────────────────┘ +``` + ## concatAssumeInjective Like [concat](#concat) but assumes that `concat(s1, s2, ...) → sn` is injective. Can be used for optimization of GROUP BY. @@ -526,6 +544,8 @@ Concatenates the given strings with a given separator. concatWithSeparator(sep, expr1, expr2, expr3...) ``` +Alias: `concat_ws` + **Arguments** - sep — separator. Const [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md). diff --git a/docs/en/sql-reference/functions/time-series-functions.md b/docs/en/sql-reference/functions/time-series-functions.md new file mode 100644 index 00000000000..e183fdcdcd7 --- /dev/null +++ b/docs/en/sql-reference/functions/time-series-functions.md @@ -0,0 +1,47 @@ +--- +slug: /en/sql-reference/functions/time-series-functions +sidebar_position: 172 +sidebar_label: Time Series +--- + +# Time Series Functions + +Below functions are used for time series analysis. + +## seriesPeriodDetectFFT + +Finds the period of the given time series data using FFT +Detect Period in time series data using FFT. +FFT - Fast Fourier transform (https://en.wikipedia.org/wiki/Fast_Fourier_transform) + +**Syntax** + +``` sql +seriesPeriodDetectFFT(series); +``` + +**Arguments** + +- `series` - An array of numeric values + +**Returned value** + +- A real value equal to the period of time series + +Type: [Float64](../../sql-reference/data-types/float.md). + +**Examples** + +Query: + +``` sql +SELECT seriesPeriodDetectFFT([1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6, 1, 4, 6]) AS print_0; +``` + +Result: + +``` text +┌───────────print_0──────┐ +│ 3 │ +└────────────────────────┘ +``` diff --git a/docs/en/sql-reference/statements/optimize.md b/docs/en/sql-reference/statements/optimize.md index 49843eaff9a..07b5a196096 100644 --- a/docs/en/sql-reference/statements/optimize.md +++ b/docs/en/sql-reference/statements/optimize.md @@ -5,7 +5,7 @@ sidebar_label: OPTIMIZE title: "OPTIMIZE Statement" --- -This query tries to initialize an unscheduled merge of data parts for tables. +This query tries to initialize an unscheduled merge of data parts for tables. Note that we generally recommend against using `OPTIMIZE TABLE ... FINAL` (see these [docs](/docs/en/optimize/avoidoptimizefinal)) as its use case is meant for administration, not for daily operations. :::note `OPTIMIZE` can’t fix the `Too many parts` error. diff --git a/docs/en/sql-reference/table-functions/azureBlobStorage.md b/docs/en/sql-reference/table-functions/azureBlobStorage.md index 59c92e1327e..1510489ce83 100644 --- a/docs/en/sql-reference/table-functions/azureBlobStorage.md +++ b/docs/en/sql-reference/table-functions/azureBlobStorage.md @@ -67,6 +67,12 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam └─────────┘ ``` +## Virtual Columns {#virtual-columns} + +- `_path` — Path to the file. Type: `LowCardinalty(String)`. +- `_file` — Name of the file. Type: `LowCardinalty(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. 
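As an illustration of the virtual columns listed above, a sketch that mirrors the connection-string form of `azureBlobStorage` used earlier on this page; the connection string, container name and blob pattern are placeholders to be replaced with your own values:

```sql
-- All credentials, the container name and the blob pattern below are placeholders.
SELECT _file, _size, count() AS rows
FROM azureBlobStorage(
    'DefaultEndpointsProtocol=https;AccountName=account;AccountKey=key;EndpointSuffix=core.windows.net',
    'testcontainer', 'test_*.csv',
    'CSV', 'auto', 'column1 UInt32, column2 UInt32, column3 UInt32')
GROUP BY _file, _size;
```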
+ +**See Also** + +- [AzureBlobStorage Table Engine](/docs/en/engines/table-engines/integrations/azureBlobStorage.md) diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index 7e869af82ef..ad1feb87c60 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -1,4 +1,4 @@ ---- + -- slug: /en/sql-reference/table-functions/file sidebar_position: 60 sidebar_label: file @@ -6,7 +6,7 @@ sidebar_label: file # file -Provides a table-like interface to SELECT from and INSERT to files. This table function is similar to the [s3](/docs/en/sql-reference/table-functions/url.md) table function. Use file() when working with local files, and s3() when working with buckets in S3, GCS, or MinIO. +A table function which provides a table-like interface to SELECT from and INSERT into files, similar to the [s3](/docs/en/sql-reference/table-functions/url.md) table function. Use `file()` when working with local files, and `s3()` when working with buckets in object storage such as S3, GCS, or MinIO. The `file` function can be used in `SELECT` and `INSERT` queries to read from or write to files. @@ -18,18 +18,18 @@ file([path_to_archive ::] path [,format] [,structure] [,compression]) **Parameters** -- `path` — The relative path to the file from [user_files_path](/docs/en/operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Path to file support following globs in read-only mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc', 'def'` — strings. -- `path_to_archive` - The relative path to zip/tar/7z archive. Path to archive support the same globs as `path`. +- `path` — The relative path to the file from [user_files_path](/docs/en/operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). In read-only mode, supports the following [globs](#globs_in_path): `*`, `?`, `{abc,def}` (with `'abc'` and `'def'` being strings) and `{N..M}` (with `N` and `M` being numbers). +- `path_to_archive` - The relative path to a zip/tar/7z archive. Supports the same globs as `path`. - `format` — The [format](/docs/en/interfaces/formats.md#formats) of the file. - `structure` — Structure of the table. Format: `'column1_name column1_type, column2_name column2_type, ...'`. -- `compression` — The existing compression type when used in a `SELECT` query, or the desired compression type when used in an `INSERT` query. The supported compression types are `gz`, `br`, `xz`, `zst`, `lz4`, and `bz2`. +- `compression` — The existing compression type when used in a `SELECT` query, or the desired compression type when used in an `INSERT` query. Supported compression types are `gz`, `br`, `xz`, `zst`, `lz4`, and `bz2`. **Returned value** -A table with the specified structure for reading or writing data in the specified file. +A table for reading or writing data in a file. -## File Write Examples +## Examples for Writing to a File ### Write to a TSV file @@ -48,9 +48,9 @@ As a result, the data is written into the file `test.tsv`: 1 3 2 ``` -### Partitioned Write to multiple TSV files +### Partitioned write to multiple TSV files
+If you specify a `PARTITION BY` expression when inserting data into a table function of type `file()`, then a separate file is created for each partition. Splitting the data into separate files helps to improve performance of read operations. ```sql INSERT INTO TABLE FUNCTION @@ -72,11 +72,11 @@ As a result, the data is written into three files: `test_1.tsv`, `test_2.tsv`, a 1 2 3 ``` -## File Read Examples +## Examples for Reading from a File ### SELECT from a CSV file -Setting `user_files_path` and the contents of the file `test.csv`: +First, set `user_files_path` in the server configuration and prepare a file `test.csv`: ``` bash $ grep user_files_path /etc/clickhouse-server/config.xml @@ -88,7 +88,7 @@ $ cat /var/lib/clickhouse/user_files/test.csv 78,43,45 ``` -Getting data from a table in `test.csv` and selecting the first two rows from it: +Then, read data from `test.csv` into a table and select its first two rows: ``` sql SELECT * FROM @@ -103,14 +103,6 @@ LIMIT 2; └─────────┴─────────┴─────────┘ ``` -Getting the first 10 lines of a table that contains 3 columns of [UInt32](/docs/en/sql-reference/data-types/int-uint.md) type from a CSV file: - -``` sql -SELECT * FROM -file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') -LIMIT 10; -``` - ### Inserting data from a file into a table: ``` sql @@ -130,41 +122,42 @@ file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32'); └─────────┴─────────┴─────────┘ ``` -Getting data from table in table.csv, located in archive1.zip or/and archive2.zip +Reading data from `table.csv`, located in `archive1.zip` or/and `archive2.zip`: + ``` sql SELECT * FROM file('user_files/archives/archive{1..2}.zip :: table.csv'); ``` -## Globs in Path {#globs_in_path} +## Globbing {#globs_in_path} -Multiple path components can have globs. For being processed file must exist and match to the whole path pattern (not only suffix or prefix). +Paths may use globbing. Files must match the whole path pattern, not only the suffix or prefix. -- `*` — Substitutes any number of any characters except `/` including empty string. -- `?` — Substitutes any single character. -- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. The strings can contain the `/` symbol. -- `{N..M}` — Substitutes any number in range from N to M including both borders. -- `**` - Fetches all files inside the folder recursively. +- `*` — Represents arbitrarily many characters except `/` but including the empty string. +- `?` — Represents an arbitrary single character. +- `{some_string,another_string,yet_another_one}` — Represents any of alternative strings `'some_string', 'another_string', 'yet_another_one'`. The strings may contain `/`. +- `{N..M}` — Represents any number `>= N` and `<= M`. +- `**` - Represents all files inside a folder recursively. Constructions with `{}` are similar to the [remote](remote.md) table function. 
**Example** -Suppose we have several files with the following relative paths: +Suppose there are these files with the following relative paths: -- 'some_dir/some_file_1' -- 'some_dir/some_file_2' -- 'some_dir/some_file_3' -- 'another_dir/some_file_1' -- 'another_dir/some_file_2' -- 'another_dir/some_file_3' +- `some_dir/some_file_1` +- `some_dir/some_file_2` +- `some_dir/some_file_3` +- `another_dir/some_file_1` +- `another_dir/some_file_2` +- `another_dir/some_file_3` -Query the number of rows in these files: +Query the total number of rows in all files: ``` sql SELECT count(*) FROM file('{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32'); ``` -Query the number of rows in all files of these two directories: +An alternative path expression which achieves the same: ``` sql SELECT count(*) FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32'); @@ -176,7 +169,7 @@ If your listing of files contains number ranges with leading zeros, use the cons **Example** -Query the data from files named `file000`, `file001`, … , `file999`: +Query the total number of rows in files named `file000`, `file001`, … , `file999`: ``` sql SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32'); @@ -184,7 +177,7 @@ SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, **Example** -Query the data from all files inside `big_dir` directory recursively: +Query the total number of rows from all files inside directory `big_dir/` recursively: ``` sql SELECT count(*) FROM file('big_dir/**', 'CSV', 'name String, value UInt32'); @@ -192,18 +185,19 @@ SELECT count(*) FROM file('big_dir/**', 'CSV', 'name String, value UInt32'); **Example** -Query the data from all `file002` files from any folder inside `big_dir` directory recursively: +Query the total number of rows from all files `file002` inside any folder in directory `big_dir/` recursively: ``` sql SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt32'); ``` -## Virtual Columns +## Virtual Columns {#virtual-columns} -- `_path` — Path to the file. -- `_file` — Name of the file. +- `_path` — Path to the file. Type: `LowCardinalty(String)`. +- `_file` — Name of the file. Type: `LowCardinalty(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. -## Settings +## Settings {#settings} - [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default. - [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default. diff --git a/docs/en/sql-reference/table-functions/gcs.md b/docs/en/sql-reference/table-functions/gcs.md index c49ae6a8501..5ffc20189da 100644 --- a/docs/en/sql-reference/table-functions/gcs.md +++ b/docs/en/sql-reference/table-functions/gcs.md @@ -9,6 +9,10 @@ keywords: [gcs, bucket] Provides a table-like interface to `SELECT` and `INSERT` data from [Google Cloud Storage](https://cloud.google.com/storage/). Requires the [`Storage Object User` IAM role](https://cloud.google.com/storage/docs/access-control/iam-roles). +This is an alias of the [s3 table function](../../sql-reference/table-functions/s3.md). 
+ +If you have multiple replicas in your cluster, you can use the [s3Cluster function](../../sql-reference/table-functions/s3Cluster.md) (which works with GCS) instead to parallelize inserts. + **Syntax** ``` sql diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index 678470e9150..31780e30e8e 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -94,8 +94,9 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin ## Virtual Columns -- `_path` — Path to the file. -- `_file` — Name of the file. +- `_path` — Path to the file. Type: `LowCardinalty(String)`. +- `_file` — Name of the file. Type: `LowCardinalty(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. ## Storage Settings {#storage-settings} diff --git a/docs/en/sql-reference/table-functions/remote.md b/docs/en/sql-reference/table-functions/remote.md index 59ed4bf1985..3ca177050d3 100644 --- a/docs/en/sql-reference/table-functions/remote.md +++ b/docs/en/sql-reference/table-functions/remote.md @@ -6,7 +6,7 @@ sidebar_label: remote # remote, remoteSecure -Allows accessing remote servers, including migration of data, without creating a [Distributed](../../engines/table-engines/special/distributed.md) table. `remoteSecure` - same as `remote` but with a secured connection. +Table function `remote` allows to access remote servers on-the-fly, i.e. without creating a [Distributed](../../engines/table-engines/special/distributed.md) table. Table function `remoteSecure` is same as `remote` but over a secure connection. Both functions can be used in `SELECT` and `INSERT` queries. @@ -21,36 +21,36 @@ remoteSecure('addresses_expr', [db.table, 'user'[, 'password'], sharding_key]) ## Parameters -- `addresses_expr` — An expression that generates addresses of remote servers. This may be just one server address. The server address is `host:port`, or just `host`. +- `addresses_expr` — A remote server address or an expression that generates multiple addresses of remote servers. Format: `host` or `host:port`. - The host can be specified as the server name, or as the IPv4 or IPv6 address. An IPv6 address is specified in square brackets. + The `host` can be specified as a server name, or as a IPv4 or IPv6 address. An IPv6 address must be specified in square brackets. - The port is the TCP port on the remote server. If the port is omitted, it uses [tcp_port](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port) from the server’s config file in `remote` (by default, 9000) and [tcp_port_secure](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port_secure) in `remoteSecure` (by default, 9440). + The `port` is the TCP port on the remote server. If the port is omitted, it uses [tcp_port](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port) from the server config file for table function `remote` (by default, 9000) and [tcp_port_secure](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port_secure) for table function `remoteSecure` (by default, 9440). - The port is required for an IPv6 address. + For IPv6 addresses, a port is required. - If only specify this parameter, `db` and `table` will use `system.one` by default. 
+ If only parameter `addresses_expr` is specified, `db` and `table` will use `system.one` by default. Type: [String](../../sql-reference/data-types/string.md). - `db` — Database name. Type: [String](../../sql-reference/data-types/string.md). - `table` — Table name. Type: [String](../../sql-reference/data-types/string.md). -- `user` — User name. If the user is not specified, `default` is used. Type: [String](../../sql-reference/data-types/string.md). -- `password` — User password. If the password is not specified, an empty password is used. Type: [String](../../sql-reference/data-types/string.md). +- `user` — User name. If not specified, `default` is used. Type: [String](../../sql-reference/data-types/string.md). +- `password` — User password. If not specified, an empty password is used. Type: [String](../../sql-reference/data-types/string.md). - `sharding_key` — Sharding key to support distributing data across nodes. For example: `insert into remote('127.0.0.1:9000,127.0.0.2', db, table, 'default', rand())`. Type: [UInt32](../../sql-reference/data-types/int-uint.md). ## Returned value -The dataset from remote servers. +A table located on a remote server. ## Usage -Unless you are migrating data from one system to another, using the `remote` table function is less optimal than creating a `Distributed` table because in this case the server connection is re-established for every request. Also, if hostnames are set, the names are resolved, and errors are not counted when working with various replicas. When processing a large number of queries, always create the `Distributed` table ahead of time, and do not use the `remote` table function. +As table functions `remote` and `remoteSecure` re-establish the connection for each request, it is recommended to use a `Distributed` table instead. Also, if hostnames are set, the names are resolved, and errors are not counted when working with various replicas. When processing a large number of queries, always create the `Distributed` table ahead of time, and do not use the `remote` table function. The `remote` table function can be useful in the following cases: -- Migrating data from one system to another -- Accessing a specific server for data comparison, debugging, and testing. +- One-time data migration from one system to another +- Accessing a specific server for data comparison, debugging, and testing, i.e. ad-hoc connections. - Queries between various ClickHouse clusters for research purposes. - Infrequent distributed requests that are made manually. - Distributed requests where the set of servers is re-defined each time. @@ -68,7 +68,7 @@ localhost [2a02:6b8:0:1111::11]:9000 ``` -Multiple addresses can be comma-separated. In this case, ClickHouse will use distributed processing, so it will send the query to all specified addresses (like shards with different data). Example: +Multiple addresses can be comma-separated. In this case, ClickHouse will use distributed processing and send the query to all specified addresses (like shards with different data). Example: ``` text example01-01-1,example01-02-1 @@ -91,10 +91,13 @@ SELECT * FROM remote_table; ``` ### Migration of tables from one system to another: + This example uses one table from a sample dataset. The database is `imdb`, and the table is `actors`. #### On the source ClickHouse system (the system that currently hosts the data) + - Verify the source database and table name (`imdb.actors`) + ```sql show databases ``` @@ -104,6 +107,7 @@ This example uses one table from a sample dataset. 
The database is `imdb`, and ``` - Get the CREATE TABLE statement from the source: + ``` select create_table_query from system.tables @@ -111,6 +115,7 @@ This example uses one table from a sample dataset. The database is `imdb`, and ``` Response + ```sql CREATE TABLE imdb.actors (`id` UInt32, `first_name` String, @@ -123,11 +128,13 @@ This example uses one table from a sample dataset. The database is `imdb`, and #### On the destination ClickHouse system: - Create the destination database: + ```sql CREATE DATABASE imdb ``` - Using the CREATE TABLE statement from the source, create the destination: + ```sql CREATE TABLE imdb.actors (`id` UInt32, `first_name` String, @@ -140,21 +147,23 @@ This example uses one table from a sample dataset. The database is `imdb`, and #### Back on the source deployment: Insert into the new database and table created on the remote system. You will need the host, port, username, password, destination database, and destination table. + ```sql INSERT INTO FUNCTION remoteSecure('remote.clickhouse.cloud:9440', 'imdb.actors', 'USER', 'PASSWORD') SELECT * from imdb.actors ``` -## Globs in Addresses {#globs-in-addresses} +## Globbing {#globs-in-addresses} Patterns in curly brackets `{ }` are used to generate a set of shards and to specify replicas. If there are multiple pairs of curly brackets, then the direct product of the corresponding sets is generated. + The following pattern types are supported. -- {*a*,*b*} - Any number of variants separated by a comma. The pattern is replaced with *a* in the first shard address and it is replaced with *b* in the second shard address and so on. For instance, `example0{1,2}-1` generates addresses `example01-1` and `example02-1`. -- {*n*..*m*} - A range of numbers. This pattern generates shard addresses with incrementing indices from *n* to *m*. `example0{1..2}-1` generates `example01-1` and `example02-1`. -- {*0n*..*0m*} - A range of numbers with leading zeroes. This modification preserves leading zeroes in indices. The pattern `example{01..03}-1` generates `example01-1`, `example02-1` and `example03-1`. -- {*a*|*b*} - Any number of variants separated by a `|`. The pattern specifies replicas. For instance, `example01-{1|2}` generates replicas `example01-1` and `example01-2`. +- `{a,b,c}` - Represents any of alternative strings `a`, `b` or `c`. The pattern is replaced with `a` in the first shard address and replaced with `b` in the second shard address and so on. For instance, `example0{1,2}-1` generates addresses `example01-1` and `example02-1`. +- `{N..M}` - A range of numbers. This pattern generates shard addresses with incrementing indices from `N` to (and including) `M`. For instance, `example0{1..2}-1` generates `example01-1` and `example02-1`. +- `{0n..0m}` - A range of numbers with leading zeroes. This pattern preserves leading zeroes in indices. For instance, `example{01..03}-1` generates `example01-1`, `example02-1` and `example03-1`. +- `{a|b}` - Any number of variants separated by a `|`. The pattern specifies replicas. For instance, `example01-{1|2}` generates replicas `example01-1` and `example01-2`. The query will be sent to the first healthy replica. However, for `remote` the replicas are iterated in the order currently set in the [load_balancing](../../operations/settings/settings.md#settings-load_balancing) setting. The number of generated addresses is limited by [table_function_remote_max_addresses](../../operations/settings/settings.md#table_function_remote_max_addresses) setting. 
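To make the address patterns concrete, here is a hedged sketch of `remote` with combined shard and replica globs; the hostnames and the `default.hits` table are assumptions made only for this example.

```sql
-- 'example{01..02}-{1|2}' expands to two shards (example01-*, example02-*),
-- each with two replicas (-1, -2); per shard, only one healthy replica is queried.
SELECT count()
FROM remote('example{01..02}-{1|2}', default.hits, 'default', '');
```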
diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index 8649295e815..dc11259c626 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -9,6 +9,10 @@ keywords: [s3, gcs, bucket] Provides a table-like interface to select/insert files in [Amazon S3](https://aws.amazon.com/s3/) and [Google Cloud Storage](https://cloud.google.com/storage/). This table function is similar to the [hdfs function](../../sql-reference/table-functions/hdfs.md), but provides S3-specific features. +If you have multiple replicas in your cluster, you can use the [s3Cluster function](../../sql-reference/table-functions/s3Cluster.md) instead to parallelize inserts. + +When using the `s3 table function` with [`INSERT INTO...SELECT`](../../sql-reference/statements/insert-into#inserting-the-results-of-select), data is read and inserted in a streaming fashion. Only a few blocks of data reside in memory while the blocks are continuously read from S3 and pushed into the destination table. + **Syntax** ``` sql @@ -224,6 +228,12 @@ FROM s3( LIMIT 5; ``` +## Virtual Columns {#virtual-columns} + +- `_path` — Path to the file. Type: `LowCardinalty(String)`. +- `_file` — Name of the file. Type: `LowCardinalty(String)`. +- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. + ## Storage Settings {#storage-settings} - [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default. diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index 675aef54d34..799eb31446a 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -5,7 +5,7 @@ sidebar_label: s3Cluster title: "s3Cluster Table Function" --- -Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) in parallel from many nodes in a specified cluster. On initiator it creates a connection to all nodes in the cluster, discloses asterisks in S3 file path, and dispatches each file dynamically. On the worker node it asks the initiator about the next task to process and processes it. This is repeated until all tasks are finished. +Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) and Google Cloud Storage [Google Cloud Storage](https://cloud.google.com/storage/) in parallel from many nodes in a specified cluster. On initiator it creates a connection to all nodes in the cluster, discloses asterisks in S3 file path, and dispatches each file dynamically. On the worker node it asks the initiator about the next task to process and processes it. This is repeated until all tasks are finished. **Syntax** diff --git a/docs/en/sql-reference/table-functions/url.md b/docs/en/sql-reference/table-functions/url.md index 859de86f019..4dc6e435b50 100644 --- a/docs/en/sql-reference/table-functions/url.md +++ b/docs/en/sql-reference/table-functions/url.md @@ -50,8 +50,9 @@ Character `|` inside patterns is used to specify failover addresses. They are it ## Virtual Columns -- `_path` — Path to the `URL`. -- `_file` — Resource name of the `URL`. +- `_path` — Path to the `URL`. Type: `LowCardinalty(String)`. +- `_file` — Resource name of the `URL`. Type: `LowCardinalty(String)`. +- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. 
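A minimal sketch of the streaming `INSERT INTO ... SELECT` behaviour described above, plus the new virtual columns on the read path. The bucket URL, the credentials and the `events` table are placeholders assumed for this example.

```sql
-- The SELECT result is streamed to S3 block by block; it is not materialized in memory first.
INSERT INTO FUNCTION s3('https://my-bucket.s3.amazonaws.com/data/events.parquet',
                        'ACCESS_KEY', 'SECRET_KEY', 'Parquet')
SELECT * FROM events;

-- Reading back: _file and _size describe each matched object.
SELECT _file, _size, count() AS rows
FROM s3('https://my-bucket.s3.amazonaws.com/data/*.parquet', 'ACCESS_KEY', 'SECRET_KEY', 'Parquet')
GROUP BY _file, _size;
```

On a cluster with several replicas, the same read could go through `s3Cluster` to spread the work across nodes, as the note above suggests.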
## Storage Settings {#storage-settings} diff --git a/docs/en/sql-reference/window-functions/index.md b/docs/en/sql-reference/window-functions/index.md index a8f494a5afc..6340c369bff 100644 --- a/docs/en/sql-reference/window-functions/index.md +++ b/docs/en/sql-reference/window-functions/index.md @@ -86,14 +86,14 @@ WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column] These functions can be used only as a window function. -`row_number()` - Number the current row within its partition starting from 1. -`first_value(x)` - Return the first non-NULL value evaluated within its ordered frame. -`last_value(x)` - Return the last non-NULL value evaluated within its ordered frame. -`nth_value(x, offset)` - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame. -`rank()` - Rank the current row within its partition with gaps. -`dense_rank()` - Rank the current row within its partition without gaps. -`lagInFrame(x)` - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame. -`leadInFrame(x)` - Return a value evaluated at the row that is offset rows after the current row within the ordered frame. +- `row_number()` - Number the current row within its partition starting from 1. +- `first_value(x)` - Return the first non-NULL value evaluated within its ordered frame. +- `last_value(x)` - Return the last non-NULL value evaluated within its ordered frame. +- `nth_value(x, offset)` - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame. +- `rank()` - Rank the current row within its partition with gaps. +- `dense_rank()` - Rank the current row within its partition without gaps. +- `lagInFrame(x)` - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame. +- `leadInFrame(x)` - Return a value evaluated at the row that is offset rows after the current row within the ordered frame. ```text PARTITION diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index 00eb830c9ef..7195ee38af6 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -337,7 +337,7 @@ SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234 Поддерживаемые типы данных: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`. - Фильтром могут пользоваться функции: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions.md), [notIn](../../../sql-reference/functions/in-functions.md), [has](../../../sql-reference/functions/array-functions.md#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions.md#hasany), [hasAll](../../../sql-reference/functions/array-functions.md#hasall). 
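As a quick illustration of the window functions listed above, the following sketch assumes a hypothetical `salaries` table with one row per `(department, employee, salary)` and shows where `row_number()`, `rank()` and `dense_rank()` diverge on ties.

```sql
-- row_number() always increments; rank() leaves gaps after ties; dense_rank() does not.
SELECT
    department,
    employee,
    salary,
    row_number() OVER w AS row_num,
    rank()       OVER w AS rnk,
    dense_rank() OVER w AS dense_rnk
FROM salaries
WINDOW w AS (PARTITION BY department ORDER BY salary DESC)
ORDER BY department, row_num;
```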
+ Фильтром могут пользоваться функции: [equals](../../../sql-reference/functions/comparison-functions.md#equals), [notEquals](../../../sql-reference/functions/comparison-functions.md#notequals), [in](../../../sql-reference/functions/in-functions.md), [notIn](../../../sql-reference/functions/in-functions.md), [has](../../../sql-reference/functions/array-functions.md#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions.md#hasany), [hasAll](../../../sql-reference/functions/array-functions.md#hasall). **Примеры** @@ -354,8 +354,8 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT | Функция (оператор) / Индекс | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | |------------------------------------------------------------------------------------------------------------|-------------|--------|-------------|-------------|---------------| -| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#equals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | | [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ | | [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ | | [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | @@ -363,10 +363,10 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT | [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | | [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | | [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [less (\<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [less (\<)](../../../sql-reference/functions/comparison-functions.md#less) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#greater) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | | [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | | [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | | hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | diff --git a/docs/ru/introduction/_category_.yml b/docs/ru/introduction/_category_.yml index 539f7ab97ed..b3e58207c12 100644 --- a/docs/ru/introduction/_category_.yml +++ b/docs/ru/introduction/_category_.yml @@ -2,6 +2,3 @@ position: 1 label: 'Введение' collapsible: true collapsed: 
true -link: - type: generated-index - title: Введение diff --git a/docs/ru/introduction/index.md b/docs/ru/introduction/index.md new file mode 100644 index 00000000000..74a6e4dd135 --- /dev/null +++ b/docs/ru/introduction/index.md @@ -0,0 +1,13 @@ +--- +slug: /ru/introduction/ +sidebar_label: "Введение" +sidebar_position: 8 +--- + +# Введение + +В этом разделе содержится информация о том, как начать работу с ClickHouse. + +- [Отличительные возможности ClickHouse](./distinctive-features.md) +- [Производительность](./performance.md) +- [История ClickHouse](./history.md) diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md index 2c7f0b773e8..788693b581e 100644 --- a/docs/ru/operations/server-configuration-parameters/settings.md +++ b/docs/ru/operations/server-configuration-parameters/settings.md @@ -275,10 +275,11 @@ ClickHouse проверяет условия для `min_part_size` и `min_part Отложенная загрузка словарей. -Если `true`, то каждый словарь создаётся при первом использовании. Если словарь не удалось создать, то вызов функции, использующей словарь, сгенерирует исключение. +Если `true`, то каждый словарь загружается при первом использовании. Если словарь не удалось загрузить, то вызов функции, использующей словарь, сгенерирует исключение. -Если `false`, то все словари создаются при старте сервера, если словарь или словари создаются слишком долго или создаются с ошибкой, то сервер загружается без -этих словарей и продолжает попытки создать эти словари. +Если `false`, все словари будут загружаться на старте сервера. +Сервер будет ждать на старте окончания загрузки всех словарей перед началом обработки соединений +(исключение: если `wait_dictionaries_load_at_startup` установлена в `false` - см. ниже). По умолчанию - `true`. @@ -993,7 +994,7 @@ ClickHouse использует потоки из глобального пул - Положительное целое число. -Значение по умолчанию: 128. +Значение по умолчанию: 512. ## background_fetches_pool_size {#background_fetches_pool_size} @@ -1003,7 +1004,7 @@ ClickHouse использует потоки из глобального пул - Положительное целое число. -Значение по умолчанию: 8. +Значение по умолчанию: 16. ## background_distributed_schedule_pool_size {#background_distributed_schedule_pool_size} @@ -1718,6 +1719,27 @@ TCP порт для защищённого обмена данными с кли users.xml ``` +## wait_dictionaries_load_at_startup {#wait_dictionaries_load_at_startup} + +Эта настройка позволяет указать поведение если `dictionaries_lazy_load` установлено в `false`. +(Если `dictionaries_lazy_load` установлено в `true`, то эта настройка ни на что не влияет.) + +Если `wait_dictionaries_load_at_startup` установлено в `false`, то сервер начнет загрузку всех словарей на старте +и будет обрабатывать соединения, не дожидаясь окончания загрузки словарей. +Когда словарь первый раз используется в запросе, запрос будет ждать окончания загрузки этого словаря, если он еще не загрузился. +Установка `wait_dictionaries_load_at_startup` в `false` может помочь ClickHouse стартовать быстрее, однако некоторые запросы могут выполняться медленее (потому что они будут ждать окончания загрузки используемых в них словарей). + +Если `wait_dictionaries_load_at_startup` установлено в `true`, то сервер будет ждать окончания загрузки всех словарей на старте +до начала обработки соединений. + +По умолчанию - `true`. 
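Beyond the server-side example that follows, one assumed way to observe the effect of `dictionaries_lazy_load` and `wait_dictionaries_load_at_startup` from SQL is to inspect `system.dictionaries`; treat the exact column set as an assumption of this sketch.

```sql
-- With lazy loading, dictionaries that have not been used yet typically show status NOT_LOADED;
-- after eager loading at startup (or first use) they show LOADED, or FAILED with last_exception set.
SELECT name, status, loading_start_time, last_exception
FROM system.dictionaries
ORDER BY name;
```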
+ +**Пример** + +``` xml +true +``` + ## zookeeper {#server-settings_zookeeper} Содержит параметры, позволяющие ClickHouse взаимодействовать с кластером [ZooKeeper](http://zookeeper.apache.org/). diff --git a/docs/ru/operations/utilities/backupview.md b/docs/ru/operations/utilities/backupview.md new file mode 100644 index 00000000000..702fafadc17 --- /dev/null +++ b/docs/ru/operations/utilities/backupview.md @@ -0,0 +1,50 @@ +--- +slug: /en/operations/utilities/backupview +title: clickhouse_backupview +--- + +# clickhouse_backupview {#clickhouse_backupview} + +Модуль на Питоне для анализа бэкапов, созданных командой [BACKUP](https://clickhouse.com/docs/ru/operations/backup) +Главная идея этого модуля была в том, чтобы позволить извлечение информации из бэкапа без выполнения команды RESTORE. + +Этот модуль содержит функции для +- получения списка файлов внутри бэкапа +- чтения файлов из бэкапа +- получения информации в читаемом виде о базах данных, таблицах, партах, содержащихся в бэкапе +- проверки целостности бэкапа + +## Пример: + +```python +from clickhouse_backupview import open_backup, S3, FileInfo + +# Открыть бэкап. Можно также использовать локальный путь: +# backup = open_backup("/backups/my_backup_1/") +backup = open_backup(S3("uri", "access_key_id", "secret_access_key")) + +# Получить список баз данных внутри бэкапа. +print(backup.get_databases())) + +# Получить список таблиц внутри бэкапа, +# и для каждой таблицы получить ее определение а также список партов и партиций. +for db in backup.get_databases(): + for tbl in backup.get_tables(database=db): + print(backup.get_create_query(database=db, table=tbl)) + print(backup.get_partitions(database=db, table=tbl)) + print(backup.get_parts(database=db, table=tbl)) + +# Извлечь все содержимое бэкапа. +backup.extract_all(table="mydb.mytable", out='/tmp/my_backup_1/all/') + +# Извлечь данные конкретной таблицы. +backup.extract_table_data(table="mydb.mytable", out='/tmp/my_backup_1/mytable/') + +# Извлечь одну партицию из бэкапа. +backup.extract_table_data(table="mydb.mytable", partition="202201", out='/tmp/my_backup_1/202201/') + +# Извлечь один парт из бэкапа. +backup.extract_table_data(table="mydb.mytable", part="202201_100_200_3", out='/tmp/my_backup_1/202201_100_200_3/') +``` + +Больше примеров смотрите в [тесте](https://github.com/ClickHouse/ClickHouse/blob/master/utils/backupview/test/test.py). diff --git a/docs/ru/operations/utilities/index.md b/docs/ru/operations/utilities/index.md index b12d58a6d83..9eb90a3037c 100644 --- a/docs/ru/operations/utilities/index.md +++ b/docs/ru/operations/utilities/index.md @@ -13,3 +13,4 @@ sidebar_position: 56 - [ClickHouse obfuscator](../../operations/utilities/clickhouse-obfuscator.md) — обфусцирует данные. - [ClickHouse compressor](../../operations/utilities/clickhouse-compressor.md) — упаковывает и распаковывает данные. - [clickhouse-odbc-bridge](../../operations/utilities/odbc-bridge.md) — прокси-сервер для ODBC. +- [clickhouse_backupview](../../operations/utilities/backupview.md) — модуль на Питоне для анализа бэкапов ClickHouse. diff --git a/docs/ru/sql-reference/functions/comparison-functions.md b/docs/ru/sql-reference/functions/comparison-functions.md index f66b42977cc..bb9322d5a82 100644 --- a/docs/ru/sql-reference/functions/comparison-functions.md +++ b/docs/ru/sql-reference/functions/comparison-functions.md @@ -23,14 +23,14 @@ sidebar_label: "Функции сравнения" Замечание. До версии 1.1.54134 сравнение знаковых и беззнаковых целых чисел производилось также, как в C++. 
То есть, вы могли получить неверный результат в таких случаях: SELECT 9223372036854775807 \> -1. С версии 1.1.54134 поведение изменилось и стало математически корректным. -## equals, оператор a = b и a == b {#function-equals} +## equals, оператор a = b и a == b {#equals} -## notEquals, оператор a != b и a `<>` b {#function-notequals} +## notEquals, оператор a != b и a `<>` b {#notequals} -## less, оператор `<` {#function-less} +## less, оператор `<` {#less} -## greater, оператор `>` {#function-greater} +## greater, оператор `>` {#greater} -## lessOrEquals, оператор `<=` {#function-lessorequals} +## lessOrEquals, оператор `<=` {#lessorequals} -## greaterOrEquals, оператор `>=` {#function-greaterorequals} +## greaterOrEquals, оператор `>=` {#greaterorequals} diff --git a/docs/ru/sql-reference/table-functions/numbers.md b/docs/ru/sql-reference/table-functions/numbers.md index 5a6edc0e988..f7e52793a3c 100644 --- a/docs/ru/sql-reference/table-functions/numbers.md +++ b/docs/ru/sql-reference/table-functions/numbers.md @@ -7,7 +7,7 @@ sidebar_label: numbers # numbers {#numbers} `numbers(N)` - возвращает таблицу с единственным столбцом `number` (UInt64), содержащим натуральные числа от `0` до `N-1`. -`numbers(N, M)` - возвращает таблицу с единственным столбцом `number` (UInt64), содержащим натуральные числа от `N` to `(N + M - 1)`. +`numbers(N, M)` - возвращает таблицу с единственным столбцом `number` (UInt64), содержащим натуральные числа от `N` до `(N + M - 1)`. Так же как и таблица `system.numbers` может использоваться для тестов и генерации последовательных значений. Функция `numbers(N, M)` работает более эффективно, чем выборка из `system.numbers`. diff --git a/docs/zh/engines/table-engines/mergetree-family/mergetree.md b/docs/zh/engines/table-engines/mergetree-family/mergetree.md index cec4cb09047..c738ae0f24c 100644 --- a/docs/zh/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/mergetree.md @@ -66,7 +66,7 @@ ORDER BY expr - `PARTITION BY` — [分区键](custom-partitioning-key.md) ,可选项。 - 大多数情况下,不需要分使用区键。即使需要使用,也不需要使用比月更细粒度的分区键。分区不会加快查询(这与 ORDER BY 表达式不同)。永远也别使用过细粒度的分区键。不要使用客户端指定分区标识符或分区字段名称来对数据进行分区(而是将分区字段标识或名称作为 ORDER BY 表达式的第一列来指定分区)。 + 大多数情况下,不需要使用分区键。即使需要使用,也不需要使用比月更细粒度的分区键。分区不会加快查询(这与 ORDER BY 表达式不同)。永远也别使用过细粒度的分区键。不要使用客户端指定分区标识符或分区字段名称来对数据进行分区(而是将分区字段标识或名称作为 ORDER BY 表达式的第一列来指定分区)。 要按月分区,可以使用表达式 `toYYYYMM(date_column)` ,这里的 `date_column` 是一个 [Date](../../../engines/table-engines/mergetree-family/mergetree.md) 类型的列。分区名的格式会是 `"YYYYMM"` 。 @@ -349,8 +349,8 @@ WHERE 子句中的条件可以包含对某列数据进行运算的函数表达 | 函数 (操作符) / 索引 | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | | ------------------------------------------------------------ | ----------- | ------ | ---------- | ---------- | ------------ | -| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#equals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | | [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✔ | | [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✗ | ✗ | ✗ | | [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | 
✔ | ✔ | ✔ | ✔ | ✗ | @@ -358,10 +358,10 @@ WHERE 子句中的条件可以包含对某列数据进行运算的函数表达 | [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | | [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | | [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [less (\<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [less (\<)](../../../sql-reference/functions/comparison-functions.md#less) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#greater) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | | [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | | [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | | hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | diff --git a/docs/zh/sql-reference/functions/comparison-functions.md b/docs/zh/sql-reference/functions/comparison-functions.md index ef3adf427f1..ed33dc40531 100644 --- a/docs/zh/sql-reference/functions/comparison-functions.md +++ b/docs/zh/sql-reference/functions/comparison-functions.md @@ -21,14 +21,14 @@ sidebar_label: 比较函数 字符串按字节进行比较。较短的字符串小于以其开头并且至少包含一个字符的所有字符串。 -## 等于,a=b和a==b 运算符 {#equals-a-b-and-a-b-operator} +## 等于,a=b和a==b 运算符 {#equals} -## 不等于,a!=b和a<>b 运算符 {#notequals-a-operator-b-and-a-b} +## 不等于,a!=b和a<>b 运算符 {#notequals} -## 少, < 运算符 {#less-operator} +## 少, < 运算符 {#less} -## 大于, > 运算符 {#greater-operator} +## 大于, > 运算符 {#greater} -## 小于等于, <= 运算符 {#lessorequals-operator} +## 小于等于, <= 运算符 {#lessorequals} -## 大于等于, >= 运算符 {#greaterorequals-operator} +## 大于等于, >= 运算符 {#greaterorequals} diff --git a/packages/clickhouse-server.yaml b/packages/clickhouse-server.yaml index 5e2bc7c7412..7894129b8e3 100644 --- a/packages/clickhouse-server.yaml +++ b/packages/clickhouse-server.yaml @@ -52,8 +52,6 @@ contents: dst: /lib/systemd/system/clickhouse-server.service - src: root/usr/bin/clickhouse-copier dst: /usr/bin/clickhouse-copier -- src: root/usr/bin/clickhouse-report - dst: /usr/bin/clickhouse-report - src: root/usr/bin/clickhouse-server dst: /usr/bin/clickhouse-server # clickhouse-keeper part diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index eb117e74f6b..f17aff65fb5 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -1,3 +1,5 @@ +add_compile_options($<$,$>:${COVERAGE_FLAGS}>) + if (USE_CLANG_TIDY) set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") endif () @@ -61,8 +63,6 @@ option (ENABLE_CLICKHOUSE_SU "A tool similar to 'su'" ${ENABLE_CLICKHOUSE_ALL}) option (ENABLE_CLICKHOUSE_DISKS "A tool to manage disks" ${ENABLE_CLICKHOUSE_ALL}) -option (ENABLE_CLICKHOUSE_REPORT "A tiny tool to collect a clickhouse-server state" ${ENABLE_CLICKHOUSE_ALL}) - if (NOT ENABLE_NURAFT) # RECONFIGURE_MESSAGE_LEVEL should not be used here, # since 
ENABLE_NURAFT is set to OFF for FreeBSD and Darwin. @@ -388,9 +388,6 @@ if (ENABLE_CLICKHOUSE_SU) install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-su" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) list(APPEND CLICKHOUSE_BUNDLE clickhouse-su) endif () -if (ENABLE_CLICKHOUSE_REPORT) - include(${ClickHouse_SOURCE_DIR}/utils/report/CMakeLists.txt) -endif () if (ENABLE_CLICKHOUSE_KEEPER) if (NOT BUILD_STANDALONE_KEEPER AND CREATE_KEEPER_SYMLINK) @@ -432,6 +429,11 @@ if (USE_BINARY_HASH) add_custom_command(TARGET clickhouse POST_BUILD COMMAND ./clickhouse hash-binary > hash && ${OBJCOPY_PATH} --add-section .clickhouse.hash=hash clickhouse COMMENT "Adding section '.clickhouse.hash' to clickhouse binary" VERBATIM) endif() +if (CHECK_LARGE_OBJECT_SIZES) + add_custom_command(TARGET clickhouse POST_BUILD + COMMAND "${CMAKE_SOURCE_DIR}/utils/check-style/check-large-objects.sh" "${CMAKE_BINARY_DIR}") +endif () + if (SPLIT_DEBUG_SYMBOLS) clickhouse_split_debug_symbols(TARGET clickhouse DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH clickhouse) else() diff --git a/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp index ed3d4a1ea69..d6b8b38d84d 100644 --- a/programs/benchmark/Benchmark.cpp +++ b/programs/benchmark/Benchmark.cpp @@ -46,6 +46,7 @@ namespace CurrentMetrics { extern const Metric LocalThread; extern const Metric LocalThreadActive; + extern const Metric LocalThreadScheduled; } namespace DB @@ -107,7 +108,7 @@ public: settings(settings_), shared_context(Context::createShared()), global_context(Context::createGlobal(shared_context.get())), - pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, concurrency) + pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, concurrency) { const auto secure = secure_ ? 
Protocol::Secure::Enable : Protocol::Secure::Disable; size_t connections_cnt = std::max(ports_.size(), hosts_.size()); diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index d29824581fa..d2527ad0c98 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -17,18 +17,15 @@ #include "Core/Protocol.h" #include "Parsers/formatAST.h" -#include - #include -#include "config_version.h" +#include #include #include #include #include #include -#include #include #include @@ -328,7 +325,7 @@ try processConfig(); adjustSettings(); - initTtyBuffer(toProgressOption(config().getString("progress", "default"))); + initTTYBuffer(toProgressOption(config().getString("progress", "default"))); { // All that just to set DB::CurrentThread::get().getGlobalContext() @@ -1241,7 +1238,6 @@ void Client::processConfig() global_context->setCurrentQueryId(query_id); } print_stack_trace = config().getBool("stacktrace", false); - logging_initialized = true; if (config().has("multiquery")) is_multiquery = true; @@ -1463,7 +1459,6 @@ int mainEntryClickHouseClient(int argc, char ** argv) DB::Client client; // Initialize command line options client.init(argc, argv); - /// Initialize config file return client.run(); } catch (const DB::Exception & e) diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index b2b4970d04f..1dfdcb3c745 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -25,6 +25,7 @@ namespace CurrentMetrics { extern const Metric LocalThread; extern const Metric LocalThreadActive; + extern const Metric LocalThreadScheduled; } namespace DB @@ -200,7 +201,7 @@ void ClusterCopier::discoverTablePartitions(const ConnectionTimeouts & timeouts, { /// Fetch partitions list from a shard { - ThreadPool thread_pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, num_threads ? num_threads : 2 * getNumberOfPhysicalCPUCores()); + ThreadPool thread_pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, num_threads ? 
num_threads : 2 * getNumberOfPhysicalCPUCores()); for (const TaskShardPtr & task_shard : task_table.all_shards) thread_pool.scheduleOrThrowOnError([this, timeouts, task_shard]() @@ -1407,7 +1408,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( /// 3) Create helping table on the whole destination cluster auto & settings_push = task_cluster->settings_push; - auto connection = task_table.cluster_push->getAnyShardInfo().pool->get(timeouts, &settings_push, true); + auto connection = task_table.cluster_push->getAnyShardInfo().pool->get(timeouts, settings_push, true); String create_query = getRemoteCreateTable(task_shard.task_table.table_push, *connection, settings_push); ParserCreateQuery parser_create_query; @@ -1785,7 +1786,7 @@ String ClusterCopier::getRemoteCreateTable(const DatabaseAndTableName & table, C ASTPtr ClusterCopier::getCreateTableForPullShard(const ConnectionTimeouts & timeouts, TaskShard & task_shard) { /// Fetch and parse (possibly) new definition - auto connection_entry = task_shard.info.pool->get(timeouts, &task_cluster->settings_pull, true); + auto connection_entry = task_shard.info.pool->get(timeouts, task_cluster->settings_pull, true); String create_query_pull_str = getRemoteCreateTable( task_shard.task_table.table_pull, *connection_entry, diff --git a/programs/disks/CommandCopy.cpp b/programs/disks/CommandCopy.cpp index 296fc708411..421e4038d12 100644 --- a/programs/disks/CommandCopy.cpp +++ b/programs/disks/CommandCopy.cpp @@ -57,7 +57,7 @@ public: String relative_path_from = validatePathAndGetAsRelative(path_from); String relative_path_to = validatePathAndGetAsRelative(path_to); - disk_from->copyDirectoryContent(relative_path_from, disk_to, relative_path_to, /* read_settings= */ {}, /* write_settings= */ {}); + disk_from->copyDirectoryContent(relative_path_from, disk_to, relative_path_to, /* read_settings= */ {}, /* write_settings= */ {}, /* cancellation_hook= */ {}); } }; } diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index 8ebaf865cf4..e04e669abae 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -35,7 +35,7 @@ #include "Core/Defines.h" #include "config.h" -#include "config_version.h" +#include #include "config_tools.h" @@ -556,7 +556,8 @@ catch (...) { /// Poco does not provide stacktrace. tryLogCurrentException("Application"); - throw; + auto code = getCurrentExceptionCode(); + return code ? code : -1; } diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 36020d22cc0..f3b84fa3eb1 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -495,7 +495,7 @@ try processConfig(); adjustSettings(); - initTtyBuffer(toProgressOption(config().getString("progress", "default"))); + initTTYBuffer(toProgressOption(config().getString("progress", "default"))); applyCmdSettings(global_context); @@ -563,9 +563,6 @@ catch (...) void LocalServer::updateLoggerLevel(const String & logs_level) { - if (!logging_initialized) - return; - config().setString("logger.level", logs_level); updateLevels(config(), logger()); } @@ -607,21 +604,13 @@ void LocalServer::processConfig() Poco::AutoPtr pf = new OwnPatternFormatter; Poco::AutoPtr log = new OwnFormattingChannel(pf, new Poco::SimpleFileChannel(server_logs_file)); Poco::Logger::root().setChannel(log); - logging_initialized = true; - } - else if (logging || is_interactive) - { - config().setString("logger", "logger"); - auto log_level_default = is_interactive && !logging ? 
"none" : level; - config().setString("logger.level", config().getString("log-level", config().getString("send_logs_level", log_level_default))); - buildLoggers(config(), logger(), "clickhouse-local"); - logging_initialized = true; } else { - Poco::Logger::root().setLevel("none"); - Poco::Logger::root().setChannel(Poco::AutoPtr(new Poco::NullChannel())); - logging_initialized = false; + config().setString("logger", "logger"); + auto log_level_default = logging ? level : "fatal"; + config().setString("logger.level", config().getString("log-level", config().getString("send_logs_level", log_level_default))); + buildLoggers(config(), logger(), "clickhouse-local"); } shared_context = Context::createShared(); @@ -763,7 +752,7 @@ void LocalServer::processConfig() { DatabaseCatalog::instance().createBackgroundTasks(); loadMetadata(global_context); - DatabaseCatalog::instance().startupBackgroundCleanup(); + DatabaseCatalog::instance().startupBackgroundTasks(); } /// For ClickHouse local if path is not set the loader will be disabled. diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 85ae6d7796c..11ad06640c8 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -98,7 +98,7 @@ #include #include "config.h" -#include "config_version.h" +#include #if defined(OS_LINUX) # include @@ -676,6 +676,10 @@ try global_context->addWarningMessage("Server was built with sanitizer. It will work slowly."); #endif +#if defined(SANITIZE_COVERAGE) || WITH_COVERAGE + global_context->addWarningMessage("Server was built with code coverage. It will work slowly."); +#endif + const size_t physical_server_memory = getMemoryAmount(); LOG_INFO(log, "Available RAM: {}; physical cores: {}; logical cores: {}.", @@ -1159,6 +1163,8 @@ try CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_max_size_in_bytes, compiled_expression_cache_max_elements); #endif + NamedCollectionUtils::loadIfNot(); + /// Initialize main config reloader. std::string include_from_path = config().getString("include_from", "/etc/metrika.xml"); @@ -1372,6 +1378,8 @@ try global_context->reloadAuxiliaryZooKeepersConfigIfChanged(config); + global_context->reloadQueryMaskingRulesIfChanged(config); + std::lock_guard lock(servers_lock); updateServers(*config, server_pool, async_metrics, servers, servers_to_start_before_tables); } @@ -1691,7 +1699,7 @@ try /// Then, load remaining databases loadMetadata(global_context, default_database); convertDatabasesEnginesIfNeed(global_context); - database_catalog.startupBackgroundCleanup(); + database_catalog.startupBackgroundTasks(); /// After loading validate that default database exists database_catalog.assertDatabaseExists(default_database); /// Load user-defined SQL functions. @@ -1816,6 +1824,9 @@ try try { global_context->loadOrReloadDictionaries(config()); + + if (!config().getBool("dictionaries_lazy_load", true) && config().getBool("wait_dictionaries_load_at_startup", true)) + global_context->waitForDictionariesLoad(); } catch (...) { @@ -1961,7 +1972,8 @@ catch (...) { /// Poco does not provide stacktrace. tryLogCurrentException("Application"); - throw; + auto code = getCurrentExceptionCode(); + return code ? 
code : -1; } std::unique_ptr Server::buildProtocolStackFromConfig( diff --git a/programs/server/config.xml b/programs/server/config.xml index 228f3b0f07f..f81fbe9cc3b 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -830,13 +830,13 @@ And also (and which is more important), the initial_user will be used as current user for the query. - Right now the protocol is pretty simple and it only takes into account: + Right now the protocol is pretty simple, and it only takes into account: - cluster name - query - Also it will be nice if the following will be implemented: - - source hostname (see interserver_http_host), but then it will depends from DNS, - it can use IP address instead, but then the you need to get correct on the initiator node. + Also, it will be nice if the following will be implemented: + - source hostname (see interserver_http_host), but then it will depend on DNS, + it can use IP address instead, but then you need to get correct on the initiator node. - target hostname / ip address (same notes as for source hostname) - time-based security tokens --> @@ -1248,6 +1248,25 @@ 7500 + + + system + s3queue_log
+ toYYYYMM(event_date) + 7500 +
+ + + + system + blob_storage_log
+ toYYYYMM(event_date) + 7500 + event_date + INTERVAL 30 DAY +
+ *_dictionary.*ml + + true + + + true + *_function.*ml diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html index d4fa1626873..123a15c5706 100644 --- a/programs/server/dashboard.html +++ b/programs/server/dashboard.html @@ -14,7 +14,7 @@ --moving-shadow-color: rgba(0, 0, 0, 0.5); --input-shadow-color: rgba(0, 255, 0, 1); --error-color: red; - --auth-error-color: white; + --global-error-color: white; --legend-background: rgba(255, 255, 255, 0.75); --title-color: #666; --text-color: black; @@ -202,6 +202,10 @@ margin-right: 0.25rem; } + #chart-params .param { + width: 6%; + } + input { font-family: Liberation Sans, DejaVu Sans, sans-serif, Noto Color Emoji, Apple Color Emoji, Segoe UI Emoji; outline: none; @@ -240,14 +244,13 @@ font-weight: bold; user-select: none; cursor: pointer; - margin-bottom: 1rem; } #run:hover { filter: contrast(125%); } - #add, #reload, #edit { + #add, #reload, #edit, #search { padding: 0.25rem 0.5rem; text-align: center; font-weight: bold; @@ -264,16 +267,22 @@ height: 3ex; } - #add:hover, #reload:hover, #edit:hover { + #add:hover, #reload:hover, #edit:hover, #search:hover { background: var(--button-background-color); } - #auth-error { + #search-query { + float: right; + width: 36%; + } + + + #global-error { align-self: center; width: 60%; padding: .5rem; - color: var(--auth-error-color); + color: var(--global-error-color); display: flex; flex-flow: row nowrap; @@ -417,7 +426,7 @@ } #mass-editor-message { - color: var(--auth-error-color); + color: var(--global-error-color); } #charts > div:only-child .display-only-if-more-than-one-chart { @@ -445,10 +454,11 @@ +
-
+
@@ -501,151 +511,11 @@ const errorMessages = [ } ] -/// This is just a demo configuration of the dashboard. -let queries = [ - { - "title": "Queries/second", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_Query) -FROM system.metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "CPU Usage (cores)", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_OSCPUVirtualTimeMicroseconds) / 1000000 -FROM system.metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "Queries Running", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(CurrentMetric_Query) -FROM system.metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "Merges Running", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(CurrentMetric_Merge) -FROM system.metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "Selected Bytes/second", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_SelectedBytes) -FROM system.metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "IO Wait", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_OSIOWaitMicroseconds) / 1000000 -FROM system.metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "CPU Wait", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_OSCPUWaitMicroseconds) / 1000000 -FROM system.metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "OS CPU Usage (Userspace)", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(value) -FROM system.asynchronous_metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -AND metric = 'OSUserTimeNormalized' -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "OS CPU Usage (Kernel)", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(value) -FROM system.asynchronous_metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -AND metric = 'OSSystemTimeNormalized' -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "Read From Disk", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_OSReadBytes) -FROM system.metric_log -WHERE event_date >= toDate(now() - 
{seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "Read From Filesystem", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_OSReadChars) -FROM system.metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "Memory (tracked)", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(CurrentMetric_MemoryTracking) -FROM system.metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "Load Average (15 minutes)", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(value) -FROM system.asynchronous_metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -AND metric = 'LoadAverage15' -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "Selected Rows/second", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_SelectedRows) -FROM system.metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "Inserted Rows/second", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(ProfileEvent_InsertedRows) -FROM system.metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "Total MergeTree Parts", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, avg(value) -FROM system.asynchronous_metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -AND metric = 'TotalPartsOfMergeTreeTables' -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - }, - { - "title": "Max Parts For Partition", - "query": `SELECT toStartOfInterval(event_time, INTERVAL {rounding:UInt32} SECOND)::INT AS t, max(value) -FROM system.asynchronous_metric_log -WHERE event_date >= toDate(now() - {seconds:UInt32}) AND event_time >= now() - {seconds:UInt32} -AND metric = 'MaxPartCountForPartition' -GROUP BY t -ORDER BY t WITH FILL STEP {rounding:UInt32}` - } -]; +/// Query to fill `queries` list for the dashboard +let search_query = `SELECT title, query FROM system.dashboards WHERE dashboard = 'overview'`; +let customized = false; +let queries = []; /// Query parameters with predefined default values. /// All other parameters will be automatically found in the queries. 
@@ -683,7 +553,7 @@ function findParamsInQuery(query, new_params) { } function findParamsInQueries() { - let new_params = {} + let new_params = {}; queries.forEach(q => findParamsInQuery(q.query, new_params)); params = new_params; } @@ -790,9 +660,10 @@ function insertChart(i) { title_text.data = ''; findParamsInQuery(q.query, params); buildParams(); + refreshCustomized(true); + saveState(); const idx = getCurrentIndex(); draw(idx, chart, getParamsForURL(), q.query); - saveState(); } query_editor_confirm.addEventListener('click', editConfirm); @@ -940,6 +811,7 @@ function insertChart(i) { findParamsInQueries(); buildParams(); resize(); + refreshCustomized(true); saveState(); }); @@ -958,7 +830,7 @@ function insertChart(i) { charts.appendChild(chart); return {chart: chart, textarea: query_editor_textarea}; -}; +} document.getElementById('add').addEventListener('click', e => { queries.push({ title: '', query: '' }); @@ -972,9 +844,12 @@ document.getElementById('add').addEventListener('click', e => { }); document.getElementById('reload').addEventListener('click', e => { - reloadAll(); + reloadAll(false); }); +document.getElementById('search').addEventListener('click', e => { + reloadAll(true); +}); let mass_editor_active = false; @@ -1002,8 +877,9 @@ function massEditorApplyChanges() { ({params, queries} = JSON.parse(editor.value)); hideMassEditor(); regenerate(); - drawAll(); + refreshCustomized(true); saveState(); + drawAll(); } document.getElementById('edit').addEventListener('click', e => { @@ -1085,13 +961,8 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend- }; } -async function draw(idx, chart, url_params, query) { - if (plots[idx]) { - plots[idx].destroy(); - plots[idx] = null; - } - - host = document.getElementById('url').value; +async function doFetch(query, url_params = '') { + host = document.getElementById('url').value || host; user = document.getElementById('user').value; password = document.getElementById('password').value; @@ -1135,6 +1006,17 @@ async function draw(idx, chart, url_params, query) { } } + return {data, error}; +} + +async function draw(idx, chart, url_params, query) { + if (plots[idx]) { + plots[idx].destroy(); + plots[idx] = null; + } + + let {data, error} = await doFetch(query, url_params); + if (!error) { if (!Array.isArray(data)) { error = "Query should return an array."; @@ -1151,7 +1033,7 @@ async function draw(idx, chart, url_params, query) { let title_div = chart.querySelector('.title'); if (error) { error_div.firstChild.data = error; - title_div.style.display = 'none'; + title_div.style.display = 'none'; error_div.style.display = 'block'; return false; } else { @@ -1194,29 +1076,29 @@ async function draw(idx, chart, url_params, query) { /// Set title const title = queries[idx] && queries[idx].title ? 
queries[idx].title.replaceAll(/\{(\w+)\}/g, (_, name) => params[name] ) : ''; chart.querySelector('.title').firstChild.data = title; - return true + return true; } -function showAuthError(message) { +function showError(message) { const charts = document.getElementById('charts'); charts.style.height = '0px'; charts.style.opacity = '0'; document.getElementById('add').style.display = 'none'; document.getElementById('edit').style.display = 'none'; - const authError = document.getElementById('auth-error'); - authError.textContent = message; - authError.style.display = 'flex'; + const error = document.getElementById('global-error'); + error.textContent = message; + error.style.display = 'flex'; } -function hideAuthError() { +function hideError() { const charts = document.getElementById('charts'); charts.style.height = 'auto'; charts.style.opacity = '1'; - const authError = document.getElementById('auth-error'); - authError.textContent = ''; - authError.style.display = 'none'; + const error = document.getElementById('global-error'); + error.textContent = ''; + error.style.display = 'none'; } let firstLoad = true; @@ -1226,12 +1108,12 @@ async function drawAll() { const chartsArray = document.getElementsByClassName('chart'); if (!firstLoad) { - hideAuthError(); + hideError(); } await Promise.all([...Array(queries.length)].map(async (_, i) => { return draw(i, chartsArray[i], params, queries[i].query).catch((e) => { if (!firstLoad) { - showAuthError(e.message); + showError(e.message); } return false; }); @@ -1239,20 +1121,20 @@ async function drawAll() { if (firstLoad) { firstLoad = false; } else { - enableReloadButton(); - enableRunButton(); + enableButtons(); } if (results.includes(true)) { const element = document.querySelector('.inputs'); element.classList.remove('unconnected'); document.getElementById('add').style.display = 'inline-block'; document.getElementById('edit').style.display = 'inline-block'; - } + document.getElementById('search-span').style.display = ''; + } else { const charts = document.getElementById('charts') charts.style.height = '0px'; } - }) + }); } function resize() { @@ -1266,58 +1148,108 @@ function resize() { new ResizeObserver(resize).observe(document.body); -function disableReloadButton() { - const reloadButton = document.getElementById('reload') - reloadButton.value = 'Reloading…' - reloadButton.disabled = true - reloadButton.classList.add('disabled') +function disableButtons() { + const reloadButton = document.getElementById('reload'); + reloadButton.value = 'Reloading…'; + reloadButton.disabled = true; + reloadButton.classList.add('disabled'); + + const runButton = document.getElementById('run'); + runButton.value = 'Reloading…'; + runButton.disabled = true; + runButton.classList.add('disabled'); + + const searchButton = document.getElementById('search'); + searchButton.value = '…'; + searchButton.disabled = true; + searchButton.classList.add('disabled'); } -function disableRunButton() { - const runButton = document.getElementById('run') - runButton.value = 'Reloading…' - runButton.disabled = true - runButton.classList.add('disabled') +function enableButtons() { + const reloadButton = document.getElementById('reload'); + reloadButton.value = 'Reload'; + reloadButton.disabled = false; + reloadButton.classList.remove('disabled'); + + const runButton = document.getElementById('run'); + runButton.value = 'Ok'; + runButton.disabled = false; + runButton.classList.remove('disabled'); + + const searchButton = document.getElementById('search'); + searchButton.value = '🔎'; + 
searchButton.disabled = false; + searchButton.classList.remove('disabled'); } -function enableReloadButton() { - const reloadButton = document.getElementById('reload') - reloadButton.value = 'Reload' - reloadButton.disabled = false - reloadButton.classList.remove('disabled') -} - -function enableRunButton() { - const runButton = document.getElementById('run') - runButton.value = 'Ok' - runButton.disabled = false - runButton.classList.remove('disabled') -} - -function reloadAll() { - updateParams(); - drawAll(); - saveState(); - disableReloadButton(); - disableRunButton(); +async function reloadAll(do_search) { + disableButtons(); + try { + updateParams(); + if (do_search) { + search_query = document.getElementById('search-query').value; + queries = []; + refreshCustomized(false); + } + saveState(); + if (do_search) { + await searchQueries(); + } + await drawAll(); + } catch (e) { + showError(e.toString()); + } + enableButtons(); } document.getElementById('params').onsubmit = function(event) { - reloadAll(); + let do_search = document.activeElement === document.getElementById('search-query'); + reloadAll(do_search); event.preventDefault(); } function saveState() { - const state = { host: host, user: user, queries: queries, params: params }; + const state = { host, user, queries, params, search_query, customized }; history.pushState(state, '', window.location.pathname + (window.location.search || '') + '#' + btoa(JSON.stringify(state))); } +async function searchQueries() { + let {data, error} = await doFetch(search_query); + if (error) { + throw new Error(error); + } + if (!Array.isArray(data)) { + throw new Error("Search query should return an array."); + } else if (data.length == 0) { + throw new Error("Search query returned empty result."); + } else if (data.length != 2) { + throw new Error("Search query should return exactly two columns: title and query."); + } else if (!Array.isArray(data[0]) || !Array.isArray(data[1]) || data[0].length != data[1].length) { + throw new Error("Wrong data format of the search query."); + } + + for (let i = 0; i < data[0].length; i++) { + queries.push({title: data[0][i], query: data[1][i]}); + } + + regenerate(); +} + +function refreshCustomized(value) { + if (value !== undefined) { + customized = value; + } + document.getElementById('search-span').style.opacity = customized ? 0.5 : 1.0; +} + function regenerate() { document.getElementById('url').value = host; document.getElementById('user').value = user; document.getElementById('password').value = password; + document.getElementById('search-query').value = search_query; + refreshCustomized(); findParamsInQueries(); buildParams(); @@ -1336,7 +1268,7 @@ function regenerate() { window.onpopstate = function(event) { if (!event.state) { return; } - ({host, user, queries, params} = event.state); + ({host, user, queries, params, search_query, customized} = event.state); regenerate(); drawAll(); @@ -1344,19 +1276,35 @@ window.onpopstate = function(event) { if (window.location.hash) { try { - ({host, user, queries, params} = JSON.parse(atob(window.location.hash.substring(1)))); + let search_query_, customized_; + ({host, user, queries, params, search_query_, customized_} = JSON.parse(atob(window.location.hash.substring(1)))); + // For compatibility with old URLs' hashes + search_query = search_query_ !== undefined ? search_query_ : search_query; + customized = customized_ !== undefined ? 
customized_ : true; } catch {} } -regenerate(); - -let new_theme = window.localStorage.getItem('theme'); -if (new_theme && new_theme != theme) { - setTheme(new_theme); -} else { - drawAll(); +async function start() { + try { + if (queries.length == 0) { + await searchQueries(); + } else { + regenerate(); + } + saveState(); + let new_theme = window.localStorage.getItem('theme'); + if (new_theme && new_theme != theme) { + setTheme(new_theme); + } else { + drawAll(); + } + } catch (e) { + showError(e.toString()); + } } +start(); + diff --git a/programs/server/embedded.xml b/programs/server/embedded.xml index c2336e0d582..9311749a173 100644 --- a/programs/server/embedded.xml +++ b/programs/server/embedded.xml @@ -23,7 +23,9 @@ default default + 1 + 1 diff --git a/programs/server/users.xml b/programs/server/users.xml index fbb5a2c228f..57bc6309a54 100644 --- a/programs/server/users.xml +++ b/programs/server/users.xml @@ -85,7 +85,10 @@ default - + 1 + + + 1 10 - - 0 diff --git a/tests/config/config.d/named_collection.xml b/tests/config/config.d/named_collection.xml index 01645ccecd5..555c3b7f65b 100644 --- a/tests/config/config.d/named_collection.xml +++ b/tests/config/config.d/named_collection.xml @@ -37,6 +37,10 @@ test testtest + + 1Mi + collection + http://127.0.0.1:8123?query=select+1 RawBLOB diff --git a/tests/config/config.d/zookeeper.xml b/tests/config/config.d/zookeeper.xml index a54149e6617..ce402f4850b 100644 --- a/tests/config/config.d/zookeeper.xml +++ b/tests/config/config.d/zookeeper.xml @@ -2,7 +2,7 @@ random - true + 1 127.0.0.1 9181 diff --git a/tests/config/config.d/zookeeper_fault_injection.xml b/tests/config/config.d/zookeeper_fault_injection.xml index a339e1f0fba..75b96064817 100644 --- a/tests/config/config.d/zookeeper_fault_injection.xml +++ b/tests/config/config.d/zookeeper_fault_injection.xml @@ -1,5 +1,6 @@ + 1 localhost 9181 diff --git a/tests/config/install.sh b/tests/config/install.sh index c31275cdcf2..417a413bbec 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -18,6 +18,7 @@ mkdir -p $DEST_CLIENT_PATH ln -sf $SRC_PATH/config.d/zookeeper_write.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/listen.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/text_log.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/blob_storage_log.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/custom_settings_prefixes.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/enable_access_control_improvements.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/macros.xml $DEST_SERVER_PATH/config.d/ @@ -152,6 +153,7 @@ if [[ -n "$EXPORT_S3_STORAGE_POLICIES" ]]; then ln -sf $SRC_PATH/config.d/storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/users.d/s3_cache.xml $DEST_SERVER_PATH/users.d/ + ln -sf $SRC_PATH/users.d/s3_cache_new.xml $DEST_SERVER_PATH/users.d/ fi if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then diff --git a/tests/config/users.d/s3_cache_new.xml b/tests/config/users.d/s3_cache_new.xml new file mode 100644 index 00000000000..0afa3d68fc6 --- /dev/null +++ b/tests/config/users.d/s3_cache_new.xml @@ -0,0 +1,7 @@ + + + + 10 + + + diff --git a/tests/instructions/coverity.txt b/tests/instructions/coverity.txt deleted file mode 100644 index f8d6d68d326..00000000000 --- a/tests/instructions/coverity.txt +++ /dev/null @@ -1,28 +0,0 @@ -# Download tool at https://scan.coverity.com/download?tab=cxx - -tar xf cov-analysis-linux64-2017.07.tar.gz -export 
PATH=$PATH:/home/milovidov/cov-analysis-linux64-2017.07/bin - -mkdir ClickHouse_coverity -cd ClickHouse_coverity -git clone --recursive git@github.com:yandex/ClickHouse.git . - -mkdir build -cd build - -# "Debug" is for faster build -CC=gcc-7 CXX=g++-7 cmake -D CMAKE_BUILD_TYPE=Debug -D CCACHE_FOUND=0 .. - -# Build all targets that we don't want to analyze. -cd contrib && make -j24 && cd .. - -cov-configure --comptype gcc --compiler gcc-7 --template - -cov-build --dir cov-int make -j24 - -# Build is painful slow. Some targets compile in about one hour. Total time is about 4..5 hours. - -tar czvf clickhouse.tgz cov-int - -# tarball is 1.2 GB. -# Upload result at https://scan.coverity.com/projects/yandex-clickhouse/builds/new diff --git a/tests/instructions/cppcheck.txt b/tests/instructions/cppcheck.txt deleted file mode 100644 index 1bc6d1f6c09..00000000000 --- a/tests/instructions/cppcheck.txt +++ /dev/null @@ -1,22 +0,0 @@ -# Install cppcheck - -mkdir cppcheck && cd cppcheck -git clone git@github.com:danmar/cppcheck.git . -mkdir build && cd build -CC=gcc-7 CXX=g++-7 cmake -D CMAKE_BUILD_TYPE=Release .. -make -j24 -sudo make install - -# Perform analysis - -cd ClickHouse_clean/build -cppcheck -j24 --project=compile_commands.json --enable=all 2> cppcheck-errors.txt - -# or (from directory with sources) -# cppcheck -i contrib -i build --enable=all . 2> cppcheck-errors.txt - -# Check is pretty fast. -# It gives many false positives. -# But the result is worth looking and at least few real errors found. - -grep -v -F 'contrib/' cppcheck-errors.txt diff --git a/tests/instructions/easy_tasks_sorted_ru.md b/tests/instructions/easy_tasks_sorted_ru.md index 17e9708eef5..bc95e6b1c37 100644 --- a/tests/instructions/easy_tasks_sorted_ru.md +++ b/tests/instructions/easy_tasks_sorted_ru.md @@ -201,9 +201,9 @@ https://clickhouse.com/docs/en/operations/table_engines/external_data/ ## Возможность ATTACH партиции с меньшим или большим количеством столбцов. -## Поддержка неконстантного аргумента с тайм-зоной у некоторых функций для работы с датой и временем. +## + Поддержка неконстантного аргумента с тайм-зоной у некоторых функций для работы с датой и временем. -## Возможность задавать параметры соединений для табличных функций, движков таблиц и для реплик из отдельных разделов конфигурации. +## + Возможность задавать параметры соединений для табличных функций, движков таблиц и для реплик из отдельных разделов конфигурации. ## + Настройка rollup_use_nulls. diff --git a/tests/instructions/heap-profiler.txt b/tests/instructions/heap-profiler.txt deleted file mode 100644 index 3c35e9cf518..00000000000 --- a/tests/instructions/heap-profiler.txt +++ /dev/null @@ -1,14 +0,0 @@ -Build clickhouse without tcmalloc. cmake -D ENABLE_TCMALLOC=0 - -Copy clickhouse binary to your server. 
-scp programs/clickhouse server:~ - -ssh to your server - -Stop clickhouse: -sudo service clickhouse-server stop - -Run clickhouse with heap profiler from the terminal: -sudo -u clickhouse LD_PRELOAD=/usr/lib/libtcmalloc.so HEAPPROFILE=/var/log/clickhouse-server/heap.hprof ./clickhouse server --config /etc/clickhouse-server/config.xml - -Profiles will appear in /var/log/clickhouse-server/ diff --git a/tests/instructions/kafka.txt b/tests/instructions/kafka.txt deleted file mode 100644 index 69e87f38b24..00000000000 --- a/tests/instructions/kafka.txt +++ /dev/null @@ -1,45 +0,0 @@ -Use this config for docker-compose: - - version: '3' - - services: - - kafka: - depends_on: - - zookeeper - hostname: kafka - image: wurstmeister/kafka - environment: - KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:9094 - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT - KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE - KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 - ports: - - "9092:9092" - - "9094:9094" - - security_opt: - - label:disable - - zookeeper: - hostname: zookeeper - image: zookeeper - - security_opt: - - label:disable - -Start containers with `docker-compose up`. - -In clickhouse-client create table like: - - CREATE TABLE kafka ( a UInt8, b String) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'CSV') SETTINGS kafka_row_delimiter = '\n'; - -Login inside Kafka container and stream some data: - - docker exec -it bash --login - vi data.csv - cat data.csv | /opt/kafka/bin/kafka-console-producer.sh --topic topic --broker-list localhost:9092 - -Read data in clickhouse: - - SELECT * FROM kafka; diff --git a/tests/instructions/sanitizers.md b/tests/instructions/sanitizers.md deleted file mode 100644 index 3c50f6cbab7..00000000000 --- a/tests/instructions/sanitizers.md +++ /dev/null @@ -1,72 +0,0 @@ -# How to use Address Sanitizer - -Note: We use Address Sanitizer to run functional tests for every commit automatically. - -``` -mkdir build_asan && cd build_asan -``` - -Note: using clang instead of gcc is strongly recommended. Make sure you have installed required packages (`clang`, `lld`). It may be required to specify non-standard `lld` binary using `LINKER_NAME` option (e.g. `-D LINKER_NAME=lld-8`). - -``` -CC=clang CXX=clang++ cmake -D SANITIZE=address .. -ninja -``` - -## Copy binary to your server - -``` -scp ./programs/clickhouse yourserver:~/clickhouse-asan -``` - -## Start ClickHouse and run tests - -``` -sudo -u clickhouse ./clickhouse-asan server --config /etc/clickhouse-server/config.xml -``` - - -# How to use Thread Sanitizer - -``` -mkdir build_tsan && cd build_tsan -``` - -``` -CC=clang CXX=clang++ cmake -D SANITIZE=thread .. -ninja -``` - -## Start ClickHouse and run tests - -``` -sudo -u clickhouse TSAN_OPTIONS='halt_on_error=1' ./clickhouse-tsan server --config /etc/clickhouse-server/config.xml -``` - - -# How to use Undefined Behaviour Sanitizer - -``` -mkdir build_ubsan && cd build_ubsan -``` - -Note: clang is mandatory, because gcc (in version 8) has false positives due to devirtualization and it has less amount of checks. - -``` -CC=clang CXX=clang++ cmake -D SANITIZE=undefined .. -ninja -``` - -## Start ClickHouse and run tests - -``` -sudo -u clickhouse UBSAN_OPTIONS='print_stacktrace=1' ./clickhouse-ubsan server --config /etc/clickhouse-server/config.xml -``` - - -# How to use Memory Sanitizer - -``` -CC=clang CXX=clang++ cmake -D SANITIZE=memory .. 
-ninja -``` diff --git a/tests/instructions/syntax.txt b/tests/instructions/syntax.txt deleted file mode 100644 index 228b0eb6045..00000000000 --- a/tests/instructions/syntax.txt +++ /dev/null @@ -1,5 +0,0 @@ -# Relatively quick syntax check (20 minutes on 16-core server) - -mkdir build && cd build -cmake -D CMAKE_BUILD_TYPE=Debug .. -time jq --raw-output '.[] | .command' compile_commands.json | grep -P -- ' -o [^ ]+\.o' | grep -v -P -- '-c .+/contrib/' | grep -vP '\.(s|asm)$' | sed -r -e 's/ -o [^ ]+\.o/ -fsyntax-only/' | sort -R | xargs -I{} -P$(nproc) sh -c '{}' diff --git a/tests/instructions/tscancode.txt b/tests/instructions/tscancode.txt deleted file mode 100644 index 33a4eb34f35..00000000000 --- a/tests/instructions/tscancode.txt +++ /dev/null @@ -1,26 +0,0 @@ -# TScanCode is a static analyzer from Tencent -# It looks like to be based on CppCheck - -git clone git@github.com:Tencent/TscanCode.git -cd TscanCode/trunk -make -j4 - -# It looks weird that TScanCode itself compiles with multiple warnings like 'unused-but-set-variable' and 'misleading-indentation' - -# Run analysis: - -./tscancode -j4 --enable=all ~/work/ClickHouse 2> result.txt - -# It has no way to remove specific directories. We have to checkout ClickHouse to separate directory and manually remove "contrib". -# Otherwise it segfaults when analysing llvm submodule. - -# It works quite fast: - -real 0m17.174s -user 0m45.498s -sys 0m0.496s - -wc -l result.txt -61 result.txt - -# It gives almost all false positives. diff --git a/tests/integration/CMakeLists.txt b/tests/integration/CMakeLists.txt deleted file mode 100644 index ed12cf5b4a3..00000000000 --- a/tests/integration/CMakeLists.txt +++ /dev/null @@ -1,20 +0,0 @@ -set (TEST_USE_BINARIES CLICKHOUSE_TESTS_SERVER_BIN_PATH=${ClickHouse_BINARY_DIR}/programs/clickhouse CLICKHOUSE_TESTS_CLIENT_BIN_PATH=${ClickHouse_BINARY_DIR}/programs/clickhouse) - -find_program(DOCKER_CMD docker) -find_program(DOCKER_COMPOSE_CMD docker-compose) -find_program(PYTEST_CMD pytest) -find_program(SUDO_CMD sudo) - -# will mount only one binary to docker container - build with .so cant work -if(DOCKER_CMD) - if(INTEGRATION_USE_RUNNER AND SUDO_CMD) - add_test(NAME integration-runner WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMAND ${SUDO_CMD} ${CMAKE_CURRENT_SOURCE_DIR}/runner --binary ${ClickHouse_BINARY_DIR}/programs/clickhouse --configs-dir ${ClickHouse_SOURCE_DIR}/programs/server/) - message(STATUS "Using tests in docker with runner SUDO=${SUDO_CMD}; DOCKER=${DOCKER_CMD};") - endif() - if(NOT INTEGRATION_USE_RUNNER AND DOCKER_COMPOSE_CMD AND PYTEST_CMD) - # To run one test with debug: - # cmake . 
-DPYTEST_OPT="-ss;test_cluster_copier" - add_test(NAME integration-pytest WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMAND env ${TEST_USE_BINARIES} "CLICKHOUSE_TESTS_BASE_CONFIG_DIR=${ClickHouse_SOURCE_DIR}/programs/server/" "CLICKHOUSE_TESTS_CONFIG_DIR=${ClickHouse_SOURCE_DIR}/tests/config/" ${PYTEST_STARTER} ${PYTEST_CMD} ${PYTEST_OPT}) - message(STATUS "Using tests in docker DOCKER=${DOCKER_CMD}; DOCKER_COMPOSE=${DOCKER_COMPOSE_CMD}; PYTEST=${PYTEST_STARTER} ${PYTEST_CMD} ${PYTEST_OPT}") - endif() -endif() diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 729b30ba934..cbc511628f0 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -583,6 +583,7 @@ class ClickHouseCluster: self.rabbitmq_host = "rabbitmq1" self.rabbitmq_ip = None self.rabbitmq_port = 5672 + self.rabbitmq_secure_port = 5671 self.rabbitmq_dir = p.abspath(p.join(self.instances_dir, "rabbitmq")) self.rabbitmq_cookie_file = os.path.join(self.rabbitmq_dir, "erlang.cookie") self.rabbitmq_logs_dir = os.path.join(self.rabbitmq_dir, "logs") @@ -1316,6 +1317,7 @@ class ClickHouseCluster: self.with_rabbitmq = True env_variables["RABBITMQ_HOST"] = self.rabbitmq_host env_variables["RABBITMQ_PORT"] = str(self.rabbitmq_port) + env_variables["RABBITMQ_SECURE_PORT"] = str(self.rabbitmq_secure_port) env_variables["RABBITMQ_LOGS"] = self.rabbitmq_logs_dir env_variables["RABBITMQ_LOGS_FS"] = "bind" env_variables["RABBITMQ_COOKIE_FILE"] = self.rabbitmq_cookie_file diff --git a/tests/integration/helpers/keeper_config1.xml b/tests/integration/helpers/keeper_config1.xml index 7702aecba9c..12c6c0b78b6 100644 --- a/tests/integration/helpers/keeper_config1.xml +++ b/tests/integration/helpers/keeper_config1.xml @@ -11,6 +11,9 @@ 2181 + + az-zoo1 + 1 diff --git a/tests/integration/helpers/keeper_config2.xml b/tests/integration/helpers/keeper_config2.xml index 2a1a1c1003c..2afff2f5e59 100644 --- a/tests/integration/helpers/keeper_config2.xml +++ b/tests/integration/helpers/keeper_config2.xml @@ -12,6 +12,10 @@ 2181 2 + + az-zoo2 + 1 + 10000 diff --git a/tests/integration/helpers/keeper_utils.py b/tests/integration/helpers/keeper_utils.py index 83d0f2969b7..1ca17e923e4 100644 --- a/tests/integration/helpers/keeper_utils.py +++ b/tests/integration/helpers/keeper_utils.py @@ -37,39 +37,59 @@ class KeeperException(Exception): class KeeperClient(object): SEPARATOR = b"\a\a\a\a\n" - def __init__(self, bin_path: str, host: str, port: int): + def __init__(self, bin_path: str, host: str, port: int, connection_tries=30): self.bin_path = bin_path self.host = host self.port = port - self.proc = subprocess.Popen( - [ - bin_path, - "keeper-client", - "--host", - host, - "--port", - str(port), - "--log-level", - "error", - "--tests-mode", - "--no-confirmation", - ], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) + retry_count = 0 - self.poller = select.epoll() - self.poller.register(self.proc.stdout) - self.poller.register(self.proc.stderr) + while True: + try: + self.proc = subprocess.Popen( + [ + bin_path, + "keeper-client", + "--host", + host, + "--port", + str(port), + "--log-level", + "error", + "--tests-mode", + "--no-confirmation", + ], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) - self._fd_nums = { - self.proc.stdout.fileno(): self.proc.stdout, - self.proc.stderr.fileno(): self.proc.stderr, - } + self.poller = select.epoll() + self.poller.register(self.proc.stdout) + 
self.poller.register(self.proc.stderr) - self.stopped = False + self._fd_nums = { + self.proc.stdout.fileno(): self.proc.stdout, + self.proc.stderr.fileno(): self.proc.stderr, + } + + self.stopped = False + + self.get("/keeper", 60.0) + break + except Exception as e: + retry_count += 1 + if ( + "All connection tries failed while connecting to ZooKeeper" + in str(e) + and retry_count < connection_tries + ): + print( + f"Got exception while connecting to Keeper: {e}\nWill reconnect, reconnect count = {retry_count}" + ) + time.sleep(1) + else: + raise def execute_query(self, query: str, timeout: float = 60.0) -> str: output = io.BytesIO() @@ -94,7 +114,7 @@ class KeeperClient(object): output.write(chunk) elif file == self.proc.stderr: - assert self.proc.stdout.readline() == self.SEPARATOR + self.proc.stdout.readline() raise KeeperException(self.proc.stderr.readline().strip().decode()) else: @@ -221,13 +241,12 @@ NOT_SERVING_REQUESTS_ERROR_MSG = "This instance is not currently serving request def wait_until_connected(cluster, node, port=9181, timeout=30.0): - elapsed = 0.0 + start = time.time() while send_4lw_cmd(cluster, node, "mntr", port) == NOT_SERVING_REQUESTS_ERROR_MSG: time.sleep(0.1) - elapsed += 0.1 - if elapsed >= timeout: + if time.time() - start > timeout: raise Exception( f"{timeout}s timeout while waiting for {node.name} to start serving requests" ) @@ -280,14 +299,16 @@ def wait_configs_equal(left_config: str, right_zk: KeeperClient, timeout: float Check whether get /keeper/config result in left_config is equal to get /keeper/config on right_zk ZK connection. """ - elapsed: float = 0.0 - while sorted(left_config.split("\n")) != sorted( - get_config_str(right_zk).split("\n") - ): + start = time.time() + left_config = sorted(left_config.split("\n")) + while True: + right_config = sorted(get_config_str(right_zk).split("\n")) + if left_config == right_config: + return + time.sleep(1) - elapsed += 1 - if elapsed >= timeout: + if time.time() - start > timeout: raise Exception( f"timeout while checking nodes configs to get equal. 
" - f"Left: {left_config}, right: {get_config_str(right_zk)}" + f"Left: {left_config}, right: {right_config}" ) diff --git a/tests/integration/parallel_skip.json b/tests/integration/parallel_skip.json index d056225fee4..33dd85aceaf 100644 --- a/tests/integration/parallel_skip.json +++ b/tests/integration/parallel_skip.json @@ -92,5 +92,9 @@ "test_profile_max_sessions_for_user/test.py::test_profile_max_sessions_for_user_grpc", "test_profile_max_sessions_for_user/test.py::test_profile_max_sessions_for_user_tcp_and_others", "test_profile_max_sessions_for_user/test.py::test_profile_max_sessions_for_user_setting_in_query", - "test_profile_max_sessions_for_user/test.py::test_profile_max_sessions_for_user_client_suggestions_load" + "test_profile_max_sessions_for_user/test.py::test_profile_max_sessions_for_user_client_suggestions_load", + + "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_stop_moves_query", + "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_table_detach", + "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_zookeeper_disconnect" ] diff --git a/tests/integration/runner b/tests/integration/runner index 7be491a9a57..3760bf16b84 100755 --- a/tests/integration/runner +++ b/tests/integration/runner @@ -428,6 +428,7 @@ if __name__ == "__main__": f"--volume={args.library_bridge_binary}:/clickhouse-library-bridge " f"--volume={args.base_configs_dir}:/clickhouse-config " f"--volume={args.cases_dir}:/ClickHouse/tests/integration " + f"--volume={args.utils_dir}/backupview:/ClickHouse/utils/backupview " f"--volume={args.utils_dir}/grpc-client/pb2:/ClickHouse/utils/grpc-client/pb2 " f"--volume=/run:/run/host:ro {dockerd_internal_volume} {env_tags} {env_cleanup} " f"-e DOCKER_CLIENT_TIMEOUT=300 -e COMPOSE_HTTP_TIMEOUT=600 {use_analyzer} -e PYTHONUNBUFFERED=1 " diff --git a/src/Processors/examples/processors_test_aggregation.cpp b/tests/integration/test_backup_restore_keeper_map/__init__.py similarity index 100% rename from src/Processors/examples/processors_test_aggregation.cpp rename to tests/integration/test_backup_restore_keeper_map/__init__.py diff --git a/tests/integration/test_backup_restore_keeper_map/configs/backups_disk.xml b/tests/integration/test_backup_restore_keeper_map/configs/backups_disk.xml new file mode 100644 index 00000000000..b99a51cd56d --- /dev/null +++ b/tests/integration/test_backup_restore_keeper_map/configs/backups_disk.xml @@ -0,0 +1,13 @@ + + + + + local + /backups/ + + + + + backups + + diff --git a/tests/integration/test_backup_restore_keeper_map/configs/keeper_map_path_prefix.xml b/tests/integration/test_backup_restore_keeper_map/configs/keeper_map_path_prefix.xml new file mode 100644 index 00000000000..91d7b9d3f8f --- /dev/null +++ b/tests/integration/test_backup_restore_keeper_map/configs/keeper_map_path_prefix.xml @@ -0,0 +1,3 @@ + + /keeper_map_tables + diff --git a/tests/integration/test_backup_restore_keeper_map/configs/remote_servers.xml b/tests/integration/test_backup_restore_keeper_map/configs/remote_servers.xml new file mode 100644 index 00000000000..5cf07c69fd6 --- /dev/null +++ b/tests/integration/test_backup_restore_keeper_map/configs/remote_servers.xml @@ -0,0 +1,22 @@ + + + + + + node1 + 9000 + + + node2 + 9000 + + + + + node3 + 9000 + + + + + diff --git a/tests/integration/test_backup_restore_keeper_map/configs/zookeeper_retries.xml b/tests/integration/test_backup_restore_keeper_map/configs/zookeeper_retries.xml new file mode 100644 index 
00000000000..1283f28a8cb --- /dev/null +++ b/tests/integration/test_backup_restore_keeper_map/configs/zookeeper_retries.xml @@ -0,0 +1,11 @@ + + + + 1000 + 1 + 1 + 42 + 0.002 + + + diff --git a/tests/integration/test_backup_restore_keeper_map/test.py b/tests/integration/test_backup_restore_keeper_map/test.py new file mode 100644 index 00000000000..c401f482c3f --- /dev/null +++ b/tests/integration/test_backup_restore_keeper_map/test.py @@ -0,0 +1,136 @@ +from time import sleep +import pytest +from helpers.cluster import ClickHouseCluster + + +cluster = ClickHouseCluster(__file__) + +main_configs = [ + "configs/remote_servers.xml", + "configs/backups_disk.xml", + "configs/keeper_map_path_prefix.xml", +] + +user_configs = [ + "configs/zookeeper_retries.xml", +] + +node1 = cluster.add_instance( + "node1", + main_configs=main_configs, + user_configs=user_configs, + external_dirs=["/backups/"], + macros={"replica": "node1", "shard": "shard1"}, + with_zookeeper=True, + stay_alive=True, +) + +node2 = cluster.add_instance( + "node2", + main_configs=main_configs, + user_configs=user_configs, + external_dirs=["/backups/"], + macros={"replica": "node2", "shard": "shard1"}, + with_zookeeper=True, + stay_alive=True, +) + + +node3 = cluster.add_instance( + "node3", + main_configs=main_configs, + user_configs=user_configs, + external_dirs=["/backups/"], + macros={"replica": "node3", "shard": "shard2"}, + with_zookeeper=True, + stay_alive=True, +) + + +@pytest.fixture(scope="module", autouse=True) +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +backup_id_counter = 0 + + +def new_backup_name(base_name): + global backup_id_counter + backup_id_counter += 1 + return f"Disk('backups', '{base_name}{backup_id_counter}')" + + +@pytest.mark.parametrize("deduplicate_files", [0, 1]) +def test_on_cluster(deduplicate_files): + database_name = f"keeper_backup{deduplicate_files}" + node1.query_with_retry(f"CREATE DATABASE {database_name} ON CLUSTER cluster") + node1.query_with_retry( + f"CREATE TABLE {database_name}.keeper1 ON CLUSTER cluster (key UInt64, value String) Engine=KeeperMap('/{database_name}/test_on_cluster1') PRIMARY KEY key" + ) + node1.query_with_retry( + f"CREATE TABLE {database_name}.keeper2 ON CLUSTER cluster (key UInt64, value String) Engine=KeeperMap('/{database_name}/test_on_cluster1') PRIMARY KEY key" + ) + node1.query_with_retry( + f"CREATE TABLE {database_name}.keeper3 ON CLUSTER cluster (key UInt64, value String) Engine=KeeperMap('/{database_name}/test_on_cluster2') PRIMARY KEY key" + ) + node1.query_with_retry( + f"INSERT INTO {database_name}.keeper2 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 5" + ) + node1.query_with_retry( + f"INSERT INTO {database_name}.keeper3 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 5" + ) + + expected_result = "".join(f"{i}\ttest{i}\n" for i in range(5)) + + def verify_data(): + for node in [node1, node2, node3]: + for i in range(1, 4): + result = node.query_with_retry( + f"SELECT key, value FROM {database_name}.keeper{i} ORDER BY key FORMAT TSV" + ) + assert result == expected_result + + verify_data() + + backup_name = new_backup_name("test_on_cluster") + node1.query( + f"BACKUP DATABASE {database_name} ON CLUSTER cluster TO {backup_name} SETTINGS async = false, deduplicate_files = {deduplicate_files};" + ) + + node1.query(f"DROP DATABASE {database_name} ON CLUSTER cluster SYNC;") + + def apply_for_all_nodes(f): + for node in [node1, node2, node3]: + f(node) + + def 
change_keeper_map_prefix(node): + node.replace_config( + "/etc/clickhouse-server/config.d/keeper_map_path_prefix.xml", + """ + + /different_path/keeper_map + +""", + ) + + apply_for_all_nodes(lambda node: node.stop_clickhouse()) + apply_for_all_nodes(change_keeper_map_prefix) + apply_for_all_nodes(lambda node: node.start_clickhouse()) + + node1.query( + f"RESTORE DATABASE {database_name} ON CLUSTER cluster FROM {backup_name} SETTINGS async = false;" + ) + + verify_data() + + node1.query(f"DROP TABLE {database_name}.keeper3 ON CLUSTER cluster SYNC;") + node1.query( + f"RESTORE TABLE {database_name}.keeper3 ON CLUSTER cluster FROM {backup_name} SETTINGS async = false;" + ) + + verify_data() diff --git a/tests/integration/test_backup_restore_new/test.py b/tests/integration/test_backup_restore_new/test.py index 4a26a470aab..cac458f616d 100644 --- a/tests/integration/test_backup_restore_new/test.py +++ b/tests/integration/test_backup_restore_new/test.py @@ -4,11 +4,14 @@ import glob import re import random import os.path +import sys from collections import namedtuple from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry, TSV +script_dir = os.path.dirname(os.path.realpath(__file__)) + cluster = ClickHouseCluster(__file__) instance = cluster.add_instance( "instance", @@ -1559,3 +1562,19 @@ def test_tables_dependency(): ) drop() + + +# Test for the "clickhouse_backupview" utility. + +test_backupview_dir = os.path.abspath( + os.path.join(script_dir, "../../../utils/backupview/test") +) +if test_backupview_dir not in sys.path: + sys.path.append(test_backupview_dir) +import test_backupview as test_backupview_module + + +def test_backupview(): + if instance.is_built_with_sanitizer(): + return # This test is actually for clickhouse_backupview, not for ClickHouse itself. + test_backupview_module.test_backupview_1() diff --git a/tests/integration/test_backup_restore_on_cluster/test_concurrency.py b/tests/integration/test_backup_restore_on_cluster/test_concurrency.py index aea82c6b559..ab37846db9a 100644 --- a/tests/integration/test_backup_restore_on_cluster/test_concurrency.py +++ b/tests/integration/test_backup_restore_on_cluster/test_concurrency.py @@ -214,7 +214,13 @@ def test_create_or_drop_tables_during_backup(db_engine, table_engine): while time.time() < end_time: table_name = f"mydb.tbl{randint(1, num_nodes)}" node = nodes[randint(0, num_nodes - 1)] - node.query(f"DROP TABLE IF EXISTS {table_name} SYNC") + # "DROP TABLE IF EXISTS" still can throw some errors (e.g. "WRITE locking attempt on node0 has timed out!") + # So we use query_and_get_answer_with_error() to ignore any errors. + # `lock_acquire_timeout` is also reduced because we don't wait our test to wait too long. + node.query_and_get_answer_with_error( + f"DROP TABLE IF EXISTS {table_name} SYNC", + settings={"lock_acquire_timeout": 10}, + ) def rename_tables(): while time.time() < end_time: diff --git a/tests/integration/test_backup_restore_s3/configs/blob_log.xml b/tests/integration/test_backup_restore_s3/configs/blob_log.xml new file mode 100644 index 00000000000..474c163b937 --- /dev/null +++ b/tests/integration/test_backup_restore_s3/configs/blob_log.xml @@ -0,0 +1,9 @@ + + + system + blob_storage_log
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <ttl>event_date + INTERVAL 30 DAY</ttl>
+    </blob_storage_log>
+</clickhouse>
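
For context: the blob_log.xml file added above routes blob-storage operation events into the new system.blob_storage_log table, and the changes to tests/integration/test_backup_restore_s3/test.py further below assert against that table. A minimal sketch of that assertion pattern, not part of the diff; the `node` fixture and the helper name are assumptions, and the queries mirror the ones added later in this patch:

def assert_backup_uploads_logged(node, backup_query_id: str) -> None:
    # System log tables are flushed asynchronously, so force a flush first.
    node.query("SYSTEM FLUSH LOGS")
    uploads = int(
        node.query(
            f"SELECT count() FROM system.blob_storage_log"
            f" WHERE query_id = '{backup_query_id}'"
            f" AND error = '' AND event_type = 'Upload'"
        ).strip()
    )
    # At least one successful Upload event should be attributed to the backup query.
    assert uploads >= 1, node.query(
        "SELECT * FROM system.blob_storage_log FORMAT PrettyCompactMonoBlock"
    )
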
diff --git a/tests/integration/test_backup_restore_s3/test.py b/tests/integration/test_backup_restore_s3/test.py index f8ec39d240b..55d40b14ea7 100644 --- a/tests/integration/test_backup_restore_s3/test.py +++ b/tests/integration/test_backup_restore_s3/test.py @@ -12,6 +12,7 @@ node = cluster.add_instance( "configs/disk_s3.xml", "configs/named_collection_s3_backups.xml", "configs/s3_settings.xml", + "configs/blob_log.xml", ], user_configs=[ "configs/zookeeper_retries.xml", @@ -51,10 +52,12 @@ def get_events_for_query(query_id: str) -> Dict[str, int]: """ ) ) - return { + result = { event: int(value) for event, value in [line.split("\t") for line in events.lines] } + result["query_id"] = query_id + return result def format_settings(settings): @@ -118,7 +121,7 @@ def check_backup_and_restore( ) -def check_system_tables(): +def check_system_tables(backup_query_id=None): disks = [ tuple(disk.split("\t")) for disk in node.query("SELECT name, type FROM system.disks").split("\n") @@ -136,6 +139,14 @@ def check_system_tables(): if expected_disk not in disks: raise AssertionError(f"Missed {expected_disk} in {disks}") + if backup_query_id is not None: + blob_storage_log = node.query( + f"SELECT count() FROM system.blob_storage_log WHERE query_id = '{backup_query_id}' AND error = '' AND event_type = 'Upload'" + ).strip() + assert int(blob_storage_log) >= 1, node.query( + "SELECT * FROM system.blob_storage_log FORMAT PrettyCompactMonoBlock" + ) + @pytest.mark.parametrize( "storage_policy, to_disk", @@ -179,8 +190,8 @@ def test_backup_to_s3(): backup_destination = ( f"S3('http://minio1:9001/root/data/backups/{backup_name}', 'minio', 'minio123')" ) - check_backup_and_restore(storage_policy, backup_destination) - check_system_tables() + (backup_events, _) = check_backup_and_restore(storage_policy, backup_destination) + check_system_tables(backup_events["query_id"]) def test_backup_to_s3_named_collection(): @@ -203,6 +214,15 @@ def test_backup_to_s3_multipart(): f"copyDataToS3File: Multipart upload has completed. 
Bucket: root, Key: data/backups/multipart/{backup_name}" ) + backup_query_id = backup_events["query_id"] + blob_storage_log = node.query( + f"SELECT countIf(event_type == 'MultiPartUploadCreate') * countIf(event_type == 'MultiPartUploadComplete') * countIf(event_type == 'MultiPartUploadWrite') " + f"FROM system.blob_storage_log WHERE query_id = '{backup_query_id}' AND error = ''" + ).strip() + assert int(blob_storage_log) >= 1, node.query( + "SELECT * FROM system.blob_storage_log FORMAT PrettyCompactMonoBlock" + ) + s3_backup_events = ( "WriteBufferFromS3Microseconds", "WriteBufferFromS3Bytes", diff --git a/tests/integration/test_backward_compatibility/test.py b/tests/integration/test_backward_compatibility/test.py index 6f21b184a95..847483f2b9b 100644 --- a/tests/integration/test_backward_compatibility/test.py +++ b/tests/integration/test_backward_compatibility/test.py @@ -7,7 +7,7 @@ node1 = cluster.add_instance( "node1", with_zookeeper=True, image="yandex/clickhouse-server", - tag="19.17.8.54", + tag="19.16.9.37", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py b/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py index cf258987cbf..94bc1d3bfc9 100644 --- a/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py +++ b/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py @@ -7,7 +7,7 @@ node1 = cluster.add_instance( "node1", with_zookeeper=True, image="yandex/clickhouse-server", - tag="21.3", + tag="20.8.11.17", with_installed_binary=True, allow_analyzer=False, ) diff --git a/tests/integration/test_backward_compatibility/test_convert_ordinary.py b/tests/integration/test_backward_compatibility/test_convert_ordinary.py index 36facdd59b1..034a68e0f30 100644 --- a/tests/integration/test_backward_compatibility/test_convert_ordinary.py +++ b/tests/integration/test_backward_compatibility/test_convert_ordinary.py @@ -5,7 +5,7 @@ cluster = ClickHouseCluster(__file__) node = cluster.add_instance( "node", image="yandex/clickhouse-server", - tag="19.17.8.54", + tag="19.16.9.37", stay_alive=True, with_zookeeper=True, with_installed_binary=True, diff --git a/tests/integration/test_backward_compatibility/test_cte_distributed.py b/tests/integration/test_backward_compatibility/test_cte_distributed.py index c68468aad75..d47ae3aa255 100644 --- a/tests/integration/test_backward_compatibility/test_cte_distributed.py +++ b/tests/integration/test_backward_compatibility/test_cte_distributed.py @@ -8,7 +8,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=False, image="yandex/clickhouse-server", - tag="21.7.3.14", + tag="21.6", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_functions.py b/tests/integration/test_backward_compatibility/test_functions.py index 94771a624e2..b6b6ef28de5 100644 --- a/tests/integration/test_backward_compatibility/test_functions.py +++ b/tests/integration/test_backward_compatibility/test_functions.py @@ -153,6 +153,9 @@ def test_string_functions(start_cluster): # mandatory or optional). The former lib produces a value based on implicit padding, the latter lib throws an error. "FROM_BASE64", "base64Decode", + # PR #56913 (in v23.11) corrected the way tryBase64Decode() behaved with invalid inputs. Old versions return garbage, new versions + # return an empty string (as it was always documented). 
+ "tryBase64Decode", # Removed in 23.9 "meiliMatch", ] diff --git a/tests/integration/test_backward_compatibility/test_insert_profile_events.py b/tests/integration/test_backward_compatibility/test_insert_profile_events.py index 8564c6b5952..d38bece7855 100644 --- a/tests/integration/test_backward_compatibility/test_insert_profile_events.py +++ b/tests/integration/test_backward_compatibility/test_insert_profile_events.py @@ -11,7 +11,7 @@ upstream_node = cluster.add_instance("upstream_node", allow_analyzer=False) old_node = cluster.add_instance( "old_node", image="clickhouse/clickhouse-server", - tag="22.5.1.2079", + tag="22.6", with_installed_binary=True, allow_analyzer=False, ) diff --git a/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py b/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py index 96b41c81384..5261a279a4f 100644 --- a/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py +++ b/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py @@ -7,7 +7,7 @@ node1 = cluster.add_instance( "node1", with_zookeeper=False, image="yandex/clickhouse-server", - tag="21.1", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, allow_analyzer=False, @@ -16,7 +16,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=False, image="yandex/clickhouse-server", - tag="21.1", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py b/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py index 3cd708d5029..cf7a25e8dc1 100644 --- a/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py +++ b/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py @@ -8,7 +8,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=False, image="yandex/clickhouse-server", - tag="21.7.2.7", + tag="21.6", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py b/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py index 7e10b6ab430..ec1d7fedac5 100644 --- a/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py +++ b/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py @@ -8,7 +8,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=False, image="yandex/clickhouse-server", - tag="21.7.2.7", + tag="21.6", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_checking_s3_blobs_paranoid/configs/inf_s3_retries.xml b/tests/integration/test_checking_s3_blobs_paranoid/configs/inf_s3_retries.xml index 206eb4f2bad..4210c13b727 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/configs/inf_s3_retries.xml +++ b/tests/integration/test_checking_s3_blobs_paranoid/configs/inf_s3_retries.xml @@ -4,6 +4,7 @@ 1000000 + 1
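
The value 1 added to inf_s3_retries.xml above and the 0 added to s3_retries.xml just below (the XML element names were lost in this rendering) appear to correspond to the s3_use_adaptive_timeouts setting that test_adaptive_timeouts reads back from system.settings further down in this patch. A minimal sketch of that read-back, not part of the diff; the helper name is illustrative and the `node` fixtures are assumed to be the same integration-test instances used by the surrounding tests:

def get_server_setting(node, name: str) -> str:
    # Query the effective value of a server-visible setting.
    return node.query(
        f"SELECT value FROM system.settings WHERE name = '{name}'"
    ).strip()

# Expected per test_adaptive_timeouts below (node names as in that test):
#   get_server_setting(node_with_inf_s3_retries, "s3_use_adaptive_timeouts") == "1"
#   get_server_setting(node, "s3_use_adaptive_timeouts") == "0"
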
diff --git a/tests/integration/test_checking_s3_blobs_paranoid/configs/s3_retries.xml b/tests/integration/test_checking_s3_blobs_paranoid/configs/s3_retries.xml index 556bf60d385..95a313ea4f2 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/configs/s3_retries.xml +++ b/tests/integration/test_checking_s3_blobs_paranoid/configs/s3_retries.xml @@ -4,6 +4,7 @@ 5 + 0 diff --git a/tests/integration/test_checking_s3_blobs_paranoid/configs/storage_conf.xml b/tests/integration/test_checking_s3_blobs_paranoid/configs/storage_conf.xml index b77e72d808b..7b1f503ed55 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/configs/storage_conf.xml +++ b/tests/integration/test_checking_s3_blobs_paranoid/configs/storage_conf.xml @@ -7,11 +7,18 @@ + + s3 + http://minio1:9001/root/data/ + minio + minio123 + s3 http://resolver:8083/root/data/ minio minio123 + 1 @@ -23,9 +30,16 @@ + + +
+ s3 +
+
+
- broken_s3 + s3 diff --git a/tests/integration/test_checking_s3_blobs_paranoid/test.py b/tests/integration/test_checking_s3_blobs_paranoid/test.py index d6bcb3fb8f4..1391f1af6f1 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/test.py +++ b/tests/integration/test_checking_s3_blobs_paranoid/test.py @@ -64,6 +64,8 @@ def test_upload_after_check_works(cluster, broken_s3): data String ) ENGINE=MergeTree() ORDER BY id + SETTINGS + storage_policy='broken_s3' """ ) @@ -78,7 +80,7 @@ def test_upload_after_check_works(cluster, broken_s3): assert "suddenly disappeared" in error, error -def get_counters(node, query_id, log_type="ExceptionWhileProcessing"): +def get_multipart_counters(node, query_id, log_type="ExceptionWhileProcessing"): node.query("SYSTEM FLUSH LOGS") return [ int(x) @@ -87,7 +89,25 @@ def get_counters(node, query_id, log_type="ExceptionWhileProcessing"): SELECT ProfileEvents['S3CreateMultipartUpload'], ProfileEvents['S3UploadPart'], - ProfileEvents['S3WriteRequestsErrors'] + ProfileEvents['S3WriteRequestsErrors'], + FROM system.query_log + WHERE query_id='{query_id}' + AND type='{log_type}' + """ + ).split() + if x + ] + + +def get_put_counters(node, query_id, log_type="ExceptionWhileProcessing"): + node.query("SYSTEM FLUSH LOGS") + return [ + int(x) + for x in node.query( + f""" + SELECT + ProfileEvents['S3PutObject'], + ProfileEvents['S3WriteRequestsErrors'], FROM system.query_log WHERE query_id='{query_id}' AND type='{log_type}' @@ -97,9 +117,8 @@ def get_counters(node, query_id, log_type="ExceptionWhileProcessing"): ] -# Add "lz4" compression method in the list after https://github.com/ClickHouse/ClickHouse/issues/50975 is fixed @pytest.mark.parametrize( - "compression", ["none", "gzip", "br", "xz", "zstd", "bz2", "deflate"] + "compression", ["none", "gzip", "br", "xz", "zstd", "bz2", "deflate", "lz4"] ) def test_upload_s3_fail_create_multi_part_upload(cluster, broken_s3, compression): node = cluster.instances["node"] @@ -129,17 +148,16 @@ def test_upload_s3_fail_create_multi_part_upload(cluster, broken_s3, compression assert "Code: 499" in error, error assert "mock s3 injected error" in error, error - count_create_multi_part_uploads, count_upload_parts, count_s3_errors = get_counters( + create_multipart, upload_parts, s3_errors = get_multipart_counters( node, insert_query_id ) - assert count_create_multi_part_uploads == 1 - assert count_upload_parts == 0 - assert count_s3_errors == 1 + assert create_multipart == 1 + assert upload_parts == 0 + assert s3_errors == 1 -# Add "lz4" compression method in the list after https://github.com/ClickHouse/ClickHouse/issues/50975 is fixed @pytest.mark.parametrize( - "compression", ["none", "gzip", "br", "xz", "zstd", "bz2", "deflate"] + "compression", ["none", "gzip", "br", "xz", "zstd", "bz2", "deflate", "lz4"] ) def test_upload_s3_fail_upload_part_when_multi_part_upload( cluster, broken_s3, compression @@ -172,12 +190,12 @@ def test_upload_s3_fail_upload_part_when_multi_part_upload( assert "Code: 499" in error, error assert "mock s3 injected error" in error, error - count_create_multi_part_uploads, count_upload_parts, count_s3_errors = get_counters( + create_multipart, upload_parts, s3_errors = get_multipart_counters( node, insert_query_id ) - assert count_create_multi_part_uploads == 1 - assert count_upload_parts >= 2 - assert count_s3_errors >= 2 + assert create_multipart == 1 + assert upload_parts >= 2 + assert s3_errors >= 2 def test_when_s3_connection_refused_is_retried(cluster, broken_s3): @@ -207,12 +225,12 @@ def 
test_when_s3_connection_refused_is_retried(cluster, broken_s3): query_id=insert_query_id, ) - count_create_multi_part_uploads, count_upload_parts, count_s3_errors = get_counters( + create_multipart, upload_parts, s3_errors = get_multipart_counters( node, insert_query_id, log_type="QueryFinish" ) - assert count_create_multi_part_uploads == 1 - assert count_upload_parts == 39 - assert count_s3_errors == 3 + assert create_multipart == 1 + assert upload_parts == 39 + assert s3_errors == 3 broken_s3.setup_at_part_upload(count=1000, after=2, action="connection_refused") insert_query_id = f"INSERT_INTO_TABLE_FUNCTION_CONNECTION_REFUSED_RETRIED_1" @@ -279,13 +297,13 @@ def test_when_s3_connection_reset_by_peer_at_upload_is_retried( query_id=insert_query_id, ) - count_create_multi_part_uploads, count_upload_parts, count_s3_errors = get_counters( + create_multipart, upload_parts, s3_errors = get_multipart_counters( node, insert_query_id, log_type="QueryFinish" ) - assert count_create_multi_part_uploads == 1 - assert count_upload_parts == 39 - assert count_s3_errors == 3 + assert create_multipart == 1 + assert upload_parts == 39 + assert s3_errors == 3 broken_s3.setup_at_part_upload( count=1000, @@ -361,13 +379,13 @@ def test_when_s3_connection_reset_by_peer_at_create_mpu_retried( query_id=insert_query_id, ) - count_create_multi_part_uploads, count_upload_parts, count_s3_errors = get_counters( + create_multipart, upload_parts, s3_errors = get_multipart_counters( node, insert_query_id, log_type="QueryFinish" ) - assert count_create_multi_part_uploads == 1 - assert count_upload_parts == 39 - assert count_s3_errors == 3 + assert create_multipart == 1 + assert upload_parts == 39 + assert s3_errors == 3 broken_s3.setup_at_create_multi_part_upload( count=1000, @@ -438,13 +456,13 @@ def test_when_s3_broken_pipe_at_upload_is_retried(cluster, broken_s3): query_id=insert_query_id, ) - count_create_multi_part_uploads, count_upload_parts, count_s3_errors = get_counters( + create_multipart, upload_parts, s3_errors = get_multipart_counters( node, insert_query_id, log_type="QueryFinish" ) - assert count_create_multi_part_uploads == 1 - assert count_upload_parts == 7 - assert count_s3_errors == 3 + assert create_multipart == 1 + assert upload_parts == 7 + assert s3_errors == 3 broken_s3.setup_at_part_upload( count=1000, @@ -533,3 +551,60 @@ def test_query_is_canceled_with_inf_retries(cluster, broken_s3): retry_count=120, sleep_time=1, ) + + +@pytest.mark.parametrize("node_name", ["node", "node_with_inf_s3_retries"]) +def test_adaptive_timeouts(cluster, broken_s3, node_name): + node = cluster.instances[node_name] + + broken_s3.setup_fake_puts(part_length=1) + broken_s3.setup_slow_answers( + timeout=5, + count=1000000, + ) + + insert_query_id = f"TEST_ADAPTIVE_TIMEOUTS_{node_name}" + node.query( + f""" + INSERT INTO + TABLE FUNCTION s3( + 'http://resolver:8083/root/data/adaptive_timeouts', + 'minio', 'minio123', + 'CSV', auto, 'none' + ) + SELECT + * + FROM system.numbers + LIMIT 1 + SETTINGS + s3_request_timeout_ms=30000, + s3_check_objects_after_upload=0 + """, + query_id=insert_query_id, + ) + + broken_s3.reset() + + put_objects, s3_errors = get_put_counters( + node, insert_query_id, log_type="QueryFinish" + ) + + assert put_objects == 1 + + s3_use_adaptive_timeouts = node.query( + f""" + SELECT + value + FROM system.settings + WHERE + name='s3_use_adaptive_timeouts' + """ + ).strip() + + if node_name == "node_with_inf_s3_retries": + # first 2 attempts failed + assert s3_use_adaptive_timeouts == "1" + assert 
s3_errors == 1 + else: + assert s3_use_adaptive_timeouts == "0" + assert s3_errors == 0 diff --git a/tests/integration/test_config_substitutions/configs/000-config_with_env_subst.xml b/tests/integration/test_config_substitutions/configs/000-config_with_env_subst.xml new file mode 100644 index 00000000000..ffa26488874 --- /dev/null +++ b/tests/integration/test_config_substitutions/configs/000-config_with_env_subst.xml @@ -0,0 +1,17 @@ + + + + + + + + + + default + default + + + + + + diff --git a/tests/integration/test_config_substitutions/configs/010-env_subst_override.xml b/tests/integration/test_config_substitutions/configs/010-env_subst_override.xml new file mode 100644 index 00000000000..2d768b05172 --- /dev/null +++ b/tests/integration/test_config_substitutions/configs/010-env_subst_override.xml @@ -0,0 +1,17 @@ + + + + 424242 + + + + + + default + default + + + + + + diff --git a/tests/integration/test_config_substitutions/test.py b/tests/integration/test_config_substitutions/test.py index 692b36f1fae..46961e5da71 100644 --- a/tests/integration/test_config_substitutions/test.py +++ b/tests/integration/test_config_substitutions/test.py @@ -30,6 +30,15 @@ node6 = cluster.add_instance( }, main_configs=["configs/include_from_source.xml"], ) +node7 = cluster.add_instance( + "node7", + user_configs=[ + "configs/000-config_with_env_subst.xml", + "configs/010-env_subst_override.xml", + ], + env_variables={"MAX_QUERY_SIZE": "121212"}, + instance_env_variables=True, +) # overridden with 424242 @pytest.fixture(scope="module") @@ -78,6 +87,10 @@ def test_config(start_cluster): node6.query("select value from system.settings where name = 'max_query_size'") == "99999\n" ) + assert ( + node7.query("select value from system.settings where name = 'max_query_size'") + == "424242\n" + ) def test_include_config(start_cluster): diff --git a/tests/integration/test_default_compression_codec/test.py b/tests/integration/test_default_compression_codec/test.py index 82d5eb04d2a..ffe22c62325 100644 --- a/tests/integration/test_default_compression_codec/test.py +++ b/tests/integration/test_default_compression_codec/test.py @@ -27,9 +27,9 @@ node2 = cluster.add_instance( ) node3 = cluster.add_instance( "node3", - main_configs=["configs/default_compression.xml", "configs/wide_parts_only.xml"], + main_configs=["configs/default_compression.xml"], image="yandex/clickhouse-server", - tag="20.3.16", + tag="19.16.9.37", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/src/Processors/examples/processors_test_merge_sorting_transform.cpp b/tests/integration/test_dictionaries_wait_for_load/__init__.py similarity index 100% rename from src/Processors/examples/processors_test_merge_sorting_transform.cpp rename to tests/integration/test_dictionaries_wait_for_load/__init__.py diff --git a/tests/integration/test_dictionaries_wait_for_load/configs/no_dictionaries_lazy_load.xml b/tests/integration/test_dictionaries_wait_for_load/configs/no_dictionaries_lazy_load.xml new file mode 100644 index 00000000000..aaae3e0c4c1 --- /dev/null +++ b/tests/integration/test_dictionaries_wait_for_load/configs/no_dictionaries_lazy_load.xml @@ -0,0 +1,3 @@ + + 0 + diff --git a/tests/integration/test_dictionaries_wait_for_load/dictionaries/long_loading_dictionary.xml b/tests/integration/test_dictionaries_wait_for_load/dictionaries/long_loading_dictionary.xml new file mode 100644 index 00000000000..8b9d47833e4 --- /dev/null +++ b/tests/integration/test_dictionaries_wait_for_load/dictionaries/long_loading_dictionary.xml 
@@ -0,0 +1,29 @@ + + + long_loading_dictionary + + + + sleep 9 && echo "key,value" && echo "1,aa" && echo "2,bb" + CSVWithNames + + + + 600 + + + + + + + + key + + + value + String + + + + + diff --git a/tests/integration/test_dictionaries_wait_for_load/test.py b/tests/integration/test_dictionaries_wait_for_load/test.py new file mode 100644 index 00000000000..b30cc61abce --- /dev/null +++ b/tests/integration/test_dictionaries_wait_for_load/test.py @@ -0,0 +1,48 @@ +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import TSV + + +DICTIONARY_FILES = [ + "dictionaries/long_loading_dictionary.xml", +] + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/no_dictionaries_lazy_load.xml"], + dictionaries=DICTIONARY_FILES, +) + +node0 = cluster.add_instance( + "node0", + dictionaries=DICTIONARY_FILES, +) + + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def get_status(instance, dictionary_name): + return instance.query( + "SELECT status FROM system.dictionaries WHERE name='" + dictionary_name + "'" + ).rstrip("\n") + + +def test_wait_for_dictionaries_load(): + assert get_status(node1, "long_loading_dictionary") == "LOADED" + assert node1.query("SELECT * FROM dictionary(long_loading_dictionary)") == TSV( + [[1, "aa"], [2, "bb"]] + ) + + assert get_status(node0, "long_loading_dictionary") == "NOT_LOADED" + assert node0.query("SELECT * FROM dictionary(long_loading_dictionary)") == TSV( + [[1, "aa"], [2, "bb"]] + ) + assert get_status(node0, "long_loading_dictionary") == "LOADED" diff --git a/tests/integration/test_disk_over_web_server/test.py b/tests/integration/test_disk_over_web_server/test.py index 7695d235425..a71fdeff302 100644 --- a/tests/integration/test_disk_over_web_server/test.py +++ b/tests/integration/test_disk_over_web_server/test.py @@ -38,7 +38,7 @@ def cluster(): stay_alive=True, with_installed_binary=True, image="clickhouse/clickhouse-server", - tag="22.8.14.53", + tag="22.6", allow_analyzer=False, ) diff --git a/tests/integration/test_distributed_backward_compatability/test.py b/tests/integration/test_distributed_backward_compatability/test.py index c48a7ad1fa1..319a4c08e60 100644 --- a/tests/integration/test_distributed_backward_compatability/test.py +++ b/tests/integration/test_distributed_backward_compatability/test.py @@ -8,7 +8,7 @@ node_old = cluster.add_instance( "node1", main_configs=["configs/remote_servers.xml"], image="yandex/clickhouse-server", - tag="20.8.9.6", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_distributed_insert_backward_compatibility/test.py b/tests/integration/test_distributed_insert_backward_compatibility/test.py index 1e566d5e2da..7cfea61ffff 100644 --- a/tests/integration/test_distributed_insert_backward_compatibility/test.py +++ b/tests/integration/test_distributed_insert_backward_compatibility/test.py @@ -11,7 +11,7 @@ node_dist = cluster.add_instance( "node2", main_configs=["configs/remote_servers.xml"], image="yandex/clickhouse-server", - tag="21.11.9.1", + tag="21.6", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_distributed_inter_server_secret/test.py b/tests/integration/test_distributed_inter_server_secret/test.py index 4276fcffbf9..a5b353cc030 100644 --- a/tests/integration/test_distributed_inter_server_secret/test.py +++ 
b/tests/integration/test_distributed_inter_server_secret/test.py @@ -31,7 +31,7 @@ backward = make_instance( "configs/remote_servers_backward.xml", image="clickhouse/clickhouse-server", # version without DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET_V2 - tag="23.2.3", + tag="22.6", with_installed_binary=True, allow_analyzer=False, ) @@ -304,26 +304,20 @@ def test_secure_insert_buffer_async(): def test_secure_disagree(): - with pytest.raises( - QueryRuntimeException, match=".*Interserver authentication failed.*" - ): + with pytest.raises(QueryRuntimeException): n1.query("SELECT * FROM dist_secure_disagree") def test_secure_disagree_insert(): n1.query("TRUNCATE TABLE data") n1.query("INSERT INTO dist_secure_disagree SELECT * FROM numbers(2)") - with pytest.raises( - QueryRuntimeException, match=".*Interserver authentication failed.*" - ): + with pytest.raises(QueryRuntimeException): n1.query( "SYSTEM FLUSH DISTRIBUTED ON CLUSTER secure_disagree dist_secure_disagree" ) - # check the the connection will be re-established + # check that the connection will be re-established # IOW that we will not get "Unknown BlockInfo field" - with pytest.raises( - QueryRuntimeException, match=".*Interserver authentication failed.*" - ): + with pytest.raises(QueryRuntimeException): assert int(n1.query("SELECT count() FROM dist_secure_disagree")) == 0 diff --git a/tests/integration/test_distributed_storage_configuration/configs/config.d/overrides.xml b/tests/integration/test_distributed_storage_configuration/configs/config.d/overrides.xml index 91a22a81a22..e1e2444992a 100644 --- a/tests/integration/test_distributed_storage_configuration/configs/config.d/overrides.xml +++ b/tests/integration/test_distributed_storage_configuration/configs/config.d/overrides.xml @@ -27,14 +27,14 @@ - +
disk1 disk2
-
+
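The test.py change in the next file points the Distributed table at this renamed policy: the Distributed engine accepts a storage policy name as an optional fifth argument, and that policy decides which disks hold the queued blocks of asynchronous distributed INSERTs before they are flushed to the shards. A minimal sketch of such a table, assuming a cluster instance handle named node, a cluster named test_cluster and a local table named underlying (all illustrative, not taken from this test):

    # Illustrative sketch: a Distributed table whose async INSERT queue lives on
    # the 'jbod_policy' policy (disk1 + disk2) instead of the default disk.
    node.query(
        """
        CREATE TABLE dist_over_jbod AS underlying
        ENGINE = Distributed(test_cluster, currentDatabase(), underlying, rand(), 'jbod_policy')
        """
    )
    # Async INSERTs are first written as queued blocks under the policy's disks,
    # then sent to the shards in the background.
    node.query("INSERT INTO dist_over_jbod SELECT number FROM numbers(10)")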
diff --git a/tests/integration/test_distributed_storage_configuration/test.py b/tests/integration/test_distributed_storage_configuration/test.py index b0e17da37b2..00620668bd9 100644 --- a/tests/integration/test_distributed_storage_configuration/test.py +++ b/tests/integration/test_distributed_storage_configuration/test.py @@ -53,7 +53,7 @@ def test_insert(start_cluster): test, foo, key%2, - 'default' + 'jbod_policy' ) """ ) diff --git a/tests/integration/test_filesystem_cache/test.py b/tests/integration/test_filesystem_cache/test.py index be7b12946a7..3a6a1ef76eb 100644 --- a/tests/integration/test_filesystem_cache/test.py +++ b/tests/integration/test_filesystem_cache/test.py @@ -46,7 +46,7 @@ def test_parallel_cache_loading_on_startup(cluster, node_name): path = 'paralel_loading_test', disk = 'hdd_blob', max_file_segment_size = '1Ki', - boundary_alignemt = '1Ki', + boundary_alignment = '1Ki', max_size = '1Gi', max_elements = 10000000, load_metadata_threads = 30); diff --git a/tests/integration/test_groupBitmapAnd_on_distributed/test.py b/tests/integration/test_groupBitmapAnd_on_distributed/test.py index 8cf7e0fb2c1..5d3dda8ecf2 100644 --- a/tests/integration/test_groupBitmapAnd_on_distributed/test.py +++ b/tests/integration/test_groupBitmapAnd_on_distributed/test.py @@ -26,7 +26,7 @@ node4 = cluster.add_instance( "node4", main_configs=["configs/clusters.xml"], image="yandex/clickhouse-server", - tag="21.5", + tag="21.6", with_zookeeper=True, allow_analyzer=False, ) diff --git a/tests/queries/0_stateless/02869_gcd_codec_test_incorrect_type.reference b/tests/integration/test_hot_reload_storage_policy/__init__.py similarity index 100% rename from tests/queries/0_stateless/02869_gcd_codec_test_incorrect_type.reference rename to tests/integration/test_hot_reload_storage_policy/__init__.py diff --git a/tests/integration/test_hot_reload_storage_policy/configs/storage_configuration.xml b/tests/integration/test_hot_reload_storage_policy/configs/storage_configuration.xml new file mode 100644 index 00000000000..466ecde137d --- /dev/null +++ b/tests/integration/test_hot_reload_storage_policy/configs/storage_configuration.xml @@ -0,0 +1,21 @@ + + + + + /var/lib/clickhouse/disk0/ + + + /var/lib/clickhouse/disk1/ + + + + + + + disk0 + + + + + + \ No newline at end of file diff --git a/tests/integration/test_hot_reload_storage_policy/test.py b/tests/integration/test_hot_reload_storage_policy/test.py new file mode 100644 index 00000000000..8654b0462e4 --- /dev/null +++ b/tests/integration/test_hot_reload_storage_policy/test.py @@ -0,0 +1,86 @@ +import os +import sys +import time + +import pytest + +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) +node0 = cluster.add_instance( + "node0", with_zookeeper=True, main_configs=["configs/storage_configuration.xml"] +) +node1 = cluster.add_instance( + "node1", with_zookeeper=True, main_configs=["configs/storage_configuration.xml"] +) + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +new_disk_config = """ + + + + + /var/lib/clickhouse/disk0/ + + + /var/lib/clickhouse/disk1/ + + + /var/lib/clickhouse/disk2/ + + + + + + + disk2 + disk1 + disk0 + + + + + + +""" + + +def set_config(node, config): + node.replace_config( + "/etc/clickhouse-server/config.d/storage_configuration.xml", config + ) + node.query("SYSTEM RELOAD 
CONFIG") + + +def test_hot_reload_policy(started_cluster): + node0.query( + "CREATE TABLE t (d Int32, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/t', '0') PARTITION BY d ORDER BY tuple() SETTINGS storage_policy = 'default_policy'" + ) + node0.query("INSERT INTO TABLE t VALUES (1, 'foo') (1, 'bar')") + + node1.query( + "CREATE TABLE t (d Int32, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/t_mirror', '1') PARTITION BY d ORDER BY tuple() SETTINGS storage_policy = 'default_policy'" + ) + set_config(node1, new_disk_config) + time.sleep(1) + node1.query("ALTER TABLE t FETCH PARTITION 1 FROM '/clickhouse/tables/t'") + result = int(node1.query("SELECT count() FROM t")) + assert ( + result == 4, + "Node should have 2 x full data (4 rows) after reloading storage configuration and fetch new partition, but get {} rows".format( + result + ), + ) diff --git a/tests/integration/test_keeper_auth/test.py b/tests/integration/test_keeper_auth/test.py index e247984cc6a..78fbf84bbe2 100644 --- a/tests/integration/test_keeper_auth/test.py +++ b/tests/integration/test_keeper_auth/test.py @@ -1,6 +1,7 @@ import pytest import time from helpers.cluster import ClickHouseCluster +from helpers import keeper_utils from kazoo.client import KazooClient, KazooState from kazoo.security import ACL, make_digest_acl, make_acl from kazoo.exceptions import ( @@ -26,6 +27,7 @@ SUPERAUTH = "super:admin" def started_cluster(): try: cluster.start() + keeper_utils.wait_until_connected(cluster, node) yield cluster diff --git a/tests/integration/test_keeper_availability_zone/__init__.py b/tests/integration/test_keeper_availability_zone/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_keeper_availability_zone/configs/keeper_config.xml b/tests/integration/test_keeper_availability_zone/configs/keeper_config.xml new file mode 100644 index 00000000000..3cbf717bb67 --- /dev/null +++ b/tests/integration/test_keeper_availability_zone/configs/keeper_config.xml @@ -0,0 +1,2 @@ + + diff --git a/tests/integration/test_keeper_availability_zone/test.py b/tests/integration/test_keeper_availability_zone/test.py new file mode 100644 index 00000000000..a2003f8539e --- /dev/null +++ b/tests/integration/test_keeper_availability_zone/test.py @@ -0,0 +1,38 @@ +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.keeper_utils import KeeperClient + + +cluster = ClickHouseCluster(__file__) + +node = cluster.add_instance( + "node", + main_configs=["configs/keeper_config.xml"], + with_zookeeper=True, + stay_alive=True, +) + + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def test_get_availability_zone(): + with KeeperClient.from_cluster(cluster, "zoo1") as client1: + assert client1.get("/keeper/availability_zone") == "az-zoo1" + + # Keeper2 set enable_auto_detection_on_cloud to true, but is ignored and az-zoo2 is used. + with KeeperClient.from_cluster(cluster, "zoo2") as client2: + assert client2.get("/keeper/availability_zone") == "az-zoo2" + assert "availability_zone" in client2.ls("/keeper") + + # keeper3 is not configured with availability_zone value. 
+ with KeeperClient.from_cluster(cluster, "zoo3") as client3: + with pytest.raises(Exception): + client3.get("/keeper/availability_zone") diff --git a/tests/integration/test_keeper_broken_logs/__init__.py b/tests/integration/test_keeper_broken_logs/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_keeper_broken_logs/configs/enable_keeper1.xml b/tests/integration/test_keeper_broken_logs/configs/enable_keeper1.xml new file mode 100644 index 00000000000..870326838e6 --- /dev/null +++ b/tests/integration/test_keeper_broken_logs/configs/enable_keeper1.xml @@ -0,0 +1,44 @@ + + + false + 9181 + 1 + /var/lib/clickhouse/coordination/log + /var/lib/clickhouse/coordination/snapshots + false + + + 5000 + 10000 + 75 + trace + false + + + + + 1 + node1 + 9234 + true + 3 + + + 2 + node2 + 9234 + true + true + 2 + + + 3 + node3 + 9234 + true + true + 1 + + + + diff --git a/tests/integration/test_keeper_broken_logs/configs/enable_keeper2.xml b/tests/integration/test_keeper_broken_logs/configs/enable_keeper2.xml new file mode 100644 index 00000000000..ee2ff903dff --- /dev/null +++ b/tests/integration/test_keeper_broken_logs/configs/enable_keeper2.xml @@ -0,0 +1,43 @@ + + + false + 9181 + 2 + /var/lib/clickhouse/coordination/log + /var/lib/clickhouse/coordination/snapshots + + + 5000 + 10000 + 75 + trace + false + + + + + 1 + node1 + 9234 + true + 3 + + + 2 + node2 + 9234 + true + true + 2 + + + 3 + node3 + 9234 + true + true + 1 + + + + diff --git a/tests/integration/test_keeper_broken_logs/configs/enable_keeper3.xml b/tests/integration/test_keeper_broken_logs/configs/enable_keeper3.xml new file mode 100644 index 00000000000..a16fea43125 --- /dev/null +++ b/tests/integration/test_keeper_broken_logs/configs/enable_keeper3.xml @@ -0,0 +1,43 @@ + + + false + 9181 + 3 + /var/lib/clickhouse/coordination/log + /var/lib/clickhouse/coordination/snapshots + + + 5000 + 10000 + 75 + trace + false + + + + + 1 + node1 + 9234 + true + 3 + + + 2 + node2 + 9234 + true + true + 2 + + + 3 + node3 + 9234 + true + true + 1 + + + + diff --git a/tests/integration/test_keeper_broken_logs/test.py b/tests/integration/test_keeper_broken_logs/test.py new file mode 100644 index 00000000000..49b8d985ee8 --- /dev/null +++ b/tests/integration/test_keeper_broken_logs/test.py @@ -0,0 +1,129 @@ +import pytest +from helpers.cluster import ClickHouseCluster +import helpers.keeper_utils as keeper_utils +import time + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/enable_keeper1.xml"], + stay_alive=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/enable_keeper2.xml"], + stay_alive=True, +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/enable_keeper3.xml"], + stay_alive=True, +) + +from kazoo.client import KazooClient, KazooState + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + + yield cluster + + finally: + cluster.shutdown() + + +def smaller_exception(ex): + return "\n".join(str(ex).split("\n")[0:2]) + + +def wait_nodes(): + keeper_utils.wait_nodes(cluster, [node1, node2, node3]) + + +def get_fake_zk(nodename, timeout=30.0): + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) + _fake_zk_instance.start() + return _fake_zk_instance + + +def test_single_node_broken_log(started_cluster): + try: + wait_nodes() + node1_conn = get_fake_zk("node1") + + # Cleanup + if node1_conn.exists("/test_broken_log") 
!= None: + node1_conn.delete("/test_broken_log") + + node1_conn.create("/test_broken_log") + for _ in range(10): + node1_conn.create(f"/test_broken_log/node", b"somedata1", sequence=True) + + def verify_nodes(zk_conn): + children = zk_conn.get_children("/test_broken_log") + assert len(children) == 10 + + for child in children: + assert zk_conn.get("/test_broken_log/" + child)[0] == b"somedata1" + + verify_nodes(node1_conn) + + node1_conn.stop() + node1_conn.close() + + node1.stop_clickhouse() + + # wait until cluster stabilizes with a new leader + while not keeper_utils.is_leader( + started_cluster, node2 + ) and not keeper_utils.is_leader(started_cluster, node3): + time.sleep(1) + + node1.exec_in_container( + [ + "truncate", + "-s", + "-50", + "/var/lib/clickhouse/coordination/log/changelog_1_100000.bin", + ] + ) + node1.start_clickhouse() + keeper_utils.wait_until_connected(cluster, node1) + + node1_conn = get_fake_zk("node1") + node1_conn.create(f"/test_broken_log_final_node", b"somedata1") + + verify_nodes(node1_conn) + assert node1_conn.get("/test_broken_log_final_node")[0] == b"somedata1" + + node2_conn = get_fake_zk("node2") + verify_nodes(node2_conn) + assert node2_conn.get("/test_broken_log_final_node")[0] == b"somedata1" + + node3_conn = get_fake_zk("node2") + verify_nodes(node3_conn) + assert node3_conn.get("/test_broken_log_final_node")[0] == b"somedata1" + + assert ( + node1.exec_in_container(["ls", "/var/lib/clickhouse/coordination/log"]) + == "changelog_1_100000.bin\nchangelog_14_100013.bin\n" + ) + assert ( + node2.exec_in_container(["ls", "/var/lib/clickhouse/coordination/log"]) + == "changelog_1_100000.bin\n" + ) + assert ( + node3.exec_in_container(["ls", "/var/lib/clickhouse/coordination/log"]) + == "changelog_1_100000.bin\n" + ) + finally: + try: + for zk_conn in [node1_conn, node2_conn, node3_conn]: + zk_conn.stop() + zk_conn.close() + except: + pass diff --git a/tests/integration/test_keeper_four_word_command/test.py b/tests/integration/test_keeper_four_word_command/test.py index 71501133ae7..84dd2a2fd93 100644 --- a/tests/integration/test_keeper_four_word_command/test.py +++ b/tests/integration/test_keeper_four_word_command/test.py @@ -287,7 +287,7 @@ def test_cmd_conf(started_cluster): assert result["quorum_reads"] == "false" assert result["force_sync"] == "true" - assert result["compress_logs"] == "true" + assert result["compress_logs"] == "false" assert result["compress_snapshots_with_zstd_format"] == "true" assert result["configuration_change_tries_count"] == "20" diff --git a/tests/integration/test_keeper_reconfig_replace_leader/test.py b/tests/integration/test_keeper_reconfig_replace_leader/test.py index 4cdd48fcf7c..8e621eef279 100644 --- a/tests/integration/test_keeper_reconfig_replace_leader/test.py +++ b/tests/integration/test_keeper_reconfig_replace_leader/test.py @@ -3,6 +3,7 @@ import pytest from helpers.cluster import ClickHouseCluster, ClickHouseInstance from os.path import join, dirname, realpath +import time import helpers.keeper_utils as ku import typing as tp @@ -83,6 +84,12 @@ def test_reconfig_replace_leader(started_cluster): assert "node3" in config assert "node4" not in config + # wait until cluster stabilizes with a new leader + while not ku.is_leader(started_cluster, node2) and not ku.is_leader( + started_cluster, node3 + ): + time.sleep(1) + # additional 20s wait before removing leader ku.wait_configs_equal(config, zk2, timeout=50) diff --git a/tests/integration/test_max_suspicious_broken_parts_replicated/test.py 
b/tests/integration/test_max_suspicious_broken_parts_replicated/test.py index 6226240df56..0d009e6b132 100644 --- a/tests/integration/test_max_suspicious_broken_parts_replicated/test.py +++ b/tests/integration/test_max_suspicious_broken_parts_replicated/test.py @@ -81,12 +81,51 @@ def test_unexpected_uncommitted_merge(): detach_table("broken_table") attach_table("broken_table") - assert node.query("SELECT sum(key) FROM broken_table") == "190\n" + # it's not readonly + node.query("INSERT INTO broken_table SELECT 1") + + assert node.query("SELECT sum(key) FROM broken_table") == "191\n" assert ( node.query( "SELECT name FROM system.parts where table = 'broken_table' and active order by name" ) - == "all_0_0_0\nall_1_1_0\n" + == "all_0_0_0\nall_1_1_0\nall_2_2_0\n" + ) + + +def test_unexpected_uncommitted_mutation(): + node.query( + """ + CREATE TABLE broken_table0 (key Int) ENGINE = ReplicatedMergeTree('/tables/broken0', '1') ORDER BY tuple() + SETTINGS max_suspicious_broken_parts = 0, replicated_max_ratio_of_wrong_parts=0, old_parts_lifetime=100500, sleep_before_loading_outdated_parts_ms=10000""" + ) + + node.query("INSERT INTO broken_table0 SELECT number from numbers(10)") + + node.query( + "ALTER TABLE broken_table0 UPDATE key = key * 10 WHERE 1 SETTINGS mutations_sync=1" + ) + + assert node.query("SELECT sum(key) FROM broken_table0") == "450\n" + assert ( + node.query( + "SELECT name FROM system.parts where table = 'broken_table0' and active" + ) + == "all_0_0_0_1\n" + ) + + remove_part_from_zookeeper("/tables/broken0/replicas/1", "all_0_0_0_1") + + detach_table("broken_table0") + attach_table("broken_table0") + + node.query("INSERT INTO broken_table0 SELECT 1") + + # it may remain 45 if the nutation was finalized + sum_key = node.query("SELECT sum(key) FROM broken_table0") + assert sum_key == "46\n" or sum_key == "451\n" + assert "all_0_0_0_1" in node.query( + "SELECT name FROM system.detached_parts where table = 'broken_table0'" ) diff --git a/tests/integration/test_merge_tree_s3/configs/config.d/blob_log.xml b/tests/integration/test_merge_tree_s3/configs/config.d/blob_log.xml new file mode 100644 index 00000000000..474c163b937 --- /dev/null +++ b/tests/integration/test_merge_tree_s3/configs/config.d/blob_log.xml @@ -0,0 +1,9 @@ + + + system + blob_storage_log
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <ttl>event_date + INTERVAL 30 DAY</ttl>
+    </blob_storage_log>
+</clickhouse>
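The tests below use this new system.blob_storage_log table by tagging a write with an explicit query_id and then filtering the log on it. A minimal sketch of that pattern, reusing names that are already in scope in test_merge_tree_s3/test.py (node, generate_values, the s3_test table); the row count passed to generate_values is arbitrary here:

    import uuid

    # Tag the INSERT so its blob-storage operations can be found afterwards.
    insert_query_id = uuid.uuid4().hex
    node.query(
        "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 1024)),
        query_id=insert_query_id,
    )

    # System log tables are flushed asynchronously; force a flush before reading.
    node.query("SYSTEM FLUSH LOGS")

    # Each successful S3 write of that query shows up as an 'Upload' or
    # 'MultiPartUploadWrite' event with an empty error column.
    uploads = node.query(
        "SELECT count() FROM system.blob_storage_log"
        f" WHERE query_id = '{insert_query_id}'"
        " AND event_type IN ('Upload', 'MultiPartUploadWrite') AND error = ''"
    )
    assert int(uploads) > 0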
diff --git a/tests/integration/test_merge_tree_s3/configs/config.d/users.xml b/tests/integration/test_merge_tree_s3/configs/config.d/users.xml index 3daa6f06a78..79e5091b28a 100644 --- a/tests/integration/test_merge_tree_s3/configs/config.d/users.xml +++ b/tests/integration/test_merge_tree_s3/configs/config.d/users.xml @@ -3,6 +3,7 @@ 1 20 + 0 diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py index 3b2f1c0f6a6..9216b08f942 100644 --- a/tests/integration/test_merge_tree_s3/test.py +++ b/tests/integration/test_merge_tree_s3/test.py @@ -1,6 +1,7 @@ import logging import time import os +import uuid import pytest from helpers.cluster import ClickHouseCluster @@ -10,7 +11,6 @@ from helpers.wait_for_helpers import wait_for_delete_inactive_parts from helpers.wait_for_helpers import wait_for_delete_empty_parts from helpers.wait_for_helpers import wait_for_merges - SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) @@ -24,6 +24,7 @@ def cluster(): "configs/config.xml", "configs/config.d/storage_conf.xml", "configs/config.d/bg_processing_pool_conf.xml", + "configs/config.d/blob_log.xml", ], user_configs=[ "configs/config.d/users.xml", @@ -37,6 +38,7 @@ def cluster(): main_configs=[ "configs/config.d/storage_conf.xml", "configs/config.d/bg_processing_pool_conf.xml", + "configs/config.d/blob_log.xml", ], with_minio=True, tmpfs=[ @@ -126,17 +128,22 @@ def list_objects(cluster, path="data/", hint="list_objects"): def wait_for_delete_s3_objects(cluster, expected, timeout=30): while timeout > 0: - if len(list_objects(cluster, "data/")) == expected: - return + existing_objects = list_objects(cluster, "data/") + if len(existing_objects) == expected: + return existing_objects timeout -= 1 time.sleep(1) - assert len(list_objects(cluster, "data/")) == expected + existing_objects = list_objects(cluster, "data/") + assert len(existing_objects) == expected + return existing_objects def remove_all_s3_objects(cluster): minio = cluster.minio_client - for obj in list_objects(cluster, "data/"): + objects_to_delete = list_objects(cluster, "data/") + for obj in objects_to_delete: minio.remove_object(cluster.minio_bucket, obj.object_name) + return objects_to_delete @pytest.fixture(autouse=True, scope="function") @@ -155,7 +162,7 @@ def clear_minio(cluster): def check_no_objects_after_drop(cluster, table_name="s3_test", node_name="node"): node = cluster.instances[node_name] node.query(f"DROP TABLE IF EXISTS {table_name} SYNC") - wait_for_delete_s3_objects(cluster, 0, timeout=0) + return wait_for_delete_s3_objects(cluster, 0, timeout=0) @pytest.mark.parametrize( @@ -173,10 +180,32 @@ def test_simple_insert_select( minio = cluster.minio_client values1 = generate_values("2020-01-03", 4096) - node.query("INSERT INTO s3_test VALUES {}".format(values1)) + insert_query_id = uuid.uuid4().hex + + node.query( + "INSERT INTO s3_test VALUES {}".format(values1), query_id=insert_query_id + ) assert node.query("SELECT * FROM s3_test order by dt, id FORMAT Values") == values1 assert len(list_objects(cluster, "data/")) == FILES_OVERHEAD + files_per_part + node.query("SYSTEM FLUSH LOGS") + blob_storage_log = node.query( + f"SELECT * FROM system.blob_storage_log WHERE query_id = '{insert_query_id}' FORMAT PrettyCompactMonoBlock" + ) + + result = node.query( + f"""SELECT + (countIf( (event_type == 'Upload' OR event_type == 'MultiPartUploadWrite') as event_match) as total_events) > 0, + countIf(event_match AND bucket == 'root') == total_events, + countIf(event_match AND remote_path 
!= '') == total_events, + countIf(event_match AND local_path != '') == total_events, + sumIf(data_size, event_match) > 0 + FROM system.blob_storage_log + WHERE query_id = '{insert_query_id}' AND error == '' + """ + ) + assert result == "1\t1\t1\t1\t1\n", blob_storage_log + values2 = generate_values("2020-01-04", 4096) node.query("INSERT INTO s3_test VALUES {}".format(values2)) assert ( @@ -269,6 +298,30 @@ def test_alter_table_columns(cluster, node_name): "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096, -1)) ) + def assert_deleted_in_log(old_objects, new_objects): + node.query("SYSTEM FLUSH LOGS") + + deleted_objects = set(obj.object_name for obj in old_objects) - set( + obj.object_name for obj in new_objects + ) + deleted_in_log = set( + node.query( + f"SELECT remote_path FROM system.blob_storage_log WHERE error == '' AND event_type == 'Delete'" + ) + .strip() + .split() + ) + + # all deleted objects should be in log + assert all(obj in deleted_in_log for obj in deleted_objects), ( + deleted_objects, + node.query( + f"SELECT * FROM system.blob_storage_log FORMAT PrettyCompactMonoBlock" + ), + ) + + objects_before = list_objects(cluster, "data/") + node.query("ALTER TABLE s3_test ADD COLUMN col1 UInt64 DEFAULT 1") # To ensure parts have merged node.query("OPTIMIZE TABLE s3_test") @@ -278,30 +331,42 @@ def test_alter_table_columns(cluster, node_name): node.query("SELECT sum(col1) FROM s3_test WHERE id > 0 FORMAT Values") == "(4096)" ) - wait_for_delete_s3_objects( + + existing_objects = wait_for_delete_s3_objects( cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN, ) + assert_deleted_in_log(objects_before, existing_objects) + objects_before = existing_objects + node.query( "ALTER TABLE s3_test MODIFY COLUMN col1 String", settings={"mutations_sync": 2} ) assert node.query("SELECT distinct(col1) FROM s3_test FORMAT Values") == "('1')" # and file with mutation - wait_for_delete_s3_objects( + existing_objects = wait_for_delete_s3_objects( cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1, ) + assert_deleted_in_log(objects_before, existing_objects) + objects_before = existing_objects + node.query("ALTER TABLE s3_test DROP COLUMN col1", settings={"mutations_sync": 2}) # and 2 files with mutations - wait_for_delete_s3_objects( + existing_objects = wait_for_delete_s3_objects( cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2 ) + assert_deleted_in_log(objects_before, existing_objects) + objects_before = existing_objects - check_no_objects_after_drop(cluster) + existing_objects = check_no_objects_after_drop(cluster) + + assert_deleted_in_log(objects_before, existing_objects) + objects_before = existing_objects @pytest.mark.parametrize("node_name", ["node"]) @@ -796,6 +861,18 @@ def test_merge_canceled_by_s3_errors(cluster, broken_s3, node_name, storage_poli node.wait_for_log_line("ExpectedError Message: mock s3 injected error") + table_uuid = node.query( + "SELECT uuid FROM system.tables WHERE database = 'default' AND name = 'test_merge_canceled_by_s3_errors' LIMIT 1" + ).strip() + + node.query("SYSTEM FLUSH LOGS") + error_count_in_blob_log = node.query( + f"SELECT count() FROM system.blob_storage_log WHERE query_id like '{table_uuid}::%' AND error like '%mock s3 injected error%'" + ).strip() + assert int(error_count_in_blob_log) > 0, node.query( + f"SELECT * FROM system.blob_storage_log WHERE query_id like '{table_uuid}::%' FORMAT PrettyCompactMonoBlock" + ) + check_no_objects_after_drop( cluster, 
table_name="test_merge_canceled_by_s3_errors", node_name=node_name ) @@ -851,6 +928,10 @@ def test_merge_canceled_by_s3_errors_when_move(cluster, broken_s3, node_name): def test_s3_engine_heavy_write_check_mem( cluster, broken_s3, node_name, in_flight_memory ): + pytest.skip( + "Disabled, will be fixed after https://github.com/ClickHouse/ClickHouse/issues/51152" + ) + in_flight = in_flight_memory[0] memory = in_flight_memory[1] @@ -870,12 +951,18 @@ def test_s3_engine_heavy_write_check_mem( ) broken_s3.setup_fake_multpartuploads() - broken_s3.setup_slow_answers(10 * 1024 * 1024, timeout=15, count=10) + slow_responces = 10 + slow_timeout = 15 + broken_s3.setup_slow_answers( + 10 * 1024 * 1024, timeout=slow_timeout, count=slow_responces + ) query_id = f"INSERT_INTO_S3_ENGINE_QUERY_ID_{in_flight}" node.query( "INSERT INTO s3_test SELECT number, toString(number) FROM numbers(50000000)" - f" SETTINGS max_memory_usage={2*memory}" + f" SETTINGS " + f" max_memory_usage={2*memory}" + f", max_threads=1" # ParallelFormattingOutputFormat consumption depends on it f", s3_max_inflight_parts_for_one_file={in_flight}", query_id=query_id, ) @@ -892,7 +979,8 @@ def test_s3_engine_heavy_write_check_mem( assert int(memory_usage) < 1.2 * memory assert int(memory_usage) > 0.8 * memory - assert int(wait_inflight) > in_flight * 1000 * 1000 + # The more in_flight value is the less time CH waits. + assert int(wait_inflight) / 1000 / 1000 > slow_responces * slow_timeout / in_flight check_no_objects_after_drop(cluster, node_name=node_name) diff --git a/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml b/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml index 235b9a7b7a1..6303e9273fc 100644 --- a/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml +++ b/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml @@ -11,6 +11,7 @@ true 0 + 0 20000 @@ -33,6 +34,7 @@ true 1 + 0 1 20000 diff --git a/tests/integration/test_move_partition_to_volume_async/__init__.py b/tests/integration/test_move_partition_to_volume_async/__init__.py new file mode 100644 index 00000000000..e5a0d9b4834 --- /dev/null +++ b/tests/integration/test_move_partition_to_volume_async/__init__.py @@ -0,0 +1 @@ +#!/usr/bin/env python3 diff --git a/tests/integration/test_move_partition_to_volume_async/configs/storage_policy.xml b/tests/integration/test_move_partition_to_volume_async/configs/storage_policy.xml new file mode 100644 index 00000000000..f5d82656210 --- /dev/null +++ b/tests/integration/test_move_partition_to_volume_async/configs/storage_policy.xml @@ -0,0 +1,38 @@ + + + + + + + + s3 + http://minio1:9001/root/data/ + minio + minio123 + + + + s3 + http://resolver:8083/root/data/ + minio + minio123 + + + + + + +
+ default +
+ + broken_s3 + +
+ + 0.0 +
+
+
+ +
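Both tests in the file below poll system.moves by hand to see whether a background move is (or is no longer) in flight; the same check can be factored into a small helper. A sketch under the same assumptions as those tests (node is the cluster instance, table is the table being moved):

    import time

    def wait_for_background_move(node, table, expect_running, timeout=10.0):
        # Poll system.moves until a move for 'table' appears (expect_running=True)
        # or drains away (expect_running=False), or the timeout expires.
        deadline = time.time() + timeout
        while time.time() < deadline:
            moves = int(
                node.query(f"SELECT count() FROM system.moves WHERE table = '{table}'")
            )
            if (moves > 0) == expect_running:
                return True
            time.sleep(0.1)
        return False

    # After the async ALTER ... MOVE PARTITION below, a row should appear:
    #   assert wait_for_background_move(node, "moving_table_async", expect_running=True)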
diff --git a/tests/integration/test_move_partition_to_volume_async/test.py b/tests/integration/test_move_partition_to_volume_async/test.py new file mode 100644 index 00000000000..cdd2ee126c0 --- /dev/null +++ b/tests/integration/test_move_partition_to_volume_async/test.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +import logging +import time +import os + +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.mock_servers import start_s3_mock, start_mock_servers +from helpers.utility import generate_values, replace_config, SafeThread +from helpers.wait_for_helpers import wait_for_delete_inactive_parts +from helpers.wait_for_helpers import wait_for_delete_empty_parts +from helpers.wait_for_helpers import wait_for_merges + + +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) + + +@pytest.fixture(scope="module") +def init_broken_s3(cluster): + yield start_s3_mock(cluster, "broken_s3", "8083") + + +@pytest.fixture(scope="function") +def broken_s3(init_broken_s3): + init_broken_s3.reset() + yield init_broken_s3 + + +@pytest.fixture(scope="module") +def cluster(): + try: + cluster = ClickHouseCluster(__file__) + cluster.add_instance( + "node", + main_configs=[ + "configs/storage_policy.xml", + ], + with_minio=True, + ) + + cluster.start() + logging.info("Cluster started") + + yield cluster + finally: + cluster.shutdown() + + +def test_async_alter_move(cluster, broken_s3): + node = cluster.instances["node"] + + node.query( + """ + CREATE TABLE moving_table_async + ( + key UInt64, + data String + ) + ENGINE MergeTree() + ORDER BY tuple() + SETTINGS storage_policy = 'slow_s3' + """ + ) + + node.query( + "INSERT INTO moving_table_async SELECT number, randomPrintableASCII(1000) FROM numbers(10000)" + ) + + broken_s3.setup_slow_answers( + timeout=5, + count=1000000, + ) + + node.query( + "ALTER TABLE moving_table_async MOVE PARTITION tuple() TO DISK 'broken_s3'", + settings={"alter_move_to_space_execute_async": True}, + timeout=10, + ) + + # not flaky, just introduce some wait + time.sleep(3) + + for i in range(100): + count = node.query( + "SELECT count() FROM system.moves where table = 'moving_table_async'" + ) + if count == "1\n": + break + time.sleep(0.1) + else: + assert False, "Cannot find any moving background operation" + + +def test_sync_alter_move(cluster, broken_s3): + node = cluster.instances["node"] + + node.query( + """ + CREATE TABLE moving_table_sync + ( + key UInt64, + data String + ) + ENGINE MergeTree() + ORDER BY tuple() + SETTINGS storage_policy = 'slow_s3' + """ + ) + + node.query( + "INSERT INTO moving_table_sync SELECT number, randomPrintableASCII(1000) FROM numbers(10000)" + ) + + broken_s3.reset() + + node.query( + "ALTER TABLE moving_table_sync MOVE PARTITION tuple() TO DISK 'broken_s3'", + timeout=30, + ) + # not flaky, just introduce some wait + time.sleep(3) + + assert ( + node.query("SELECT count() FROM system.moves where table = 'moving_table_sync'") + == "0\n" + ) + + assert ( + node.query( + "SELECT disk_name FROM system.parts WHERE table = 'moving_table_sync'" + ) + == "broken_s3\n" + ) diff --git a/tests/integration/test_mysql_protocol/java_client.reference b/tests/integration/test_mysql_protocol/java_client.reference new file mode 100644 index 00000000000..0e6ab5b5e79 --- /dev/null +++ b/tests/integration/test_mysql_protocol/java_client.reference @@ -0,0 +1,133 @@ +### testSimpleDataTypes +Row #1 +i8 type is TINYINT, value: -128, wasNull: false +i16 type is SMALLINT, value: -32768, wasNull: false +i32 type is INT, value: -2147483648, 
wasNull: false +i64 type is BIGINT, value: -9223372036854775808, wasNull: false +i128 type is CHAR, value: -170141183460469231731687303715884105728, wasNull: false +i256 type is CHAR, value: -57896044618658097711785492504343953926634992332820282019728792003956564819968, wasNull: false +ui8 type is TINYINT, value: 120, wasNull: false +ui16 type is SMALLINT, value: 1234, wasNull: false +ui32 type is INT, value: 51234, wasNull: false +ui64 type is BIGINT, value: 421342, wasNull: false +ui128 type is CHAR, value: 15324355, wasNull: false +ui256 type is CHAR, value: 41345135123432, wasNull: false +f32 type is FLOAT, value: -0.796896, wasNull: false +f64 type is DOUBLE, value: -0.113259, wasNull: false +b type is TINYINT, value: true, wasNull: false +Row #2 +i8 type is TINYINT, value: 127, wasNull: false +i16 type is SMALLINT, value: 32767, wasNull: false +i32 type is INT, value: 2147483647, wasNull: false +i64 type is BIGINT, value: 9223372036854775807, wasNull: false +i128 type is CHAR, value: 170141183460469231731687303715884105727, wasNull: false +i256 type is CHAR, value: 57896044618658097711785492504343953926634992332820282019728792003956564819967, wasNull: false +ui8 type is TINYINT, value: 255, wasNull: false +ui16 type is SMALLINT, value: 65535, wasNull: false +ui32 type is INT, value: 4294967295, wasNull: false +ui64 type is BIGINT, value: 18446744073709551615, wasNull: false +ui128 type is CHAR, value: 340282366920938463463374607431768211455, wasNull: false +ui256 type is CHAR, value: 115792089237316195423570985008687907853269984665640564039457584007913129639935, wasNull: false +f32 type is FLOAT, value: 1.234000, wasNull: false +f64 type is DOUBLE, value: 3.352451, wasNull: false +b type is TINYINT, value: false, wasNull: false + +### testStringTypes +Row #1 +s type is CHAR, value: 42, wasNull: false +sn type is CHAR, value: null, wasNull: true +lc type is CHAR, value: test, wasNull: false +nlc type is CHAR, value: null, wasNull: true +Row #2 +s type is CHAR, value: foo, wasNull: false +sn type is CHAR, value: bar, wasNull: false +lc type is CHAR, value: qaz, wasNull: false +nlc type is CHAR, value: qux, wasNull: false + +### testLowCardinalityAndNullableTypes +Row #1 +ilc type is INT, value: -54, wasNull: false +dlc type is DATE, value: 1970-01-01, wasNull: false +ni type is INT, value: 144, wasNull: false +Row #2 +ilc type is INT, value: 42, wasNull: false +dlc type is DATE, value: 2011-02-05, wasNull: false +ni type is INT, value: 0, wasNull: true + +### testDecimalTypes +Row #1 +d32 type is DECIMAL, value: -1.55, wasNull: false +d64 type is DECIMAL, value: 6.03, wasNull: false +d128_native type is DECIMAL, value: 5, wasNull: false +d128_text type is CHAR, value: -1224124.23423, wasNull: false +d256 type is CHAR, value: -54342.3, wasNull: false +Row #2 +d32 type is DECIMAL, value: 1234567.89, wasNull: false +d64 type is DECIMAL, value: 123456789123456.789, wasNull: false +d128_native type is DECIMAL, value: 12345678912345678912.1234567891, wasNull: false +d128_text type is CHAR, value: 1234567.8912345678912345678911234567891, wasNull: false +d256 type is CHAR, value: 12345678912345678912345678911234567891234567891234567891.12345678911234567891, wasNull: false + +### testMiscTypes +Row #1 +a type is CHAR, value: ['foo','bar'], wasNull: false +u type is CHAR, value: 5da5038d-788f-48c6-b510-babb41c538d3, wasNull: false +t type is CHAR, value: (42,'qaz'), wasNull: false +m type is CHAR, value: {'qux':144,'text':255}, wasNull: false + +### testDateTypes +Row #1 +d type is DATE, value: 
1970-01-01, wasNull: false +d32 type is DATE, value: 1900-01-01, wasNull: false +dt type is TIMESTAMP, value: 1970-01-01 00:00:00.0, wasNull: false +dt64_3 type is TIMESTAMP, value: 1900-01-01 00:00:00.001, wasNull: false +dt64_6 type is TIMESTAMP, value: 1900-01-01 00:00:00.000001, wasNull: false +dt64_9 type is TIMESTAMP, value: 1900-01-01 00:00:00.0, wasNull: false +Row #2 +d type is DATE, value: 2149-06-06, wasNull: false +d32 type is DATE, value: 2178-04-16, wasNull: false +dt type is TIMESTAMP, value: 2106-02-07 06:28:15.0, wasNull: false +dt64_3 type is TIMESTAMP, value: 2106-02-07 06:28:15.123, wasNull: false +dt64_6 type is TIMESTAMP, value: 2106-02-07 06:28:15.123456, wasNull: false +dt64_9 type is TIMESTAMP, value: 2106-02-07 06:28:15.123456, wasNull: false + +### testUnusualDateTime64Scales +Row #1 +dt64_0 type is TIMESTAMP, value: 2022-04-13 03:17:45.0, wasNull: false +dt64_1 type is TIMESTAMP, value: 2022-04-13 03:17:45.1, wasNull: false +dt64_2 type is TIMESTAMP, value: 2022-04-13 03:17:45.12, wasNull: false +dt64_4 type is TIMESTAMP, value: 2022-04-13 03:17:45.1234, wasNull: false +dt64_5 type is TIMESTAMP, value: 2022-04-13 03:17:45.12345, wasNull: false +dt64_7 type is TIMESTAMP, value: 2022-04-13 03:17:45.123456, wasNull: false +dt64_8 type is TIMESTAMP, value: 2022-04-13 03:17:45.123456, wasNull: false +Row #2 +dt64_0 type is TIMESTAMP, value: 2022-04-13 03:17:45.0, wasNull: false +dt64_1 type is TIMESTAMP, value: 2022-04-13 03:17:45.1, wasNull: false +dt64_2 type is TIMESTAMP, value: 2022-04-13 03:17:45.01, wasNull: false +dt64_4 type is TIMESTAMP, value: 2022-04-13 03:17:45.0001, wasNull: false +dt64_5 type is TIMESTAMP, value: 2022-04-13 03:17:45.00001, wasNull: false +dt64_7 type is TIMESTAMP, value: 2022-04-13 03:17:45.0, wasNull: false +dt64_8 type is TIMESTAMP, value: 2022-04-13 03:17:45.0, wasNull: false + +### testDateTimeTimezones +Row #1 +dt type is TIMESTAMP, value: 1970-01-01 01:00:00.0, wasNull: false +dt64_3 type is TIMESTAMP, value: 1969-12-31 16:00:00.0, wasNull: false +Row #2 +dt type is TIMESTAMP, value: 2022-09-04 20:31:05.0, wasNull: false +dt64_3 type is TIMESTAMP, value: 2022-09-04 20:31:05.022, wasNull: false + +### testSuspiciousNullableLowCardinalityTypes +Row #1 +f type is FLOAT, value: 1.0, wasNull: false +d type is DATE, value: 2022-04-15, wasNull: false +dt type is TIMESTAMP, value: 2021-06-04 13:55:11.0, wasNull: false +Row #2 +f type is FLOAT, value: 3.14, wasNull: false +d type is DATE, value: 1970-01-01, wasNull: false +dt type is TIMESTAMP, value: 1970-01-01 00:00:00.0, wasNull: false +Row #3 +f type is FLOAT, value: 0.0, wasNull: true +d type is DATE, value: null, wasNull: true +dt type is TIMESTAMP, value: null, wasNull: true + diff --git a/tests/integration/test_mysql_protocol/java_client_binary.reference b/tests/integration/test_mysql_protocol/java_client_binary.reference deleted file mode 100644 index 763b7577487..00000000000 --- a/tests/integration/test_mysql_protocol/java_client_binary.reference +++ /dev/null @@ -1,129 +0,0 @@ -### testSimpleDataTypes -Row #1 -i8 type is TINYINT, value: -128 -i16 type is SMALLINT, value: -32768 -i32 type is INT, value: -2147483648 -i64 type is BIGINT, value: -9223372036854775808 -i128 type is CHAR, value: -170141183460469231731687303715884105728 -i256 type is CHAR, value: -57896044618658097711785492504343953926634992332820282019728792003956564819968 -ui8 type is TINYINT, value: 120 -ui16 type is SMALLINT, value: 1234 -ui32 type is INT, value: 51234 -ui64 type is BIGINT, value: 421342 -ui128 type 
is CHAR, value: 15324355 -ui256 type is CHAR, value: 41345135123432 -f32 type is FLOAT, value: -0.796896 -f64 type is DOUBLE, value: -0.113259 -b type is TINYINT, value: true -Row #2 -i8 type is TINYINT, value: 127 -i16 type is SMALLINT, value: 32767 -i32 type is INT, value: 2147483647 -i64 type is BIGINT, value: 9223372036854775807 -i128 type is CHAR, value: 170141183460469231731687303715884105727 -i256 type is CHAR, value: 57896044618658097711785492504343953926634992332820282019728792003956564819967 -ui8 type is TINYINT, value: 255 -ui16 type is SMALLINT, value: 65535 -ui32 type is INT, value: 4294967295 -ui64 type is BIGINT, value: 18446744073709551615 -ui128 type is CHAR, value: 340282366920938463463374607431768211455 -ui256 type is CHAR, value: 115792089237316195423570985008687907853269984665640564039457584007913129639935 -f32 type is FLOAT, value: 1.234000 -f64 type is DOUBLE, value: 3.352451 -b type is TINYINT, value: false - -### testStringTypes -Row #1 -s type is CHAR, value: 42 -sn type is CHAR, value: ᴺᵁᴸᴸ -lc type is CHAR, value: test -nlc type is CHAR, value: ᴺᵁᴸᴸ -Row #2 -s type is CHAR, value: foo -sn type is CHAR, value: bar -lc type is CHAR, value: qaz -nlc type is CHAR, value: qux - -### testLowCardinalityAndNullableTypes -Row #1 -ilc type is INT, value: -54 -dlc type is DATE, value: 1970-01-01 -ni type is INT, value: 144 -Row #2 -ilc type is INT, value: 42 -dlc type is DATE, value: 2011-02-05 -ni type is INT, value: 0 - -### testDecimalTypes -Row #1 -d32 type is DECIMAL, value: -1.55 -d64 type is DECIMAL, value: 6.03 -d128_native type is DECIMAL, value: 5 -d128_text type is CHAR, value: -1224124.23423 -d256 type is CHAR, value: -54342.3 -Row #2 -d32 type is DECIMAL, value: 1234567.89 -d64 type is DECIMAL, value: 123456789123456.789 -d128_native type is DECIMAL, value: 12345678912345678912.1234567891 -d128_text type is CHAR, value: 1234567.8912345678912345678911234567891 -d256 type is CHAR, value: 12345678912345678912345678911234567891234567891234567891.12345678911234567891 - -### testMiscTypes -Row #1 -a type is CHAR, value: ['foo','bar'] -u type is CHAR, value: 5da5038d-788f-48c6-b510-babb41c538d3 -t type is CHAR, value: (42,'qaz') -m type is CHAR, value: {'qux':144,'text':255} - -### testDateTypes -Row #1 -d type is DATE, value: 1970-01-01 -d32 type is DATE, value: 1900-01-01 -dt type is TIMESTAMP, value: 1970-01-01 00:00:00.0 -dt64_3 type is TIMESTAMP, value: 1900-01-01 00:00:00.001 -dt64_6 type is TIMESTAMP, value: 1900-01-01 00:00:00.000001 -dt64_9 type is TIMESTAMP, value: 1900-01-01 00:00:00.0 -Row #2 -d type is DATE, value: 2149-06-06 -d32 type is DATE, value: 2178-04-16 -dt type is TIMESTAMP, value: 2106-02-07 06:28:15.0 -dt64_3 type is TIMESTAMP, value: 2106-02-07 06:28:15.123 -dt64_6 type is TIMESTAMP, value: 2106-02-07 06:28:15.123456 -dt64_9 type is TIMESTAMP, value: 2106-02-07 06:28:15.123456 - -### testUnusualDateTime64Scales -Row #1 -dt64_0 type is TIMESTAMP, value: 2022-04-13 03:17:45.0 -dt64_1 type is TIMESTAMP, value: 2022-04-13 03:17:45.1 -dt64_2 type is TIMESTAMP, value: 2022-04-13 03:17:45.12 -dt64_4 type is TIMESTAMP, value: 2022-04-13 03:17:45.1234 -dt64_5 type is TIMESTAMP, value: 2022-04-13 03:17:45.12345 -dt64_7 type is TIMESTAMP, value: 2022-04-13 03:17:45.123456 -dt64_8 type is TIMESTAMP, value: 2022-04-13 03:17:45.123456 -Row #2 -dt64_0 type is TIMESTAMP, value: 2022-04-13 03:17:45.0 -dt64_1 type is TIMESTAMP, value: 2022-04-13 03:17:45.1 -dt64_2 type is TIMESTAMP, value: 2022-04-13 03:17:45.01 -dt64_4 type is TIMESTAMP, value: 2022-04-13 
03:17:45.0001 -dt64_5 type is TIMESTAMP, value: 2022-04-13 03:17:45.00001 -dt64_7 type is TIMESTAMP, value: 2022-04-13 03:17:45.0 -dt64_8 type is TIMESTAMP, value: 2022-04-13 03:17:45.0 - -### testDateTimeTimezones -Row #1 -dt type is TIMESTAMP, value: 1970-01-01 01:00:00.0 -dt64_3 type is TIMESTAMP, value: 1969-12-31 16:00:00.0 -Row #2 -dt type is TIMESTAMP, value: 2022-09-04 20:31:05.0 -dt64_3 type is TIMESTAMP, value: 2022-09-04 20:31:05.022 - -### testSuspiciousNullableLowCardinalityTypes -Row #1 -f type is FLOAT, value: 1.0 -d type is DATE, value: 2022-04-15 -dt type is TIMESTAMP, value: 2021-06-04 13:55:11.0 -Row #2 -f type is FLOAT, value: 3.14 -d type is DATE, value: 1970-01-01 -dt type is TIMESTAMP, value: 1970-01-01 00:00:00.0 - diff --git a/tests/integration/test_mysql_protocol/java_client_test.sql b/tests/integration/test_mysql_protocol/java_client_test.sql index 1bcb29b8bee..311c315bae3 100644 --- a/tests/integration/test_mysql_protocol/java_client_test.sql +++ b/tests/integration/test_mysql_protocol/java_client_test.sql @@ -145,4 +145,5 @@ CREATE OR REPLACE TABLE suspicious_nullable_low_cardinality_types INSERT INTO suspicious_nullable_low_cardinality_types VALUES (1, '2022-04-15', '2021-06-04 13:55:11'), - (3.14, '1970-01-01', '1970-01-01 00:00:00'); + (3.14, '1970-01-01', '1970-01-01 00:00:00'), + (NULL, NULL, NULL); diff --git a/tests/integration/test_mysql_protocol/java_client_text.reference b/tests/integration/test_mysql_protocol/java_client_text.reference deleted file mode 100644 index f0dc390bea6..00000000000 --- a/tests/integration/test_mysql_protocol/java_client_text.reference +++ /dev/null @@ -1,129 +0,0 @@ -### testSimpleDataTypes -Row #1 -i8 type is TINYINT, value: -128 -i16 type is SMALLINT, value: -32768 -i32 type is INT, value: -2147483648 -i64 type is BIGINT, value: -9223372036854775808 -i128 type is CHAR, value: -170141183460469231731687303715884105728 -i256 type is CHAR, value: -57896044618658097711785492504343953926634992332820282019728792003956564819968 -ui8 type is TINYINT, value: 120 -ui16 type is SMALLINT, value: 1234 -ui32 type is INT, value: 51234 -ui64 type is BIGINT, value: 421342 -ui128 type is CHAR, value: 15324355 -ui256 type is CHAR, value: 41345135123432 -f32 type is FLOAT, value: -0.796896 -f64 type is DOUBLE, value: -0.113259 -b type is TINYINT, value: true -Row #2 -i8 type is TINYINT, value: 127 -i16 type is SMALLINT, value: 32767 -i32 type is INT, value: 2147483647 -i64 type is BIGINT, value: 9223372036854775807 -i128 type is CHAR, value: 170141183460469231731687303715884105727 -i256 type is CHAR, value: 57896044618658097711785492504343953926634992332820282019728792003956564819967 -ui8 type is TINYINT, value: 255 -ui16 type is SMALLINT, value: 65535 -ui32 type is INT, value: 4294967295 -ui64 type is BIGINT, value: 18446744073709551615 -ui128 type is CHAR, value: 340282366920938463463374607431768211455 -ui256 type is CHAR, value: 115792089237316195423570985008687907853269984665640564039457584007913129639935 -f32 type is FLOAT, value: 1.234000 -f64 type is DOUBLE, value: 3.352451 -b type is TINYINT, value: false - -### testStringTypes -Row #1 -s type is CHAR, value: 42 -sn type is CHAR, value: null -lc type is CHAR, value: test -nlc type is CHAR, value: null -Row #2 -s type is CHAR, value: foo -sn type is CHAR, value: bar -lc type is CHAR, value: qaz -nlc type is CHAR, value: qux - -### testLowCardinalityAndNullableTypes -Row #1 -ilc type is INT, value: -54 -dlc type is DATE, value: 1970-01-01 -ni type is INT, value: 144 -Row #2 -ilc type is 
INT, value: 42 -dlc type is DATE, value: 2011-02-05 -ni type is INT, value: 0 - -### testDecimalTypes -Row #1 -d32 type is DECIMAL, value: -1.55 -d64 type is DECIMAL, value: 6.03 -d128_native type is DECIMAL, value: 5 -d128_text type is CHAR, value: -1224124.23423 -d256 type is CHAR, value: -54342.3 -Row #2 -d32 type is DECIMAL, value: 1234567.89 -d64 type is DECIMAL, value: 123456789123456.789 -d128_native type is DECIMAL, value: 12345678912345678912.1234567891 -d128_text type is CHAR, value: 1234567.8912345678912345678911234567891 -d256 type is CHAR, value: 12345678912345678912345678911234567891234567891234567891.12345678911234567891 - -### testMiscTypes -Row #1 -a type is CHAR, value: ['foo','bar'] -u type is CHAR, value: 5da5038d-788f-48c6-b510-babb41c538d3 -t type is CHAR, value: (42,'qaz') -m type is CHAR, value: {'qux':144,'text':255} - -### testDateTypes -Row #1 -d type is DATE, value: 1970-01-01 -d32 type is DATE, value: 1900-01-01 -dt type is TIMESTAMP, value: 1970-01-01 00:00:00.0 -dt64_3 type is TIMESTAMP, value: 1900-01-01 00:00:00.001 -dt64_6 type is TIMESTAMP, value: 1900-01-01 00:00:00.000001 -dt64_9 type is TIMESTAMP, value: 1900-01-01 00:00:00.0 -Row #2 -d type is DATE, value: 2149-06-06 -d32 type is DATE, value: 2178-04-16 -dt type is TIMESTAMP, value: 2106-02-07 06:28:15.0 -dt64_3 type is TIMESTAMP, value: 2106-02-07 06:28:15.123 -dt64_6 type is TIMESTAMP, value: 2106-02-07 06:28:15.123456 -dt64_9 type is TIMESTAMP, value: 2106-02-07 06:28:15.123456 - -### testUnusualDateTime64Scales -Row #1 -dt64_0 type is TIMESTAMP, value: 2022-04-13 03:17:45.0 -dt64_1 type is TIMESTAMP, value: 2022-04-13 03:17:45.1 -dt64_2 type is TIMESTAMP, value: 2022-04-13 03:17:45.12 -dt64_4 type is TIMESTAMP, value: 2022-04-13 03:17:45.1234 -dt64_5 type is TIMESTAMP, value: 2022-04-13 03:17:45.12345 -dt64_7 type is TIMESTAMP, value: 2022-04-13 03:17:45.123456 -dt64_8 type is TIMESTAMP, value: 2022-04-13 03:17:45.123456 -Row #2 -dt64_0 type is TIMESTAMP, value: 2022-04-13 03:17:45.0 -dt64_1 type is TIMESTAMP, value: 2022-04-13 03:17:45.1 -dt64_2 type is TIMESTAMP, value: 2022-04-13 03:17:45.01 -dt64_4 type is TIMESTAMP, value: 2022-04-13 03:17:45.0001 -dt64_5 type is TIMESTAMP, value: 2022-04-13 03:17:45.00001 -dt64_7 type is TIMESTAMP, value: 2022-04-13 03:17:45.0 -dt64_8 type is TIMESTAMP, value: 2022-04-13 03:17:45.0 - -### testDateTimeTimezones -Row #1 -dt type is TIMESTAMP, value: 1970-01-01 01:00:00.0 -dt64_3 type is TIMESTAMP, value: 1969-12-31 16:00:00.0 -Row #2 -dt type is TIMESTAMP, value: 2022-09-04 20:31:05.0 -dt64_3 type is TIMESTAMP, value: 2022-09-04 20:31:05.022 - -### testSuspiciousNullableLowCardinalityTypes -Row #1 -f type is FLOAT, value: 1.0 -d type is DATE, value: 2022-04-15 -dt type is TIMESTAMP, value: 2021-06-04 13:55:11.0 -Row #2 -f type is FLOAT, value: 3.14 -d type is DATE, value: 1970-01-01 -dt type is TIMESTAMP, value: 1970-01-01 00:00:00.0 - diff --git a/tests/integration/test_mysql_protocol/test.py b/tests/integration/test_mysql_protocol/test.py index 6a6831467f8..7a69d07633c 100644 --- a/tests/integration/test_mysql_protocol/test.py +++ b/tests/integration/test_mysql_protocol/test.py @@ -799,7 +799,7 @@ def test_java_client_text(started_cluster, java_container): demux=True, ) - with open(os.path.join(SCRIPT_DIR, "java_client_text.reference")) as fp: + with open(os.path.join(SCRIPT_DIR, "java_client.reference")) as fp: reference = fp.read() assert stdout.decode() == reference @@ -813,7 +813,7 @@ def test_java_client_binary(started_cluster, java_container): 
demux=True, ) - with open(os.path.join(SCRIPT_DIR, "java_client_binary.reference")) as fp: + with open(os.path.join(SCRIPT_DIR, "java_client.reference")) as fp: reference = fp.read() assert stdout.decode() == reference diff --git a/tests/integration/test_named_collections/configs/users.d/users_no_default_access.xml b/tests/integration/test_named_collections/configs/users.d/users_no_default_access.xml index b8f38f04ca9..dcac83188dc 100644 --- a/tests/integration/test_named_collections/configs/users.d/users_no_default_access.xml +++ b/tests/integration/test_named_collections/configs/users.d/users_no_default_access.xml @@ -4,6 +4,7 @@ default default + 0 diff --git a/tests/integration/test_old_versions/test.py b/tests/integration/test_old_versions/test.py index aff07c53114..b59bfcc4f6b 100644 --- a/tests/integration/test_old_versions/test.py +++ b/tests/integration/test_old_versions/test.py @@ -55,7 +55,7 @@ node19_13 = cluster.add_instance( node19_16 = cluster.add_instance( "node19_16", image="yandex/clickhouse-server", - tag="19.16.2.2", + tag="19.16.9.37", with_installed_binary=True, main_configs=["configs/config.d/test_cluster.xml"], allow_analyzer=False, diff --git a/tests/integration/test_polymorphic_parts/test.py b/tests/integration/test_polymorphic_parts/test.py index debb509de90..ba9b5ec6cac 100644 --- a/tests/integration/test_polymorphic_parts/test.py +++ b/tests/integration/test_polymorphic_parts/test.py @@ -360,7 +360,7 @@ node7 = cluster.add_instance( user_configs=["configs_old/users.d/not_optimize_count.xml"], with_zookeeper=True, image="yandex/clickhouse-server", - tag="19.17.8.54", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_reload_query_masking_rules/__init__.py b/tests/integration/test_reload_query_masking_rules/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_reload_query_masking_rules/configs/changed_settings.xml b/tests/integration/test_reload_query_masking_rules/configs/changed_settings.xml new file mode 100644 index 00000000000..d681496d843 --- /dev/null +++ b/tests/integration/test_reload_query_masking_rules/configs/changed_settings.xml @@ -0,0 +1,19 @@ + + + system + query_log
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <max_size_rows>1048576</max_size_rows>
+        <reserved_size_rows>8192</reserved_size_rows>
+        <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+        <flush_on_crash>false</flush_on_crash>
+    </query_log>
+
+    <query_masking_rules>
+        <rule>
+            <regexp>TOPSECRET.TOPSECRET</regexp>
+            <replace>[hidden]</replace>
+        </rule>
+    </query_masking_rules>
+</clickhouse>
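For intuition about the rule added above: the server applies every configured regexp to the query text and substitutes the replace string for each match before the query is written to the server log and query_log, which is why the test below greps the log for the masked form. A rough Python equivalent of this one rule, purely illustrative and not the server's implementation:

    import re

    # The rule from changed_settings.xml: 'TOPSECRET.TOPSECRET' -> '[hidden]'.
    # The dot is unescaped, so it matches any single character between the two words.
    rule = re.compile(r"TOPSECRET.TOPSECRET")

    def mask(query: str) -> str:
        # Roughly what query masking does to a statement before it is logged.
        return rule.sub("[hidden]", query)

    assert mask("SELECT 'TOPSECRET.TOPSECRET'") == "SELECT '[hidden]'"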
diff --git a/tests/integration/test_reload_query_masking_rules/configs/empty_settings.xml b/tests/integration/test_reload_query_masking_rules/configs/empty_settings.xml new file mode 100644 index 00000000000..82647ff82b5 --- /dev/null +++ b/tests/integration/test_reload_query_masking_rules/configs/empty_settings.xml @@ -0,0 +1,12 @@ + + + system + query_log
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <max_size_rows>1048576</max_size_rows>
+        <reserved_size_rows>8192</reserved_size_rows>
+        <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+        <flush_on_crash>false</flush_on_crash>
+    </query_log>
+</clickhouse>
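The fixture and test in the next file always follow the same two steps to change server settings at runtime: overwrite a config.d drop-in inside the container and issue SYSTEM RELOAD CONFIG. A small helper capturing that pattern, using only calls that already appear in this patch and the same paths the test uses (SCRIPT_DIR is defined in the test module):

    import os

    def reload_with_config(node, config_name):
        # Overwrite the drop-in config and ask the server to re-read it.
        node.copy_file_to_container(
            os.path.join(SCRIPT_DIR, "configs", config_name),
            "/etc/clickhouse-server/config.d/z.xml",
        )
        node.query("SYSTEM RELOAD CONFIG")

    # e.g. reload_with_config(node, "changed_settings.xml") enables the masking rule,
    # and reload_with_config(node, "empty_settings.xml") turns it back off.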
diff --git a/tests/integration/test_reload_query_masking_rules/test.py b/tests/integration/test_reload_query_masking_rules/test.py new file mode 100644 index 00000000000..f269aefbacb --- /dev/null +++ b/tests/integration/test_reload_query_masking_rules/test.py @@ -0,0 +1,57 @@ +import pytest +import os +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import assert_eq_with_retry, assert_logs_contain_with_retry + +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance("node", user_configs=["configs/empty_settings.xml"]) + + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def reset_to_normal_settings_after_test(): + try: + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/empty_settings.xml"), + "/etc/clickhouse-server/config.d/z.xml", + ) + node.query("SYSTEM RELOAD CONFIG") + yield + finally: + pass + + +# @pytest.mark.parametrize("reload_strategy", ["force", "timeout"]) +def test_reload_query_masking_rules(): + # At first, empty configuration is fed to ClickHouse. The query + # "SELECT 'TOPSECRET.TOPSECRET'" will not be redacted, and the new masking + # event will not be registered + node.query("SELECT 'TOPSECRET.TOPSECRET'") + assert_logs_contain_with_retry(node, "SELECT 'TOPSECRET.TOPSECRET'") + assert not node.contains_in_log(r"SELECT '\[hidden\]'") + node.rotate_logs() + + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/changed_settings.xml"), + "/etc/clickhouse-server/config.d/z.xml", + ) + + node.query("SYSTEM RELOAD CONFIG") + + # Now the same query will be redacted in the logs and the counter of events + # will be incremented + node.query("SELECT 'TOPSECRET.TOPSECRET'") + + assert_logs_contain_with_retry(node, r"SELECT '\[hidden\]'") + assert not node.contains_in_log("SELECT 'TOPSECRET.TOPSECRET'") + + node.rotate_logs() diff --git a/tests/integration/test_replicated_merge_tree_compatibility/test.py b/tests/integration/test_replicated_merge_tree_compatibility/test.py index c30a0d86c98..32a44aa65b9 100644 --- a/tests/integration/test_replicated_merge_tree_compatibility/test.py +++ b/tests/integration/test_replicated_merge_tree_compatibility/test.py @@ -6,7 +6,7 @@ node1 = cluster.add_instance( "node1", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.12.4.5", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, allow_analyzer=False, @@ -15,7 +15,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.12.4.5", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_replicated_merge_tree_encryption_codec/test.py b/tests/integration/test_replicated_merge_tree_encryption_codec/test.py index a50f8341ee7..c8b63f9502a 100644 --- a/tests/integration/test_replicated_merge_tree_encryption_codec/test.py +++ b/tests/integration/test_replicated_merge_tree_encryption_codec/test.py @@ -91,7 +91,14 @@ def test_different_keys(): copy_keys(node2, "key_b") create_table() - insert_data() + # Insert two blocks without duplicated blocks to force each replica to actually fetch parts from another replica. 
+ node1.query("INSERT INTO tbl VALUES (1, 'str1')") + node2.query("INSERT INTO tbl VALUES (2, 'str2')") + node1.query("SYSTEM SYNC REPLICA ON CLUSTER 'cluster' tbl") + + # After "SYSTEM SYNC REPLICA" we expect node1 and node2 here both having a part for (1, 'str1') encrypted with "key_a", + # and a part for (2, 'str2') encrypted with "key_b". + # So the command "SELECT * from tbl" must fail on both nodes because each node has only one encryption key. assert "BAD_DECRYPT" in node1.query_and_get_error("SELECT * FROM tbl") assert "BAD_DECRYPT" in node2.query_and_get_error("SELECT * FROM tbl") diff --git a/tests/integration/test_replicating_constants/test.py b/tests/integration/test_replicating_constants/test.py index 00781e473c7..9669e890cd3 100644 --- a/tests/integration/test_replicating_constants/test.py +++ b/tests/integration/test_replicating_constants/test.py @@ -9,7 +9,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=True, image="yandex/clickhouse-server", - tag="19.1.14", + tag="19.16.9.37", with_installed_binary=True, allow_analyzer=False, ) diff --git a/tests/integration/test_storage_azure_blob_storage/test.py b/tests/integration/test_storage_azure_blob_storage/test.py index e0365f70e7f..96fff6b891f 100644 --- a/tests/integration/test_storage_azure_blob_storage/test.py +++ b/tests/integration/test_storage_azure_blob_storage/test.py @@ -1156,3 +1156,37 @@ def test_filtering_by_file_or_path(cluster): ) assert int(result) == 1 + + +def test_size_virtual_column(cluster): + node = cluster.instances["node"] + storage_account_url = cluster.env_variables["AZURITE_STORAGE_ACCOUNT_URL"] + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}','cont', 'test_size_virtual_column1.tsv', 'devstoreaccount1', " + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 1", + ) + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}','cont', 'test_size_virtual_column2.tsv', 'devstoreaccount1', " + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 11", + ) + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_size_virtual_column3.tsv', 'devstoreaccount1', " + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 111", + ) + + result = azure_query( + node, + f"select _file, _size from azureBlobStorage('{storage_account_url}', 'cont', 'test_size_virtual_column*.tsv', 'devstoreaccount1', " + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') " + f"order by _file", + ) + + assert ( + result + == "test_size_virtual_column1.tsv\t2\ntest_size_virtual_column2.tsv\t3\ntest_size_virtual_column3.tsv\t4\n" + ) diff --git a/tests/integration/test_storage_iceberg/test.py b/tests/integration/test_storage_iceberg/test.py index 11198a7175b..d5f8d04e258 100644 --- a/tests/integration/test_storage_iceberg/test.py +++ b/tests/integration/test_storage_iceberg/test.py @@ -9,6 +9,8 @@ import json import pytest import time import glob +import uuid +import os from pyspark.sql.types import ( StructType, @@ -515,3 +517,35 @@ def test_metadata_file_selection(started_cluster, format_version): create_iceberg_table(instance, TABLE_NAME) assert int(instance.query(f"SELECT count() FROM 
{TABLE_NAME}")) == 500 + + +@pytest.mark.parametrize("format_version", ["1", "2"]) +def test_metadata_file_format_with_uuid(started_cluster, format_version): + instance = started_cluster.instances["node1"] + spark = started_cluster.spark_session + minio_client = started_cluster.minio_client + bucket = started_cluster.minio_bucket + TABLE_NAME = "test_metadata_selection_with_uuid_" + format_version + + spark.sql( + f"CREATE TABLE {TABLE_NAME} (id bigint, data string) USING iceberg TBLPROPERTIES ('format-version' = '2', 'write.update.mode'='merge-on-read', 'write.delete.mode'='merge-on-read', 'write.merge.mode'='merge-on-read')" + ) + + for i in range(50): + spark.sql( + f"INSERT INTO {TABLE_NAME} select id, char(id + ascii('a')) from range(10)" + ) + + for i in range(50): + os.rename( + f"/iceberg_data/default/{TABLE_NAME}/metadata/v{i + 1}.metadata.json", + f"/iceberg_data/default/{TABLE_NAME}/metadata/{str(i).zfill(5)}-{uuid.uuid4()}.metadata.json", + ) + + files = upload_directory( + minio_client, bucket, f"/iceberg_data/default/{TABLE_NAME}/", "" + ) + + create_iceberg_table(instance, TABLE_NAME) + + assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 500 diff --git a/tests/integration/test_storage_rabbitmq/test.py b/tests/integration/test_storage_rabbitmq/test.py index 983e52ca294..f26a273fe5e 100644 --- a/tests/integration/test_storage_rabbitmq/test.py +++ b/tests/integration/test_storage_rabbitmq/test.py @@ -102,18 +102,35 @@ def rabbitmq_setup_teardown(): # Tests -def test_rabbitmq_select(rabbitmq_cluster): +@pytest.mark.parametrize( + "secure", + [ + pytest.param(0), + pytest.param(1), + ], +) +def test_rabbitmq_select(rabbitmq_cluster, secure): + if secure and instance.is_built_with_thread_sanitizer(): + pytest.skip( + "Data races: see https://github.com/ClickHouse/ClickHouse/issues/56866" + ) + + port = cluster.rabbitmq_port + if secure: + port = cluster.rabbitmq_secure_port + instance.query( """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ - SETTINGS rabbitmq_host_port = '{}:5672', + SETTINGS rabbitmq_host_port = '{}:{}', rabbitmq_exchange_name = 'select', rabbitmq_commit_on_select = 1, rabbitmq_format = 'JSONEachRow', - rabbitmq_row_delimiter = '\\n'; + rabbitmq_row_delimiter = '\\n', + rabbitmq_secure = {}; """.format( - rabbitmq_cluster.rabbitmq_host + rabbitmq_cluster.rabbitmq_host, port, secure ) ) @@ -3442,18 +3459,18 @@ def test_rabbitmq_handle_error_mode_stream(rabbitmq_cluster): rabbitmq_row_delimiter = '\\n', rabbitmq_handle_error_mode = 'stream'; - + CREATE TABLE test.errors (error Nullable(String), broken_message Nullable(String)) ENGINE = MergeTree() ORDER BY tuple(); CREATE MATERIALIZED VIEW test.errors_view TO test.errors AS SELECT _error as error, _raw_message as broken_message FROM test.rabbit where not isNull(_error); - + CREATE TABLE test.data (key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; - + CREATE MATERIALIZED VIEW test.view TO test.data AS SELECT key, value FROM test.rabbit; """.format( diff --git a/tests/integration/test_storage_s3/configs/blob_log.xml b/tests/integration/test_storage_s3/configs/blob_log.xml new file mode 100644 index 00000000000..474c163b937 --- /dev/null +++ b/tests/integration/test_storage_s3/configs/blob_log.xml @@ -0,0 +1,9 @@ + + + system + blob_storage_log
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <ttl>event_date + INTERVAL 30 DAY</ttl>
+    </blob_storage_log>
+</clickhouse>
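The config above only registers the system.blob_storage_log table; the assertions that read it are added to tests/integration/test_storage_s3/test.py further down. A rough sketch of the pattern used there (the query_id placeholder stands for whatever id the test attached to its INSERT, it is not a real value):

    SYSTEM FLUSH LOGS;
    SELECT event_type, bucket, remote_path, count()
    FROM system.blob_storage_log
    WHERE query_id = 'put_query_id_goes_here'
    GROUP BY event_type, bucket, remote_path;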
diff --git a/tests/integration/test_storage_s3/configs/defaultS3.xml b/tests/integration/test_storage_s3/configs/defaultS3.xml index 37454ef6781..7dac6d9fbb5 100644 --- a/tests/integration/test_storage_s3/configs/defaultS3.xml +++ b/tests/integration/test_storage_s3/configs/defaultS3.xml @@ -1,9 +1,4 @@ - - - 5 - - http://resolver:8080 diff --git a/tests/integration/test_storage_s3/configs/s3_retry.xml b/tests/integration/test_storage_s3/configs/s3_retry.xml index 727e23273cf..3171da051d0 100644 --- a/tests/integration/test_storage_s3/configs/s3_retry.xml +++ b/tests/integration/test_storage_s3/configs/s3_retry.xml @@ -1,7 +1,9 @@ - 5 + 1 + 10 + 5 diff --git a/tests/integration/test_storage_s3/s3_mocks/unstable_server.py b/tests/integration/test_storage_s3/s3_mocks/unstable_server.py index 103dd30340c..5ef781bdc9e 100644 --- a/tests/integration/test_storage_s3/s3_mocks/unstable_server.py +++ b/tests/integration/test_storage_s3/s3_mocks/unstable_server.py @@ -4,6 +4,7 @@ import re import socket import struct import sys +import time def gen_n_digit_number(n): @@ -39,14 +40,14 @@ random.seed("Unstable server/1.0") # Generating some "random" data and append a line which contains sum of numbers in column 4. lines = ( - b"".join((gen_line() for _ in range(500000))) + b"".join([gen_line() for _ in range(500000)]) + f"0,0,0,{-sum_in_4_column}\n".encode() ) class RequestHandler(http.server.BaseHTTPRequestHandler): def do_HEAD(self): - if self.path == "/root/test.csv": + if self.path == "/root/test.csv" or self.path == "/root/slow_send_test.csv": self.from_bytes = 0 self.end_bytes = len(lines) self.size = self.end_bytes @@ -101,6 +102,18 @@ class RequestHandler(http.server.BaseHTTPRequestHandler): print("Dropping connection") break + if self.path == "/root/slow_send_test.csv": + self.send_block_size = 81920 + + for c, i in enumerate( + range(self.from_bytes, self.end_bytes, self.send_block_size) + ): + self.wfile.write( + lines[i : min(i + self.send_block_size, self.end_bytes)] + ) + self.wfile.flush() + time.sleep(1) + elif self.path == "/": self.wfile.write(b"OK") diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index 01ade1acc4d..f5c6f54a1ea 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -1,5 +1,5 @@ import gzip -import json +import uuid import logging import os import io @@ -54,6 +54,7 @@ def started_cluster(): "configs/defaultS3.xml", "configs/named_collections.xml", "configs/schema_cache.xml", + "configs/blob_log.xml", ], user_configs=[ "configs/access.xml", @@ -104,11 +105,9 @@ def started_cluster(): cluster.shutdown() -def run_query(instance, query, stdin=None, settings=None): - # type: (ClickHouseInstance, str, object, dict) -> str - +def run_query(instance, query, *args, **kwargs): logging.info("Running query '{}'...".format(query)) - result = instance.query(query, stdin=stdin, settings=settings) + result = instance.query(query, *args, **kwargs) logging.info("Query finished") return result @@ -129,7 +128,7 @@ def run_query(instance, query, stdin=None, settings=None): ], ) def test_put(started_cluster, maybe_auth, positive, compression): - # type: (ClickHouseCluster) -> None + # type: (ClickHouseCluster, str, bool, str) -> None bucket = ( started_cluster.minio_bucket @@ -496,7 +495,7 @@ def test_put_get_with_globs(started_cluster): ], ) def test_multipart(started_cluster, maybe_auth, positive): - # type: (ClickHouseCluster) -> None + # type: (ClickHouseCluster, str, bool) -> None 
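+    # The changes below also tag the upload with an explicit query_id and then verify the
+    # matching MultiPartUploadCreate / MultiPartUploadWrite / MultiPartUploadComplete events
+    # in system.blob_storage_log (enabled via the new configs/blob_log.xml).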
bucket = ( started_cluster.minio_bucket @@ -529,7 +528,7 @@ def test_multipart(started_cluster, maybe_auth, positive): maybe_auth, table_format, ) - + put_query_id = uuid.uuid4().hex try: run_query( instance, @@ -539,6 +538,7 @@ def test_multipart(started_cluster, maybe_auth, positive): "s3_min_upload_part_size": min_part_size_bytes, "s3_max_single_part_upload_size": 0, }, + query_id=put_query_id, ) except helpers.client.QueryRuntimeException: if positive: @@ -583,6 +583,24 @@ def test_multipart(started_cluster, maybe_auth, positive): == "\t".join(map(str, [total_rows, total_rows * 2, total_rows * 3])) + "\n" ) + if positive: + instance.query("SYSTEM FLUSH LOGS") + blob_storage_log = instance.query(f"SELECT * FROM system.blob_storage_log") + + result = instance.query( + f"""SELECT + countIf(event_type == 'MultiPartUploadCreate'), + countIf(event_type == 'MultiPartUploadWrite'), + countIf(event_type == 'MultiPartUploadComplete'), + count() + FROM system.blob_storage_log WHERE query_id = '{put_query_id}'""" + ) + r = result.strip().split("\t") + assert int(r[0]) == 1, blob_storage_log + assert int(r[1]) >= 1, blob_storage_log + assert int(r[2]) == 1, blob_storage_log + assert int(r[0]) + int(r[1]) + int(r[2]) == int(r[3]), blob_storage_log + def test_remote_host_filter(started_cluster): instance = started_cluster.instances["restricted_dummy"] @@ -818,6 +836,15 @@ def test_storage_s3_get_unstable(started_cluster): assert result.splitlines() == ["500001,500000,0"] +def test_storage_s3_get_slow(started_cluster): + bucket = started_cluster.minio_bucket + instance = started_cluster.instances["dummy"] + table_format = "column1 Int64, column2 Int64, column3 Int64, column4 Int64" + get_query = f"SELECT count(), sum(column3), sum(column4) FROM s3('http://resolver:8081/{started_cluster.minio_bucket}/slow_send_test.csv', 'CSV', '{table_format}') FORMAT CSV" + result = run_query(instance, get_query) + assert result.splitlines() == ["500001,500000,0"] + + def test_storage_s3_put_uncompressed(started_cluster): bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] @@ -846,14 +873,34 @@ def test_storage_s3_put_uncompressed(started_cluster): name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename ), ) - - run_query(instance, "INSERT INTO {} VALUES ({})".format(name, "),(".join(data))) + insert_query_id = uuid.uuid4().hex + data_sep = "),(" + run_query( + instance, + "INSERT INTO {} VALUES ({})".format(name, data_sep.join(data)), + query_id=insert_query_id, + ) run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["753"] uncompressed_content = get_s3_file_content(started_cluster, bucket, filename) assert sum([int(i.split(",")[1]) for i in uncompressed_content.splitlines()]) == 753 + instance.query("SYSTEM FLUSH LOGS") + blob_storage_log = instance.query(f"SELECT * FROM system.blob_storage_log") + + result = instance.query( + f"""SELECT + countIf(event_type == 'Upload'), + countIf(remote_path == '{filename}'), + countIf(bucket == '{bucket}'), + count() + FROM system.blob_storage_log WHERE query_id = '{insert_query_id}'""" + ) + r = result.strip().split("\t") + assert int(r[0]) >= 1, blob_storage_log + assert all(col == r[0] for col in r), blob_storage_log + @pytest.mark.parametrize( "extension,method", @@ -944,13 +991,6 @@ def test_predefined_connection_configuration(started_cluster): instance.query("GRANT SELECT ON *.* TO user") instance.query(f"drop table if exists {name}", user="user") - error = instance.query_and_get_error( - f"CREATE TABLE 
{name} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')" - ) - assert ( - "To execute this query, it's necessary to have the grant NAMED COLLECTION ON s3_conf1" - in error - ) error = instance.query_and_get_error( f"CREATE TABLE {name} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')", user="user", @@ -975,11 +1015,6 @@ def test_predefined_connection_configuration(started_cluster): ) assert result == instance.query("SELECT number FROM numbers(10)") - error = instance.query_and_get_error("SELECT * FROM s3(no_collection)") - assert ( - "To execute this query, it's necessary to have the grant NAMED COLLECTION ON no_collection" - in error - ) error = instance.query_and_get_error("SELECT * FROM s3(no_collection)", user="user") assert ( "To execute this query, it's necessary to have the grant NAMED COLLECTION ON no_collection" diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index 9f41cfd176d..b1163a549b1 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -717,6 +717,8 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): keeper_path = f"/clickhouse/test_{table_name}" files_path = f"{table_name}_data" files_to_generate = 300 + row_num = 50 + total_rows = row_num * files_to_generate for instance in [node, node_2]: create_table( @@ -734,7 +736,7 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): create_mv(instance, table_name, dst_table_name) total_values = generate_random_files( - started_cluster, files_path, files_to_generate, row_num=1 + started_cluster, files_path, files_to_generate, row_num=row_num ) def get_count(node, table_name): @@ -743,13 +745,13 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): for _ in range(150): if ( get_count(node, dst_table_name) + get_count(node_2, dst_table_name) - ) == files_to_generate: + ) == total_rows: break time.sleep(1) if ( get_count(node, dst_table_name) + get_count(node_2, dst_table_name) - ) != files_to_generate: + ) != total_rows: info = node.query( f"SELECT * FROM system.s3queue WHERE zookeeper_path like '%{table_name}' ORDER BY file_name FORMAT Vertical" ) @@ -762,7 +764,7 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): list(map(int, l.split())) for l in run_query(node_2, get_query).splitlines() ] - assert len(res1) + len(res2) == files_to_generate + assert len(res1) + len(res2) == total_rows # Checking that all engines have made progress assert len(res1) > 0 @@ -774,7 +776,7 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): time.sleep(10) assert ( get_count(node, dst_table_name) + get_count(node_2, dst_table_name) - ) == files_to_generate + ) == total_rows def test_max_set_age(started_cluster): diff --git a/tests/integration/test_temporary_data_in_cache/configs/config.d/storage_configuration.xml b/tests/integration/test_temporary_data_in_cache/configs/config.d/storage_configuration.xml index b753123a5ef..5a087d03266 100644 --- a/tests/integration/test_temporary_data_in_cache/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_temporary_data_in_cache/configs/config.d/storage_configuration.xml @@ -12,6 +12,7 @@ /tiny_local_cache/ 10M 1M + 1M 1 diff --git a/tests/integration/test_ttl_move/test.py b/tests/integration/test_ttl_move/test.py index c1c076277bb..2f18a0a1afa 100644 --- a/tests/integration/test_ttl_move/test.py +++ b/tests/integration/test_ttl_move/test.py @@ 
-1,5 +1,5 @@ +import inspect import random -import string import threading import time from multiprocessing.dummy import Pool @@ -8,6 +8,8 @@ from helpers.test_tools import assert_logs_contain_with_retry import pytest from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster +from helpers.network import PartitionManager +from helpers.test_tools import assert_eq_with_retry # FIXME: each sleep(1) is a time bomb, and not only this cause false positive # it also makes the test not reliable (i.e. assertions may be wrong, due timing issues) @@ -26,6 +28,7 @@ node1 = cluster.add_instance( with_zookeeper=True, tmpfs=["/jbod1:size=40M", "/jbod2:size=40M", "/external:size=200M"], macros={"shard": 0, "replica": 1}, + stay_alive=True, ) node2 = cluster.add_instance( @@ -1813,3 +1816,117 @@ def test_ttl_move_if_exists(started_cluster, name, dest_type): node2.query("DROP TABLE IF EXISTS {} SYNC".format(name)) except: pass + + +class TestCancelBackgroundMoving: + @pytest.fixture() + def prepare_table(self, request, started_cluster): + name = unique_table_name(request.node.name) + engine = f"ReplicatedMergeTree('/clickhouse/{name}', '1')" + + node1.query( + f""" + CREATE TABLE {name} ( + s1 String, + d1 DateTime + ) ENGINE = {engine} + ORDER BY tuple() + TTL d1 + interval 5 second TO DISK 'external' + SETTINGS storage_policy='small_jbod_with_external' + """ + ) + + node1.query("SYSTEM STOP MOVES") + + # Insert part which is about to move + node1.query( + "INSERT INTO {} (s1, d1) VALUES (randomPrintableASCII({}), toDateTime({}))".format( + name, 10 * 1024 * 1024, time.time() + ) + ) + + # Set low bandwidth to have enough time to cancel part moving + config = inspect.cleandoc( + f""" + + { 256 * 1024 } + + """ + ) + node1.replace_config( + "/etc/clickhouse-server/config.d/disk_throttling.xml", config + ) + node1.restart_clickhouse() + + try: + yield name + finally: + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") + + def test_cancel_background_moving_on_stop_moves_query(self, prepare_table): + name = prepare_table + + # Wait for background moving task to be started + node1.query("SYSTEM START MOVES") + assert_eq_with_retry( + node1, + f"SELECT count() FROM system.moves WHERE table = '{name}'".strip(), + "1", + ) + + # Wait for background moving task to be cancelled + node1.query("SYSTEM STOP MOVES") + assert_logs_contain_with_retry( + node1, "MergeTreeBackgroundExecutor.*Cancelled moving parts" + ) + assert_eq_with_retry( + node1, + f"SELECT count() FROM system.moves WHERE table = '{name}'".strip(), + "0", + ) + + # Ensure that part was not moved + assert set(get_used_disks_for_table(node1, name)) == {"jbod1"} + + def test_cancel_background_moving_on_table_detach(self, prepare_table): + name = prepare_table + + # Wait for background moving task to be started + node1.query("SYSTEM START MOVES") + assert_eq_with_retry( + node1, + f"SELECT count() FROM system.moves WHERE table = '{name}'".strip(), + "1", + ) + + # Wait for background moving task to be cancelled + node1.query(f"DETACH Table {name}") + assert_logs_contain_with_retry( + node1, "MergeTreeBackgroundExecutor.*Cancelled moving parts" + ) + assert_eq_with_retry( + node1, + f"SELECT count() FROM system.moves WHERE table = '{name}'".strip(), + "0", + ) + + def test_cancel_background_moving_on_zookeeper_disconnect(self, prepare_table): + name = prepare_table + + # Wait for background moving task to be started + node1.query("SYSTEM START MOVES") + assert_eq_with_retry( + node1, + f"SELECT count() FROM system.moves 
WHERE table = '{name}'".strip(), + "1", + ) + + with PartitionManager() as pm: + pm.drop_instance_zk_connections(node1) + # Wait for background moving task to be cancelled + assert_logs_contain_with_retry( + node1, + "MergeTreeBackgroundExecutor.*Cancelled moving parts", + retry_count=30, + sleep_time=1, + ) diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py index 117ebe37dd2..119a211ae45 100644 --- a/tests/integration/test_ttl_replicated/test.py +++ b/tests/integration/test_ttl_replicated/test.py @@ -17,7 +17,7 @@ node4 = cluster.add_instance( "node4", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.12.4.5", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, main_configs=[ @@ -30,7 +30,7 @@ node5 = cluster.add_instance( "node5", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.12.4.5", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, main_configs=[ @@ -42,7 +42,7 @@ node6 = cluster.add_instance( "node6", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.12.4.5", + tag="20.8.11.17", stay_alive=True, with_installed_binary=True, main_configs=[ @@ -66,47 +66,41 @@ def started_cluster(): cluster.shutdown() -def drop_table(nodes, table_name): - for node in nodes: - node.query("DROP TABLE IF EXISTS {} SYNC".format(table_name)) - - # Column TTL works only with wide parts, because it's very expensive to apply it for compact parts def test_ttl_columns(started_cluster): - drop_table([node1, node2], "test_ttl") + table_name = f"test_ttl_{node1.name}_{node2.name}" for node in [node1, node2]: node.query( """ - CREATE TABLE test_ttl(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) + CREATE TABLE {table_name}(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_columns', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0, min_bytes_for_wide_part=0, max_merge_selecting_sleep_ms=6000; """.format( - replica=node.name + table_name=table_name, replica=node.name ) ) node1.query( - "INSERT INTO test_ttl VALUES (toDateTime('2000-10-10 00:00:00'), 1, 1, 3)" + f"INSERT INTO {table_name} VALUES (toDateTime('2000-10-10 00:00:00'), 1, 1, 3)" ) node1.query( - "INSERT INTO test_ttl VALUES (toDateTime('2000-10-11 10:00:00'), 2, 2, 4)" + f"INSERT INTO {table_name} VALUES (toDateTime('2000-10-11 10:00:00'), 2, 2, 4)" ) time.sleep(1) # sleep to allow use ttl merge selector for second time - node1.query("OPTIMIZE TABLE test_ttl FINAL") + node1.query(f"OPTIMIZE TABLE {table_name} FINAL") expected = "1\t0\t0\n2\t0\t0\n" - assert TSV(node1.query("SELECT id, a, b FROM test_ttl ORDER BY id")) == TSV( + assert TSV(node1.query(f"SELECT id, a, b FROM {table_name} ORDER BY id")) == TSV( expected ) - assert TSV(node2.query("SELECT id, a, b FROM test_ttl ORDER BY id")) == TSV( + assert TSV(node2.query(f"SELECT id, a, b FROM {table_name} ORDER BY id")) == TSV( expected ) def test_merge_with_ttl_timeout(started_cluster): - table = "test_merge_with_ttl_timeout" - drop_table([node1, node2], table) + table = f"test_merge_with_ttl_timeout_{node1.name}_{node2.name}" for node in [node1, node2]: node.query( """ @@ -157,11 +151,11 @@ def test_merge_with_ttl_timeout(started_cluster): def test_ttl_many_columns(started_cluster): - drop_table([node1, node2], "test_ttl_2") + table = f"test_ttl_2{node1.name}_{node2.name}" for 
node in [node1, node2]: node.query( """ - CREATE TABLE test_ttl_2(date DateTime, id UInt32, + CREATE TABLE {table}(date DateTime, id UInt32, a Int32 TTL date, _idx Int32 TTL date, _offset Int32 TTL date, @@ -169,44 +163,40 @@ def test_ttl_many_columns(started_cluster): ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_2', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0, max_merge_selecting_sleep_ms=6000; """.format( - replica=node.name + table=table, replica=node.name ) ) - node1.query("SYSTEM STOP TTL MERGES test_ttl_2") - node2.query("SYSTEM STOP TTL MERGES test_ttl_2") + node1.query(f"SYSTEM STOP TTL MERGES {table}") + node2.query(f"SYSTEM STOP TTL MERGES {table}") node1.query( - "INSERT INTO test_ttl_2 VALUES (toDateTime('2000-10-10 00:00:00'), 1, 2, 3, 4, 5)" + f"INSERT INTO {table} VALUES (toDateTime('2000-10-10 00:00:00'), 1, 2, 3, 4, 5)" ) node1.query( - "INSERT INTO test_ttl_2 VALUES (toDateTime('2100-10-10 10:00:00'), 6, 7, 8, 9, 10)" + f"INSERT INTO {table} VALUES (toDateTime('2100-10-10 10:00:00'), 6, 7, 8, 9, 10)" ) - node2.query("SYSTEM SYNC REPLICA test_ttl_2", timeout=5) + node2.query(f"SYSTEM SYNC REPLICA {table}", timeout=5) # Check that part will appear in result of merge - node1.query("SYSTEM STOP FETCHES test_ttl_2") - node2.query("SYSTEM STOP FETCHES test_ttl_2") + node1.query(f"SYSTEM STOP FETCHES {table}") + node2.query(f"SYSTEM STOP FETCHES {table}") - node1.query("SYSTEM START TTL MERGES test_ttl_2") - node2.query("SYSTEM START TTL MERGES test_ttl_2") + node1.query(f"SYSTEM START TTL MERGES {table}") + node2.query(f"SYSTEM START TTL MERGES {table}") time.sleep(1) # sleep to allow use ttl merge selector for second time - node1.query("OPTIMIZE TABLE test_ttl_2 FINAL", timeout=5) + node1.query(f"OPTIMIZE TABLE {table} FINAL", timeout=5) - node2.query("SYSTEM SYNC REPLICA test_ttl_2", timeout=5) + node2.query(f"SYSTEM SYNC REPLICA {table}", timeout=5) expected = "1\t0\t0\t0\t0\n6\t7\t8\t9\t10\n" assert TSV( - node1.query( - "SELECT id, a, _idx, _offset, _partition FROM test_ttl_2 ORDER BY id" - ) + node1.query(f"SELECT id, a, _idx, _offset, _partition FROM {table} ORDER BY id") ) == TSV(expected) assert TSV( - node2.query( - "SELECT id, a, _idx, _offset, _partition FROM test_ttl_2 ORDER BY id" - ) + node2.query(f"SELECT id, a, _idx, _offset, _partition FROM {table} ORDER BY id") ) == TSV(expected) @@ -218,107 +208,107 @@ def test_ttl_many_columns(started_cluster): ], ) def test_ttl_table(started_cluster, delete_suffix): - drop_table([node1, node2], "test_ttl") + table = f"test_ttl_table_{delete_suffix}_{node1.name}_{node2.name}" for node in [node1, node2]: node.query( """ - CREATE TABLE test_ttl(date DateTime, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl', '{replica}') + CREATE TABLE {table}(date DateTime, id UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 1 DAY {delete_suffix} SETTINGS merge_with_ttl_timeout=0, max_merge_selecting_sleep_ms=6000; """.format( - replica=node.name, delete_suffix=delete_suffix + table=table, replica=node.name, delete_suffix=delete_suffix ) ) - node1.query("INSERT INTO test_ttl VALUES (toDateTime('2000-10-10 00:00:00'), 1)") - node1.query("INSERT INTO test_ttl VALUES (toDateTime('2000-10-11 10:00:00'), 2)") + node1.query(f"INSERT INTO {table} VALUES (toDateTime('2000-10-10 00:00:00'), 1)") + node1.query(f"INSERT INTO {table} VALUES 
(toDateTime('2000-10-11 10:00:00'), 2)") time.sleep(1) # sleep to allow use ttl merge selector for second time - node1.query("OPTIMIZE TABLE test_ttl FINAL") + node1.query(f"OPTIMIZE TABLE {table} FINAL") - assert TSV(node1.query("SELECT * FROM test_ttl")) == TSV("") - assert TSV(node2.query("SELECT * FROM test_ttl")) == TSV("") + assert TSV(node1.query(f"SELECT * FROM {table}")) == TSV("") + assert TSV(node2.query(f"SELECT * FROM {table}")) == TSV("") def test_modify_ttl(started_cluster): - drop_table([node1, node2], "test_ttl") + table = f"test_modify_ttl_{node1.name}_{node2.name}" for node in [node1, node2]: node.query( """ - CREATE TABLE test_ttl(d DateTime, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_modify', '{replica}') + CREATE TABLE {table}(d DateTime, id UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') ORDER BY id """.format( - replica=node.name + table=table, replica=node.name ) ) node1.query( - "INSERT INTO test_ttl VALUES (now() - INTERVAL 5 HOUR, 1), (now() - INTERVAL 3 HOUR, 2), (now() - INTERVAL 1 HOUR, 3)" + f"INSERT INTO {table} VALUES (now() - INTERVAL 5 HOUR, 1), (now() - INTERVAL 3 HOUR, 2), (now() - INTERVAL 1 HOUR, 3)" ) - node2.query("SYSTEM SYNC REPLICA test_ttl", timeout=20) + node2.query(f"SYSTEM SYNC REPLICA {table}", timeout=20) node1.query( - "ALTER TABLE test_ttl MODIFY TTL d + INTERVAL 4 HOUR SETTINGS replication_alter_partitions_sync = 2" + f"ALTER TABLE {table} MODIFY TTL d + INTERVAL 4 HOUR SETTINGS replication_alter_partitions_sync = 2" ) - assert node2.query("SELECT id FROM test_ttl") == "2\n3\n" + assert node2.query(f"SELECT id FROM {table}") == "2\n3\n" node2.query( - "ALTER TABLE test_ttl MODIFY TTL d + INTERVAL 2 HOUR SETTINGS replication_alter_partitions_sync = 2" + f"ALTER TABLE {table} MODIFY TTL d + INTERVAL 2 HOUR SETTINGS replication_alter_partitions_sync = 2" ) - assert node1.query("SELECT id FROM test_ttl") == "3\n" + assert node1.query(f"SELECT id FROM {table}") == "3\n" node1.query( - "ALTER TABLE test_ttl MODIFY TTL d + INTERVAL 30 MINUTE SETTINGS replication_alter_partitions_sync = 2" + f"ALTER TABLE {table} MODIFY TTL d + INTERVAL 30 MINUTE SETTINGS replication_alter_partitions_sync = 2" ) - assert node2.query("SELECT id FROM test_ttl") == "" + assert node2.query(f"SELECT id FROM {table}") == "" def test_modify_column_ttl(started_cluster): - drop_table([node1, node2], "test_ttl") + table = f"test_modify_column_ttl_{node1.name}_{node2.name}" for node in [node1, node2]: node.query( """ - CREATE TABLE test_ttl(d DateTime, id UInt32 DEFAULT 42) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_column', '{replica}') + CREATE TABLE {table}(d DateTime, id UInt32 DEFAULT 42) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') ORDER BY d """.format( - replica=node.name + table=table, replica=node.name ) ) node1.query( - "INSERT INTO test_ttl VALUES (now() - INTERVAL 5 HOUR, 1), (now() - INTERVAL 3 HOUR, 2), (now() - INTERVAL 1 HOUR, 3)" + f"INSERT INTO {table} VALUES (now() - INTERVAL 5 HOUR, 1), (now() - INTERVAL 3 HOUR, 2), (now() - INTERVAL 1 HOUR, 3)" ) - node2.query("SYSTEM SYNC REPLICA test_ttl", timeout=20) + node2.query(f"SYSTEM SYNC REPLICA {table}", timeout=20) node1.query( - "ALTER TABLE test_ttl MODIFY COLUMN id UInt32 TTL d + INTERVAL 4 HOUR SETTINGS replication_alter_partitions_sync = 2" + f"ALTER TABLE {table} MODIFY COLUMN id UInt32 TTL d + INTERVAL 4 HOUR SETTINGS replication_alter_partitions_sync = 2" ) - assert 
node2.query("SELECT id FROM test_ttl") == "42\n2\n3\n" + assert node2.query(f"SELECT id FROM {table}") == "42\n2\n3\n" node1.query( - "ALTER TABLE test_ttl MODIFY COLUMN id UInt32 TTL d + INTERVAL 2 HOUR SETTINGS replication_alter_partitions_sync = 2" + f"ALTER TABLE {table} MODIFY COLUMN id UInt32 TTL d + INTERVAL 2 HOUR SETTINGS replication_alter_partitions_sync = 2" ) - assert node1.query("SELECT id FROM test_ttl") == "42\n42\n3\n" + assert node1.query(f"SELECT id FROM {table}") == "42\n42\n3\n" node1.query( - "ALTER TABLE test_ttl MODIFY COLUMN id UInt32 TTL d + INTERVAL 30 MINUTE SETTINGS replication_alter_partitions_sync = 2" + f"ALTER TABLE {table} MODIFY COLUMN id UInt32 TTL d + INTERVAL 30 MINUTE SETTINGS replication_alter_partitions_sync = 2" ) - assert node2.query("SELECT id FROM test_ttl") == "42\n42\n42\n" + assert node2.query(f"SELECT id FROM {table}") == "42\n42\n42\n" def test_ttl_double_delete_rule_returns_error(started_cluster): - drop_table([node1, node2], "test_ttl") + table = "test_ttl_double_delete_rule_returns_error" try: node1.query( """ - CREATE TABLE test_ttl(date DateTime, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_double_delete', '{replica}') + CREATE TABLE {table}(date DateTime, id UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 1 DAY, date + INTERVAL 2 DAY SETTINGS merge_with_ttl_timeout=0, max_merge_selecting_sleep_ms=6000 """.format( - replica=node1.name + table=table, replica=node1.name ) ) assert False @@ -364,7 +354,6 @@ def test_ttl_alter_delete(started_cluster, name, engine): for a table that has TTL delete expression defined but no explicit storage policy assigned. """ - drop_table([node1], name) node1.query( """ @@ -426,7 +415,6 @@ def test_ttl_alter_delete(started_cluster, name, engine): def test_ttl_empty_parts(started_cluster): - drop_table([node1, node2], "test_ttl_empty_parts") for node in [node1, node2]: node.query( """ @@ -519,65 +507,59 @@ def test_ttl_empty_parts(started_cluster): [(node1, node2, 0), (node3, node4, 1), (node5, node6, 2)], ) def test_ttl_compatibility(started_cluster, node_left, node_right, num_run): - drop_table([node_left, node_right], "test_ttl_delete") - drop_table([node_left, node_right], "test_ttl_group_by") - drop_table([node_left, node_right], "test_ttl_where") - + table = f"test_ttl_compatibility_{node_left.name}_{node_right.name}_{num_run}" for node in [node_left, node_right]: node.query( """ - CREATE TABLE test_ttl_delete(date DateTime, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_delete_{suff}', '{replica}') + CREATE TABLE {table}_delete(date DateTime, id UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}_delete', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 3 SECOND - SETTINGS max_number_of_merges_with_ttl_in_pool=100, max_replicated_merges_with_ttl_in_queue=100 """.format( - suff=num_run, replica=node.name + table=table, replica=node.name ) ) node.query( """ - CREATE TABLE test_ttl_group_by(date DateTime, id UInt32, val UInt64) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_group_by_{suff}', '{replica}') + CREATE TABLE {table}_group_by(date DateTime, id UInt32, val UInt64) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}_group_by', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 3 SECOND GROUP BY id SET val = sum(val) - SETTINGS 
max_number_of_merges_with_ttl_in_pool=100, max_replicated_merges_with_ttl_in_queue=100 """.format( - suff=num_run, replica=node.name + table=table, replica=node.name ) ) node.query( """ - CREATE TABLE test_ttl_where(date DateTime, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_where_{suff}', '{replica}') + CREATE TABLE {table}_where(date DateTime, id UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}_where', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 3 SECOND DELETE WHERE id % 2 = 1 - SETTINGS max_number_of_merges_with_ttl_in_pool=100, max_replicated_merges_with_ttl_in_queue=100 """.format( - suff=num_run, replica=node.name + table=table, replica=node.name ) ) - node_left.query("INSERT INTO test_ttl_delete VALUES (now(), 1)") + node_left.query(f"INSERT INTO {table}_delete VALUES (now(), 1)") node_left.query( - "INSERT INTO test_ttl_delete VALUES (toDateTime('2100-10-11 10:00:00'), 2)" + f"INSERT INTO {table}_delete VALUES (toDateTime('2100-10-11 10:00:00'), 2)" ) - node_right.query("INSERT INTO test_ttl_delete VALUES (now(), 3)") + node_right.query(f"INSERT INTO {table}_delete VALUES (now(), 3)") node_right.query( - "INSERT INTO test_ttl_delete VALUES (toDateTime('2100-10-11 10:00:00'), 4)" + f"INSERT INTO {table}_delete VALUES (toDateTime('2100-10-11 10:00:00'), 4)" ) - node_left.query("INSERT INTO test_ttl_group_by VALUES (now(), 0, 1)") - node_left.query("INSERT INTO test_ttl_group_by VALUES (now(), 0, 2)") - node_right.query("INSERT INTO test_ttl_group_by VALUES (now(), 0, 3)") - node_right.query("INSERT INTO test_ttl_group_by VALUES (now(), 0, 4)") + node_left.query(f"INSERT INTO {table}_group_by VALUES (now(), 0, 1)") + node_left.query(f"INSERT INTO {table}_group_by VALUES (now(), 0, 2)") + node_right.query(f"INSERT INTO {table}_group_by VALUES (now(), 0, 3)") + node_right.query(f"INSERT INTO {table}_group_by VALUES (now(), 0, 4)") - node_left.query("INSERT INTO test_ttl_where VALUES (now(), 1)") - node_left.query("INSERT INTO test_ttl_where VALUES (now(), 2)") - node_right.query("INSERT INTO test_ttl_where VALUES (now(), 3)") - node_right.query("INSERT INTO test_ttl_where VALUES (now(), 4)") + node_left.query(f"INSERT INTO {table}_where VALUES (now(), 1)") + node_left.query(f"INSERT INTO {table}_where VALUES (now(), 2)") + node_right.query(f"INSERT INTO {table}_where VALUES (now(), 3)") + node_right.query(f"INSERT INTO {table}_where VALUES (now(), 4)") if node_left.with_installed_binary: node_left.restart_with_latest_version() @@ -588,13 +570,13 @@ def test_ttl_compatibility(started_cluster, node_left, node_right, num_run): time.sleep(5) # Wait for TTL # after restart table can be in readonly mode - exec_query_with_retry(node_right, "OPTIMIZE TABLE test_ttl_delete FINAL") - node_right.query("OPTIMIZE TABLE test_ttl_group_by FINAL") - node_right.query("OPTIMIZE TABLE test_ttl_where FINAL") + exec_query_with_retry(node_right, f"OPTIMIZE TABLE {table}_delete FINAL") + node_right.query(f"OPTIMIZE TABLE {table}_group_by FINAL") + node_right.query(f"OPTIMIZE TABLE {table}_where FINAL") - exec_query_with_retry(node_left, "OPTIMIZE TABLE test_ttl_delete FINAL") - node_left.query("OPTIMIZE TABLE test_ttl_group_by FINAL", timeout=20) - node_left.query("OPTIMIZE TABLE test_ttl_where FINAL", timeout=20) + exec_query_with_retry(node_left, f"OPTIMIZE TABLE {table}_delete FINAL") + node_left.query(f"OPTIMIZE TABLE {table}_group_by FINAL", timeout=20) + node_left.query(f"OPTIMIZE TABLE {table}_where FINAL", 
timeout=20) # After OPTIMIZE TABLE, it is not guaranteed that everything is merged. # Possible scenario (for test_ttl_group_by): @@ -605,19 +587,19 @@ def test_ttl_compatibility(started_cluster, node_left, node_right, num_run): # 4. OPTIMIZE FINAL does nothing, cause there is an entry for 0_3 # # So, let's also sync replicas for node_right (for now). - exec_query_with_retry(node_right, "SYSTEM SYNC REPLICA test_ttl_delete") - node_right.query("SYSTEM SYNC REPLICA test_ttl_group_by", timeout=20) - node_right.query("SYSTEM SYNC REPLICA test_ttl_where", timeout=20) + exec_query_with_retry(node_right, f"SYSTEM SYNC REPLICA {table}_delete") + node_right.query(f"SYSTEM SYNC REPLICA {table}_group_by", timeout=20) + node_right.query(f"SYSTEM SYNC REPLICA {table}_where", timeout=20) - exec_query_with_retry(node_left, "SYSTEM SYNC REPLICA test_ttl_delete") - node_left.query("SYSTEM SYNC REPLICA test_ttl_group_by", timeout=20) - node_left.query("SYSTEM SYNC REPLICA test_ttl_where", timeout=20) + exec_query_with_retry(node_left, f"SYSTEM SYNC REPLICA {table}_delete") + node_left.query(f"SYSTEM SYNC REPLICA {table}_group_by", timeout=20) + node_left.query(f"SYSTEM SYNC REPLICA {table}_where", timeout=20) - assert node_left.query("SELECT id FROM test_ttl_delete ORDER BY id") == "2\n4\n" - assert node_right.query("SELECT id FROM test_ttl_delete ORDER BY id") == "2\n4\n" + assert node_left.query(f"SELECT id FROM {table}_delete ORDER BY id") == "2\n4\n" + assert node_right.query(f"SELECT id FROM {table}_delete ORDER BY id") == "2\n4\n" - assert node_left.query("SELECT val FROM test_ttl_group_by ORDER BY id") == "10\n" - assert node_right.query("SELECT val FROM test_ttl_group_by ORDER BY id") == "10\n" + assert node_left.query(f"SELECT val FROM {table}_group_by ORDER BY id") == "10\n" + assert node_right.query(f"SELECT val FROM {table}_group_by ORDER BY id") == "10\n" - assert node_left.query("SELECT id FROM test_ttl_where ORDER BY id") == "2\n4\n" - assert node_right.query("SELECT id FROM test_ttl_where ORDER BY id") == "2\n4\n" + assert node_left.query(f"SELECT id FROM {table}_where ORDER BY id") == "2\n4\n" + assert node_right.query(f"SELECT id FROM {table}_where ORDER BY id") == "2\n4\n" diff --git a/tests/integration/test_version_update/test.py b/tests/integration/test_version_update/test.py index b8fa3e7ebb4..a752960bc76 100644 --- a/tests/integration/test_version_update/test.py +++ b/tests/integration/test_version_update/test.py @@ -12,18 +12,18 @@ node2 = cluster.add_instance( "node2", with_zookeeper=True, image="yandex/clickhouse-server", - tag="21.2", + tag="20.8.11.17", with_installed_binary=True, stay_alive=True, allow_analyzer=False, ) -# Use differents nodes because if there is node.restart_from_latest_version(), then in later tests +# Use different nodes because if there is node.restart_from_latest_version(), then in later tests # it will be with latest version, but shouldn't, order of tests in CI is shuffled. 
node3 = cluster.add_instance( "node3", image="yandex/clickhouse-server", - tag="21.5", + tag="21.6", with_installed_binary=True, stay_alive=True, allow_analyzer=False, @@ -31,7 +31,7 @@ node3 = cluster.add_instance( node4 = cluster.add_instance( "node4", image="yandex/clickhouse-server", - tag="21.5", + tag="21.6", with_installed_binary=True, stay_alive=True, allow_analyzer=False, @@ -39,7 +39,7 @@ node4 = cluster.add_instance( node5 = cluster.add_instance( "node5", image="yandex/clickhouse-server", - tag="21.5", + tag="21.6", with_installed_binary=True, stay_alive=True, allow_analyzer=False, @@ -47,7 +47,7 @@ node5 = cluster.add_instance( node6 = cluster.add_instance( "node6", image="yandex/clickhouse-server", - tag="21.5", + tag="21.6", with_installed_binary=True, stay_alive=True, allow_analyzer=False, diff --git a/tests/integration/test_version_update_after_mutation/test.py b/tests/integration/test_version_update_after_mutation/test.py index f3ae190ee46..4e84b4c10ca 100644 --- a/tests/integration/test_version_update_after_mutation/test.py +++ b/tests/integration/test_version_update_after_mutation/test.py @@ -10,7 +10,7 @@ node1 = cluster.add_instance( "node1", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.4.9.110", + tag="20.8.11.17", with_installed_binary=True, stay_alive=True, main_configs=[ @@ -22,7 +22,7 @@ node2 = cluster.add_instance( "node2", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.4.9.110", + tag="20.8.11.17", with_installed_binary=True, stay_alive=True, main_configs=[ @@ -34,7 +34,7 @@ node3 = cluster.add_instance( "node3", with_zookeeper=True, image="yandex/clickhouse-server", - tag="20.4.9.110", + tag="20.8.11.17", with_installed_binary=True, stay_alive=True, main_configs=[ @@ -72,8 +72,8 @@ def test_mutate_and_upgrade(start_cluster): node1.query("DETACH TABLE mt") # stop being leader node1.query("SYSTEM FLUSH LOGS") node2.query("SYSTEM FLUSH LOGS") - node1.restart_with_latest_version(signal=9, fix_metadata=True) - node2.restart_with_latest_version(signal=9, fix_metadata=True) + node1.restart_with_latest_version(signal=9, fix_metadata=False) + node2.restart_with_latest_version(signal=9, fix_metadata=False) # After hard restart table can be in readonly mode exec_query_with_retry( @@ -129,7 +129,7 @@ def test_upgrade_while_mutation(start_cluster): # (We could be in process of creating some system table, which will leave empty directory on restart, # so when we start moving system tables from ordinary to atomic db, it will complain about some undeleted files) node3.query("SYSTEM FLUSH LOGS") - node3.restart_with_latest_version(signal=9, fix_metadata=True) + node3.restart_with_latest_version(signal=9, fix_metadata=False) # checks for readonly exec_query_with_retry(node3, "OPTIMIZE TABLE mt1", sleep_time=5, retry_count=60) diff --git a/tests/performance/avg_weighted.xml b/tests/performance/avg_weighted.xml index 5aa89b08c35..edf3c19fdfa 100644 --- a/tests/performance/avg_weighted.xml +++ b/tests/performance/avg_weighted.xml @@ -27,10 +27,6 @@ SELECT avg(num_f) FROM perf_avg FORMAT Null SELECT avgWeighted(num_f, num) FROM perf_avg FORMAT Null SELECT avgWeighted(num_f, num_f) FROM perf_avg FORMAT Null - SELECT avgWeighted(num_f, num_u) FROM perf_avg FORMAT Null - SELECT avgWeighted(num_u, num_f) FROM perf_avg FORMAT Null - SELECT avgWeighted(num_u, num) FROM perf_avg FORMAT Null - SELECT avgWeighted(num_u, num_u) FROM perf_avg FORMAT Null SELECT avgWeighted(num_f, num_f) FROM perf_avg FORMAT Null SELECT avgWeighted(toNullable(num_f), 
num_f) FROM perf_avg FORMAT Null diff --git a/tests/performance/orc_filter_push_down.xml b/tests/performance/orc_filter_push_down.xml index 9f49c20a075..318c6eca991 100644 --- a/tests/performance/orc_filter_push_down.xml +++ b/tests/performance/orc_filter_push_down.xml @@ -23,4 +23,4 @@ select a % 10, length(b) % 10, count(1) from test_orc_fpd where a in (9000000, 1000) group by a % 10, length(b) % 10 - \ No newline at end of file + diff --git a/tests/performance/orc_tuple_field_prune.xml b/tests/performance/orc_tuple_field_prune.xml new file mode 100644 index 00000000000..d95787af93b --- /dev/null +++ b/tests/performance/orc_tuple_field_prune.xml @@ -0,0 +1,16 @@ + + + 1 + 10000 + + + + insert into function file('test_orc_tfp.orc', 'ORC') select * from generateRandom('tuple_column Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)), array_tuple_column Nested(a Nullable(String), b Nullable(Float64), c Nullable(Int64)), map_tuple_column Map(String, Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)))') limit 1000000 + + + DROP TABLE IF EXISTS test_orc_tfp + + select * from file('test_orc_tfp.orc', 'ORC', 'tuple_column Tuple(c Nullable(Int64))') format Null + select * from file('test_orc_tfp.orc', 'ORC', 'array_tuple_column Nested(c Nullable(Int64))') format Null + select * from file('test_orc_tfp.orc', 'ORC', 'map_tuple_column Map(String, Tuple(c Nullable(Int64)))') format Null + diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference index 00a2cd14700..cd9f0142d45 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference @@ -3,7 +3,7 @@ runtime exceptions 0.05 unknown runtime exceptions 0.01 messages shorter than 10 1 messages shorter than 16 3 -exceptions shorter than 30 3 +exceptions shorter than 30 3 [] noisy messages 0.3 noisy Trace messages 0.16 noisy Debug messages 0.09 diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql index 63432f127aa..d1ac8773aca 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql @@ -13,11 +13,11 @@ select 'runtime messages', greatest(coalesce(sum(length(message_format_string) = where message not like '% Received from %clickhouse-staging.com:9440%'; -- Check the same for exceptions. 
The value was 0.03 -select 'runtime exceptions', max2(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0), 0.05) from logs +select 'runtime exceptions', greatest(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0), 0.05) from logs where (message like '%DB::Exception%' or message like '%Coordination::Exception%') and message not like '% Received from %clickhouse-staging.com:9440%'; -select 'unknown runtime exceptions', max2(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0), 0.01) from logs where +select 'unknown runtime exceptions', greatest(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0), 0.01) from logs where (message like '%DB::Exception%' or message like '%Coordination::Exception%') and message not like '% Received from %' and message not like '%(SYNTAX_ERROR)%'; @@ -50,10 +50,15 @@ create temporary table known_short_messages (s String) as select * from (select ] as arr) array join arr; -- Check that we don't have too many short meaningless message patterns. -select 'messages shorter than 10', max2(countDistinctOrDefault(message_format_string), 1) from logs where length(message_format_string) < 10 and message_format_string not in known_short_messages; +select 'messages shorter than 10', + greatest(uniqExact(message_format_string), 1) + from logs + where length(message_format_string) < 10 and message_format_string not in known_short_messages; -- Same as above. Feel free to update the threshold or remove this query if really necessary -select 'messages shorter than 16', max2(countDistinctOrDefault(message_format_string), 3) from logs where length(message_format_string) < 16 and message_format_string not in known_short_messages; +select 'messages shorter than 16', + greatest(uniqExact(message_format_string), 3) + from logs where length(message_format_string) < 16 and message_format_string not in known_short_messages; -- Unlike above, here we look at length of the formatted message, not format string. Most short format strings are fine because they end up decorated with context from outer or inner exceptions, e.g.: -- "Expected end of line" -> "Code: 117. DB::Exception: Expected end of line: (in file/uri /var/lib/clickhouse/user_files/data_02118): (at row 1)" @@ -62,42 +67,53 @@ select 'messages shorter than 16', max2(countDistinctOrDefault(message_format_st -- This table currently doesn't have enough information to do this reliably, so we just regex search for " (ERROR_NAME_IN_CAPS)" and hope that's good enough. -- For the "Code: 123. DB::Exception: " part, we just subtract 26 instead of searching for it. Because sometimes it's not at the start, e.g.: -- "Unexpected error, will try to restart main thread: Code: 341. DB::Exception: Unexpected error: Code: 57. DB::Exception:[...]" -select 'exceptions shorter than 30', max2(countDistinctOrDefault(message_format_string), 3) from logs - where message ilike '%DB::Exception%' and if(length(regexpExtract(message, '(.*)\\([A-Z0-9_]+\\)')) as pref > 0, pref, length(message)) < 30 + 26 and message_format_string not in known_short_messages; +select 'exceptions shorter than 30', + greatest(uniqExact(message_format_string), 3) AS c, + c = 3 ? [] : groupUniqArray(message_format_string) + from logs + where message ilike '%DB::Exception%' and if(length(extract(message, '(.*)\\([A-Z0-9_]+\\)')) as pref > 0, pref, length(message)) < 30 + 26 and message_format_string not in known_short_messages; -- Avoid too noisy messages: top 1 message frequency must be less than 30%. 
We should reduce the threshold -select 'noisy messages', max2((select count() from logs group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.30); +select 'noisy messages', + greatest((select count() from logs group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.30); -- Same as above, but excluding Test level (actually finds top 1 Trace message) with ('Access granted: {}{}', '{} -> {}') as frequent_in_tests -select 'noisy Trace messages', max2((select count() from logs where level!='Test' and message_format_string not in frequent_in_tests - group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.16); +select 'noisy Trace messages', + greatest((select count() from logs where level!='Test' and message_format_string not in frequent_in_tests + group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.16); -- Same as above for Debug -select 'noisy Debug messages', max2((select count() from logs where level <= 'Debug' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.09); +select 'noisy Debug messages', + greatest((select count() from logs where level <= 'Debug' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.09); -- Same as above for Info -select 'noisy Info messages', max2((select count() from logs where level <= 'Information' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.05); +select 'noisy Info messages', + greatest((select count() from logs where level <= 'Information' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.05); -- Same as above for Warning with ('Not enabled four letter command {}') as frequent_in_tests -select 'noisy Warning messages', max2(coalesce((select countOrDefault() from logs where level = 'Warning' and message_format_string not in frequent_in_tests +select 'noisy Warning messages', + greatest(coalesce((select count() from logs where level = 'Warning' and message_format_string not in frequent_in_tests group by message_format_string order by count() desc limit 1), 0) / (select count() from logs), 0.01); -- Same as above for Error -select 'noisy Error messages', max2(coalesce((select countOrDefault() from logs where level = 'Error' group by message_format_string order by count() desc limit 1), 0) / (select count() from logs), 0.02); +select 'noisy Error messages', + greatest(coalesce((select count() from logs where level = 'Error' group by message_format_string order by count() desc limit 1), 0) / (select count() from logs), 0.02); select 'no Fatal messages', count() from logs where level = 'Fatal'; -- Avoid too noisy messages: limit the number of messages with high frequency -select 'number of too noisy messages', max2(count(), 3) from (select count() / (select count() from logs) as freq, message_format_string from logs group by message_format_string having freq > 0.10); -select 'number of noisy messages', max2(count(), 10) from (select count() / (select count() from logs) as freq, message_format_string from logs group by message_format_string having freq > 0.05); +select 'number of too noisy messages', + greatest(count(), 3) from (select count() / (select count() from logs) as freq, message_format_string from logs group by message_format_string having freq > 0.10); +select 'number of noisy messages', + greatest(count(), 10) from (select 
count() / (select count() from logs) as freq, message_format_string from logs group by message_format_string having freq > 0.05); -- Each message matches its pattern (returns 0 rows) --- FIXME maybe we should make it stricter ('Code:%Exception: '||s||'%'), but it's not easy because of addMessage -select 'incorrect patterns', max2(countDistinct(message_format_string), 15) from ( +-- Note: maybe we should make it stricter ('Code:%Exception: '||s||'%'), but it's not easy because of addMessage +select 'incorrect patterns', greatest(uniqExact(message_format_string), 15) from ( select message_format_string, any(message) as any_message from logs where ((rand() % 8) = 0) and message not like (replaceRegexpAll(message_format_string, '{[:.0-9dfx]*}', '%') as s) diff --git a/tests/queries/0_stateless/00059_shard_global_in_mergetree.reference b/tests/queries/0_stateless/00059_shard_global_in_mergetree.reference new file mode 100644 index 00000000000..208e649c056 --- /dev/null +++ b/tests/queries/0_stateless/00059_shard_global_in_mergetree.reference @@ -0,0 +1,7 @@ +20 +20 +20 +20 +20 +20 +20 diff --git a/tests/queries/0_stateless/00059_shard_global_in_mergetree.sql b/tests/queries/0_stateless/00059_shard_global_in_mergetree.sql new file mode 100644 index 00000000000..62eec6f324b --- /dev/null +++ b/tests/queries/0_stateless/00059_shard_global_in_mergetree.sql @@ -0,0 +1,25 @@ +-- Tags: shard + +-- test for #56790 + +DROP TABLE IF EXISTS test_local; + +CREATE TABLE test_local (x Int64) ENGINE = MergeTree order by x as select * from numbers(10); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where 'XXX' global in (select 'XXX'); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where * global in (select * from numbers(10)); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where * in (select * from numbers(10)); + +set prefer_localhost_replica=0; + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where 'XXX' global in (select 'XXX'); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where * global in (select * from numbers(10)); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where * in (select * from numbers(10)); + +DROP TABLE test_local; diff --git a/tests/queries/0_stateless/00116_storage_set.reference b/tests/queries/0_stateless/00116_storage_set.reference index 01bd24ebe17..b68e740f72b 100644 --- a/tests/queries/0_stateless/00116_storage_set.reference +++ b/tests/queries/0_stateless/00116_storage_set.reference @@ -19,3 +19,5 @@ abc Hello World abc +Hello +Hello diff --git a/tests/queries/0_stateless/00116_storage_set.sql b/tests/queries/0_stateless/00116_storage_set.sql index 0eeed7e859a..c156b387c8f 100644 --- a/tests/queries/0_stateless/00116_storage_set.sql +++ b/tests/queries/0_stateless/00116_storage_set.sql @@ -1,5 +1,6 @@ DROP TABLE IF EXISTS set; DROP TABLE IF EXISTS set2; +DROP TABLE IF EXISTS tab; CREATE TABLE set (x String) ENGINE = Set; @@ -26,4 +27,9 @@ SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s RENAME TABLE set2 TO set; SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s IN set; +create table tab (x String) engine = MergeTree order by x as select 'Hello'; +SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings 
allow_experimental_analyzer=0; +SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings allow_experimental_analyzer=1; +DROP TABLE tab; + DROP TABLE set; diff --git a/tests/queries/0_stateless/00727_concat.reference b/tests/queries/0_stateless/00727_concat.reference index af5626b4a11..6fb23c072d3 100644 --- a/tests/queries/0_stateless/00727_concat.reference +++ b/tests/queries/0_stateless/00727_concat.reference @@ -1 +1,74 @@ -Hello, world! +-- Const string + non-const arbitrary type +With 42 +With 43 +With 44 +With 45 +With 46 +With 47 +With 48 +With 49 +With 50 +With 51 +With 52 +With 53 +With 42.42 +With 43.43 +With 44 +With true +With false +With foo +With bar +With foo +With bar +With foo +With bar +With foo +With bar +With 42 +With 42 +With fae310ca-d52a-4923-9e9b-02bf67f4b009 +With 2023-11-14 +With 2123-11-14 +With 2023-11-14 05:50:12 +With 2023-11-14 05:50:12.123 +With hallo +With [\'foo\',\'bar\'] +With {"foo":"bar"} +With (42,\'foo\') +With {42:\'foo\'} +With 122.233.64.201 +With 2001:1:130f:2:3:9c0:876a:130b +With (42,43) +With [(0,0),(10,0),(10,10),(0,10)] +With [[(20,20),(50,20),(50,50),(20,50)],[(30,30),(50,50),(50,30)]] +With [[[(0,0),(10,0),(10,10),(0,10)]],[[(20,20),(50,20),(50,50),(20,50)],[(30,30),(50,50),(50,30)]]] +-- SimpleAggregateFunction +With 42 +With 4 +-- Nested +With [\'foo\',\'bar\'][\'qaz\',\'qux\'] +-- NULL arguments +\N +\N +\N +\N +\N +\N +\N +-- Various arguments tests +Non-const strings +Two arguments test +Three arguments test +3 arguments test with int type +42144 +42144255 +42144 +42144255 +-- Single argument tests +42 +42 +foo +foo +\N +\N +Testing the alias diff --git a/tests/queries/0_stateless/00727_concat.sql b/tests/queries/0_stateless/00727_concat.sql index 800ebd5ec53..f5048dcaaae 100644 --- a/tests/queries/0_stateless/00727_concat.sql +++ b/tests/queries/0_stateless/00727_concat.sql @@ -1 +1,96 @@ -SELECT CONCAT('Hello', ', ', 'world!'); +-- Tags: no-fasttest +-- no-fasttest: json type needs rapidjson library, geo types need s2 geometry + +SET allow_experimental_object_type = 1; +SET allow_suspicious_low_cardinality_types=1; + +SELECT '-- Const string + non-const arbitrary type'; +SELECT concat('With ', materialize(42 :: Int8)); +SELECT concat('With ', materialize(43 :: Int16)); +SELECT concat('With ', materialize(44 :: Int32)); +SELECT concat('With ', materialize(45 :: Int64)); +SELECT concat('With ', materialize(46 :: Int128)); +SELECT concat('With ', materialize(47 :: Int256)); +SELECT concat('With ', materialize(48 :: UInt8)); +SELECT concat('With ', materialize(49 :: UInt16)); +SELECT concat('With ', materialize(50 :: UInt32)); +SELECT concat('With ', materialize(51 :: UInt64)); +SELECT concat('With ', materialize(52 :: UInt128)); +SELECT concat('With ', materialize(53 :: UInt256)); +SELECT concat('With ', materialize(42.42 :: Float32)); +SELECT concat('With ', materialize(43.43 :: Float64)); +SELECT concat('With ', materialize(44.44 :: Decimal(2))); +SELECT concat('With ', materialize(true :: Bool)); +SELECT concat('With ', materialize(false :: Bool)); +SELECT concat('With ', materialize('foo' :: String)); +SELECT concat('With ', materialize('bar' :: FixedString(3))); +SELECT concat('With ', materialize('foo' :: Nullable(String))); +SELECT concat('With ', materialize('bar' :: Nullable(FixedString(3)))); +SELECT concat('With ', materialize('foo' :: LowCardinality(String))); +SELECT concat('With ', materialize('bar' :: LowCardinality(FixedString(3)))); +SELECT concat('With ', materialize('foo' :: 
LowCardinality(Nullable(String)))); +SELECT concat('With ', materialize('bar' :: LowCardinality(Nullable(FixedString(3))))); +SELECT concat('With ', materialize(42 :: LowCardinality(Nullable(UInt32)))); +SELECT concat('With ', materialize(42 :: LowCardinality(UInt32))); +SELECT concat('With ', materialize('fae310ca-d52a-4923-9e9b-02bf67f4b009' :: UUID)); +SELECT concat('With ', materialize('2023-11-14' :: Date)); +SELECT concat('With ', materialize('2123-11-14' :: Date32)); +SELECT concat('With ', materialize('2023-11-14 05:50:12' :: DateTime('Europe/Amsterdam'))); +SELECT concat('With ', materialize('2023-11-14 05:50:12.123' :: DateTime64(3, 'Europe/Amsterdam'))); +SELECT concat('With ', materialize('hallo' :: Enum('hallo' = 1))); +SELECT concat('With ', materialize(['foo', 'bar'] :: Array(String))); +SELECT concat('With ', materialize('{"foo": "bar"}' :: JSON)); +SELECT concat('With ', materialize((42, 'foo') :: Tuple(Int32, String))); +SELECT concat('With ', materialize(map(42, 'foo') :: Map(Int32, String))); +SELECT concat('With ', materialize('122.233.64.201' :: IPv4)); +SELECT concat('With ', materialize('2001:0001:130F:0002:0003:09C0:876A:130B' :: IPv6)); +SELECT concat('With ', materialize((42, 43) :: Point)); +SELECT concat('With ', materialize([(0,0),(10,0),(10,10),(0,10)] :: Ring)); +SELECT concat('With ', materialize([[(20, 20), (50, 20), (50, 50), (20, 50)], [(30, 30), (50, 50), (50, 30)]] :: Polygon)); +SELECT concat('With ', materialize([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]] :: MultiPolygon)); + +SELECT '-- SimpleAggregateFunction'; +DROP TABLE IF EXISTS concat_saf_test; +CREATE TABLE concat_saf_test(x SimpleAggregateFunction(max, Int32)) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO concat_saf_test VALUES (42); +INSERT INTO concat_saf_test SELECT max(number) FROM numbers(5); +SELECT concat('With ', x) FROM concat_saf_test ORDER BY x DESC; +DROP TABLE concat_saf_test; + +SELECT '-- Nested'; +DROP TABLE IF EXISTS concat_nested_test; +CREATE TABLE concat_nested_test(attrs Nested(k String, v String)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO concat_nested_test VALUES (['foo', 'bar'], ['qaz', 'qux']); +SELECT concat('With ', attrs.k, attrs.v) FROM concat_nested_test; +DROP TABLE concat_nested_test; + +SELECT '-- NULL arguments'; +SELECT concat(NULL, NULL); +SELECT concat(NULL, materialize(NULL :: Nullable(UInt64))); +SELECT concat(materialize(NULL :: Nullable(UInt64)), materialize(NULL :: Nullable(UInt64))); +SELECT concat(42, materialize(NULL :: Nullable(UInt64))); +SELECT concat('42', materialize(NULL :: Nullable(UInt64))); +SELECT concat(42, materialize(NULL :: Nullable(UInt64)), materialize(NULL :: Nullable(UInt64))); +SELECT concat('42', materialize(NULL :: Nullable(UInt64)), materialize(NULL :: Nullable(UInt64))); + +SELECT '-- Various arguments tests'; +SELECT concat(materialize('Non-const'), materialize(' strings')); +SELECT concat('Two arguments ', 'test'); +SELECT concat('Three ', 'arguments', ' test'); +SELECT concat(materialize(3 :: Int64), ' arguments test', ' with int type'); +SELECT concat(materialize(42 :: Int32), materialize(144 :: UInt64)); +SELECT concat(materialize(42 :: Int32), materialize(144 :: UInt64), materialize(255 :: UInt32)); +SELECT concat(42, 144); +SELECT concat(42, 144, 255); + +SELECT '-- Single argument tests'; +SELECT concat(42); +SELECT concat(materialize(42)); +SELECT concat('foo'); +SELECT concat(materialize('foo')); +SELECT concat(NULL); +SELECT 
concat(materialize(NULL :: Nullable(UInt64))); + +SELECT CONCAT('Testing the ', 'alias'); + +SELECT concat(); -- { serverError 42 } diff --git a/tests/queries/0_stateless/00732_base64_functions.reference b/tests/queries/0_stateless/00732_base64_functions.reference index f97c19427e7..8f91ffa74ab 100644 --- a/tests/queries/0_stateless/00732_base64_functions.reference +++ b/tests/queries/0_stateless/00732_base64_functions.reference @@ -21,9 +21,9 @@ fooba foobar 1 1 1 1 -fooba -~ + + + Zm9v foo foo -TEcgT3B0aW11cw== diff --git a/tests/queries/0_stateless/00732_base64_functions.sql b/tests/queries/0_stateless/00732_base64_functions.sql index 99268004003..3c60bf939fe 100644 --- a/tests/queries/0_stateless/00732_base64_functions.sql +++ b/tests/queries/0_stateless/00732_base64_functions.sql @@ -2,17 +2,23 @@ SET send_logs_level = 'fatal'; -SELECT base64Encode(val) FROM (select arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar']) val); +SELECT base64Encode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT base64Decode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tryBase64Decode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT base64Encode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT base64Decode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tryBase64Decode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- test with valid inputs + +SELECT base64Encode(val) FROM (select arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar']) val); SELECT base64Decode(val) FROM (select arrayJoin(['', 'Zg==', 'Zm8=', 'Zm9v', 'Zm9vYg==', 'Zm9vYmE=', 'Zm9vYmFy']) val); SELECT tryBase64Decode(val) FROM (select arrayJoin(['', 'Zg==', 'Zm8=', 'Zm9v', 'Zm9vYg==', 'Zm9vYmE=', 'Zm9vYmFy']) val); SELECT base64Decode(base64Encode('foo')) = 'foo', base64Encode(base64Decode('Zm9v')) == 'Zm9v'; SELECT tryBase64Decode(base64Encode('foo')) = 'foo', base64Encode(tryBase64Decode('Zm9v')) == 'Zm9v'; -SELECT base64Encode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -SELECT base64Decode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -SELECT tryBase64Decode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- test with invalid inputs SELECT base64Decode('Zm9vYmF=Zm9v'); -- { serverError INCORRECT_DATA } SELECT tryBase64Decode('Zm9vYmF=Zm9v'); @@ -20,9 +26,11 @@ SELECT tryBase64Decode('Zm9vYmF=Zm9v'); SELECT base64Decode('foo'); -- { serverError INCORRECT_DATA } SELECT tryBase64Decode('foo'); +SELECT base64Decode('aoeo054640eu='); -- { serverError INCORRECT_DATA } +SELECT tryBase64Decode('aoeo054640eu='); + +-- test FixedString arguments + select base64Encode(toFixedString('foo', 3)); select base64Decode(toFixedString('Zm9v', 4)); select tryBase64Decode(toFixedString('Zm9v', 4)); - --- This query reproduces a bug in TurboBase64 library (which we no longer use) -select distinct base64Encode(materialize('LG Optimus')) from numbers(100); diff --git a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference index 58f8b7abfb3..3de05d66188 100644 --- a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference +++ b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference @@ -49,5 +49,5 @@ Check total_bytes/total_rows for Set 2048 50 
2048 100 Check total_bytes/total_rows for Join -10240 50 -10240 100 +1 50 +1 100 diff --git a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql index 16085c8a995..ae9db656f00 100644 --- a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql +++ b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql @@ -134,7 +134,7 @@ DROP TABLE check_system_tables; SELECT 'Check total_bytes/total_rows for Join'; CREATE TABLE check_system_tables Engine=Join(ANY, LEFT, number) AS SELECT * FROM numbers(50); -SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +SELECT total_bytes BETWEEN 5000 AND 15000, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); INSERT INTO check_system_tables SELECT number+50 FROM numbers(50); -SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +SELECT total_bytes BETWEEN 5000 AND 15000, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); DROP TABLE check_system_tables; diff --git a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh index bab2304cec2..12d889a7137 100755 --- a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh +++ b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh @@ -7,6 +7,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +SHARD=$($CLICKHOUSE_CLIENT --query "Select getMacro('shard')") +REPLICA=$($CLICKHOUSE_CLIENT --query "Select getMacro('replica')") + $CLICKHOUSE_CLIENT -nm -q " DROP TABLE IF EXISTS part_header_r1; @@ -54,8 +57,8 @@ elapsed=1 until [ $elapsed -eq 5 ]; do sleep $(( elapsed++ )) - count1=$($CLICKHOUSE_CLIENT --query="SELECT count(name) FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/s1/replicas/1r1/parts'") - count2=$($CLICKHOUSE_CLIENT --query="SELECT count(name) FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/s1/replicas/2r1/parts'") + count1=$($CLICKHOUSE_CLIENT --query="SELECT count(name) FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/$SHARD/replicas/1$REPLICA/parts'") + count2=$($CLICKHOUSE_CLIENT --query="SELECT count(name) FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/$SHARD/replicas/2$REPLICA/parts'") [[ $count1 == 1 && $count2 == 1 ]] && break done @@ -64,10 +67,10 @@ $CLICKHOUSE_CLIENT -nm -q " SELECT '*** Test part removal ***'; SELECT '*** replica 1 ***'; SELECT name FROM system.parts WHERE active AND database = currentDatabase() AND table = 'part_header_r1'; -SELECT name FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/s1/replicas/1r1/parts'; +SELECT name FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/$SHARD/replicas/1$REPLICA/parts'; SELECT '*** replica 2 ***'; SELECT name FROM system.parts WHERE active AND database = currentDatabase() AND 
table = 'part_header_r2'; -SELECT name FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/s1/replicas/2r1/parts'; +SELECT name FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/$SHARD/replicas/2$REPLICA/parts'; SELECT '*** Test ALTER ***'; ALTER TABLE part_header_r1 MODIFY COLUMN y String; diff --git a/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh index ad0146b9d99..57a41526900 100755 --- a/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh +++ b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh CLICKHOUSE_TEST_ZOOKEEPER_PREFIX="${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}" - +SHARD=$($CLICKHOUSE_CLIENT --query "Select getMacro('shard')") $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS elog;" @@ -30,33 +30,33 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 3, 'h $CLICKHOUSE_CLIENT --query="SELECT count(*) from elog" # 3 rows -count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") +count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/$SHARD/blocks'") while [[ $count != 2 ]] do sleep 1 - count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") + count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/$SHARD/blocks'") done $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 1, 'hello')" $CLICKHOUSE_CLIENT --query="SELECT count(*) from elog" # 4 rows -count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") +count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/$SHARD/blocks'") while [[ $count != 2 ]] do sleep 1 - count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") + count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/$SHARD/blocks'") done $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 2, 'hello')" $CLICKHOUSE_CLIENT --query="SELECT count(*) from elog" # 5 rows -count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") +count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/$SHARD/blocks'") while [[ $count != 2 ]] do sleep 1 - count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/s1/blocks'") + count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = 
'/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/$SHARD/blocks'") done $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 2, 'hello')" diff --git a/tests/queries/0_stateless/01052_window_view_proc_tumble_to_now.sh b/tests/queries/0_stateless/01052_window_view_proc_tumble_to_now.sh index e75b7d9570b..4325ebeed24 100755 --- a/tests/queries/0_stateless/01052_window_view_proc_tumble_to_now.sh +++ b/tests/queries/0_stateless/01052_window_view_proc_tumble_to_now.sh @@ -16,7 +16,7 @@ DROP TABLE IF EXISTS wv; CREATE TABLE dst(count UInt64) Engine=MergeTree ORDER BY tuple(); CREATE TABLE mt(a Int32) ENGINE=MergeTree ORDER BY tuple(); -CREATE WINDOW VIEW wv TO dst AS SELECT count(a) AS count FROM mt GROUP BY tumble(now('US/Samoa'), INTERVAL '5' SECOND, 'US/Samoa') AS wid; +CREATE WINDOW VIEW wv TO dst AS SELECT count(a) AS count FROM mt GROUP BY tumble(now('US/Samoa'), INTERVAL '10' SECOND, 'US/Samoa') AS wid; INSERT INTO mt VALUES (1); EOF diff --git a/tests/queries/0_stateless/01280_ttl_where_group_by.reference b/tests/queries/0_stateless/01280_ttl_where_group_by.reference deleted file mode 100644 index 65e7e5b158f..00000000000 --- a/tests/queries/0_stateless/01280_ttl_where_group_by.reference +++ /dev/null @@ -1,26 +0,0 @@ -ttl_01280_1 -1 1 0 4 -1 2 3 7 -1 3 0 5 -2 1 0 1 -2 1 20 1 -ttl_01280_2 -1 1 [0,2,3] 4 -1 1 [5,4,1] 13 -1 3 [1,0,1,0] 17 -2 1 [3,1,0,3] 8 -3 1 [2,4,5] 8 -ttl_01280_3 -1 1 0 4 -1 1 10 6 -2 1 0 3 -3 1 8 2 -ttl_01280_4 -0 4 -13 9 -ttl_01280_5 -1 2 7 5 -2 3 6 5 -ttl_01280_6 -1 3 5 -2 3 5 diff --git a/tests/queries/0_stateless/01280_ttl_where_group_by.sh b/tests/queries/0_stateless/01280_ttl_where_group_by.sh deleted file mode 100755 index e6f83d6edd1..00000000000 --- a/tests/queries/0_stateless/01280_ttl_where_group_by.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-parallel, no-fasttest - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. 
"$CURDIR"/../shell_config.sh - -$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_1" - -function optimize() -{ - for _ in {0..20}; do - $CLICKHOUSE_CLIENT --query "OPTIMIZE TABLE $1 FINAL SETTINGS optimize_throw_if_noop=1" 2>/dev/null && break - sleep 0.3 - done -} - -# "SETTINGS max_parts_to_merge_at_once = 1" prevents merges to start before our own OPTIMIZE FINAL - -echo "ttl_01280_1" -$CLICKHOUSE_CLIENT -n --query " -create table ttl_01280_1 (a Int, b Int, x Int, y Int, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second delete where x % 10 == 0 and y > 5 SETTINGS max_parts_to_merge_at_once = 1; -insert into ttl_01280_1 values (1, 1, 0, 4, now() + 10); -insert into ttl_01280_1 values (1, 1, 10, 6, now()); -insert into ttl_01280_1 values (1, 2, 3, 7, now()); -insert into ttl_01280_1 values (1, 3, 0, 5, now()); -insert into ttl_01280_1 values (2, 1, 20, 1, now()); -insert into ttl_01280_1 values (2, 1, 0, 1, now()); -insert into ttl_01280_1 values (3, 1, 0, 8, now());" - -sleep 2 -optimize "ttl_01280_1" -$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_1 ORDER BY a, b, x, y" - -$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_2" - -echo "ttl_01280_2" -$CLICKHOUSE_CLIENT -n --query " -create table ttl_01280_2 (a Int, b Int, x Array(Int32), y Double, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a, b set x = minForEach(x), y = sum(y), d = max(d) SETTINGS max_parts_to_merge_at_once = 1; -insert into ttl_01280_2 values (1, 1, array(0, 2, 3), 4, now() + 10); -insert into ttl_01280_2 values (1, 1, array(5, 4, 3), 6, now()); -insert into ttl_01280_2 values (1, 1, array(5, 5, 1), 7, now()); -insert into ttl_01280_2 values (1, 3, array(3, 0, 4), 5, now()); -insert into ttl_01280_2 values (1, 3, array(1, 1, 2, 1), 9, now()); -insert into ttl_01280_2 values (1, 3, array(3, 2, 1, 0), 3, now()); -insert into ttl_01280_2 values (2, 1, array(3, 3, 3), 7, now()); -insert into ttl_01280_2 values (2, 1, array(11, 1, 0, 3), 1, now()); -insert into ttl_01280_2 values (3, 1, array(2, 4, 5), 8, now());" - -sleep 2 -optimize "ttl_01280_2" -$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_2 ORDER BY a, b, x, y" - -$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_3" - -echo "ttl_01280_3" -$CLICKHOUSE_CLIENT -n --query " -create table ttl_01280_3 (a Int, b Int, x Int64, y Int, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a set b = min(b), x = argMax(x, d), y = argMax(y, d), d = max(d) SETTINGS max_parts_to_merge_at_once = 1; -insert into ttl_01280_3 values (1, 1, 0, 4, now() + 10); -insert into ttl_01280_3 values (1, 1, 10, 6, now() + 1); -insert into ttl_01280_3 values (1, 2, 3, 7, now()); -insert into ttl_01280_3 values (1, 3, 0, 5, now()); -insert into ttl_01280_3 values (2, 1, 20, 1, now()); -insert into ttl_01280_3 values (2, 1, 0, 3, now() + 1); -insert into ttl_01280_3 values (3, 1, 0, 3, now()); -insert into ttl_01280_3 values (3, 2, 8, 2, now() + 1); -insert into ttl_01280_3 values (3, 5, 5, 8, now());" - -sleep 2 -optimize "ttl_01280_3" -$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_3 ORDER BY a, b, x, y" - -$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_4" - -echo "ttl_01280_4" -$CLICKHOUSE_CLIENT -n --query " -create table ttl_01280_4 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), -(a + b)) ttl d + interval 1 second group by toDate(d) set x = sum(x), y = max(y) SETTINGS 
max_parts_to_merge_at_once = 1; -insert into ttl_01280_4 values (1, 1, 0, 4, now() + 10); -insert into ttl_01280_4 values (10, 2, 3, 3, now()); -insert into ttl_01280_4 values (2, 10, 1, 7, now()); -insert into ttl_01280_4 values (3, 3, 5, 2, now()); -insert into ttl_01280_4 values (1, 5, 4, 9, now())" - -sleep 2 -optimize "ttl_01280_4" -$CLICKHOUSE_CLIENT --query "select x, y from ttl_01280_4 ORDER BY a, b, x, y" - -$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_5" - -echo "ttl_01280_5" -$CLICKHOUSE_CLIENT -n --query "create table ttl_01280_5 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a, -b) ttl d + interval 1 second group by toDate(d), a set x = sum(x), b = argMax(b, -b) SETTINGS max_parts_to_merge_at_once = 1; -insert into ttl_01280_5 values (1, 2, 3, 5, now()); -insert into ttl_01280_5 values (2, 10, 1, 5, now()); -insert into ttl_01280_5 values (2, 3, 5, 5, now()); -insert into ttl_01280_5 values (1, 5, 4, 5, now());" - -sleep 2 -optimize "ttl_01280_5" -$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_5 ORDER BY a, b, x, y" - -$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_6" - -echo "ttl_01280_6" -$CLICKHOUSE_CLIENT -n --query " -create table ttl_01280_6 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a, -b) ttl d + interval 1 second group by toDate(d), a SETTINGS max_parts_to_merge_at_once = 1; -insert into ttl_01280_6 values (1, 2, 3, 5, now()); -insert into ttl_01280_6 values (2, 10, 3, 5, now()); -insert into ttl_01280_6 values (2, 3, 3, 5, now()); -insert into ttl_01280_6 values (1, 5, 3, 5, now())" - -sleep 2 -optimize "ttl_01280_6" -$CLICKHOUSE_CLIENT --query "select a, x, y from ttl_01280_6 ORDER BY a, b, x, y" - -$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_1" -$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_2" -$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_3" -$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_4" -$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_5" -$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_6" diff --git a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh index 2d761df998e..67a2a70b509 100755 --- a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh +++ b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh @@ -5,6 +5,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +SHARD=$($CLICKHOUSE_CLIENT --query "Select getMacro('shard')") +REPLICA=$($CLICKHOUSE_CLIENT --query "Select getMacro('replica')") + # Check that if we have one inactive replica and a huge number of INSERTs to active replicas, # the number of nodes in ZooKeeper does not grow unbounded. 
@@ -32,16 +35,16 @@ for _ in {1..60}; do done -$CLICKHOUSE_CLIENT --query "SELECT numChildren < $((SCALE / 4)) FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1' AND name = 'log'"; +$CLICKHOUSE_CLIENT --query "SELECT numChildren < $((SCALE / 4)) FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/$SHARD' AND name = 'log'"; echo -e '\n---\n'; -$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1/replicas/1r1' AND name = 'is_lost'"; -$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1/replicas/2r1' AND name = 'is_lost'"; +$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/$SHARD/replicas/1$REPLICA' AND name = 'is_lost'"; +$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/$SHARD/replicas/2$REPLICA' AND name = 'is_lost'"; echo -e '\n---\n'; $CLICKHOUSE_CLIENT --query "ATTACH TABLE r2" $CLICKHOUSE_CLIENT --receive_timeout 600 --query "SYSTEM SYNC REPLICA r2" # Need to increase timeout, otherwise it timed out in debug build -$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1/replicas/2r1' AND name = 'is_lost'"; +$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/$SHARD/replicas/2$REPLICA' AND name = 'is_lost'"; $CLICKHOUSE_CLIENT -n --query " DROP TABLE IF EXISTS r1; diff --git a/tests/queries/0_stateless/01568_window_functions_distributed.reference b/tests/queries/0_stateless/01568_window_functions_distributed.reference index 13ac0769a24..29ff2e7133c 100644 --- a/tests/queries/0_stateless/01568_window_functions_distributed.reference +++ b/tests/queries/0_stateless/01568_window_functions_distributed.reference @@ -22,6 +22,16 @@ select sum(number) over w as x, max(number) over w as y from t_01568 window w as 21 8 21 8 21 8 +select sum(number) over w, max(number) over w from t_01568 window w as (partition by p) order by p; +3 2 +3 2 +3 2 +12 5 +12 5 +12 5 +21 8 +21 8 +21 8 select sum(number) over w as x, max(number) over w as y from remote('127.0.0.{1,2}', '', t_01568) window w as (partition by p) order by x, y; 6 2 6 2 @@ -41,6 +51,25 @@ select sum(number) over w as x, max(number) over w as y from remote('127.0.0.{1, 42 8 42 8 42 8 +select sum(number) over w as x, max(number) over w as y from remote('127.0.0.{1,2}', '', t_01568) window w as (partition by p) order by x, y SETTINGS max_threads = 1; +6 2 +6 2 +6 2 +6 2 +6 2 +6 2 +24 5 +24 5 +24 5 +24 5 +24 5 +24 5 +42 8 +42 8 +42 8 +42 8 +42 8 +42 8 select distinct sum(number) over w as x, max(number) over w as y from remote('127.0.0.{1,2}', '', t_01568) window w as (partition by p) order by x, y; 6 2 24 5 diff --git a/tests/queries/0_stateless/01568_window_functions_distributed.sql b/tests/queries/0_stateless/01568_window_functions_distributed.sql index 95072d6460f..ecce7b412ba 100644 --- a/tests/queries/0_stateless/01568_window_functions_distributed.sql +++ b/tests/queries/0_stateless/01568_window_functions_distributed.sql @@ -15,8 +15,12 @@ from numbers(9); select sum(number) over w as x, max(number) over w as y from t_01568 window w as (partition by p) order by x, y; +select sum(number) over w, 
max(number) over w from t_01568 window w as (partition by p) order by p; + select sum(number) over w as x, max(number) over w as y from remote('127.0.0.{1,2}', '', t_01568) window w as (partition by p) order by x, y; +select sum(number) over w as x, max(number) over w as y from remote('127.0.0.{1,2}', '', t_01568) window w as (partition by p) order by x, y SETTINGS max_threads = 1; + select distinct sum(number) over w as x, max(number) over w as y from remote('127.0.0.{1,2}', '', t_01568) window w as (partition by p) order by x, y; -- window functions + aggregation w/shards diff --git a/tests/queries/0_stateless/01572_kill_window_function.sh b/tests/queries/0_stateless/01572_kill_window_function.sh index 7103b7f7210..de6de3510a0 100755 --- a/tests/queries/0_stateless/01572_kill_window_function.sh +++ b/tests/queries/0_stateless/01572_kill_window_function.sh @@ -6,21 +6,20 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -o pipefail +function wait_for_query_to_start() +{ + while [[ $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() FROM system.processes WHERE query_id = '$1'") == 0 ]]; do sleep 0.1; done +} + # Run a test query that takes very long to run. query_id="01572_kill_window_function-$CLICKHOUSE_DATABASE" -$CLICKHOUSE_CLIENT --query_id="$query_id" --query "SELECT count(1048575) OVER (PARTITION BY intDiv(NULL, number) ORDER BY number DESC NULLS FIRST ROWS BETWEEN CURRENT ROW AND 1048575 FOLLOWING) FROM numbers(255, 1048575)" >/dev/null 2>&1 & +$CLICKHOUSE_CLIENT --query_id="$query_id" --query "SELECT sum(number) OVER (PARTITION BY number % 10 ORDER BY number DESC NULLS FIRST ROWS BETWEEN CURRENT ROW AND 99999 FOLLOWING) FROM numbers(0, 10000000) format Null;" >/dev/null 2>&1 & client_pid=$! echo Started -# Use one query to both kill the test query and verify that it has started, -# because if we try to kill it before it starts, the test will fail. -while [ -z "$($CLICKHOUSE_CLIENT --query "kill query where query_id = '$query_id' and current_database = currentDatabase()")" ] -do - # If we don't yet see the query in the process list, the client should still - # be running. The query is very long. - kill -0 -- $client_pid - sleep 1 -done +wait_for_query_to_start $query_id + +$CLICKHOUSE_CLIENT --query "kill query where query_id = '$query_id' and current_database = currentDatabase() format Null" echo Sent kill request # Wait for the client to terminate. 
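(Illustrative aside, not part of the patch: the 01572_kill_window_function.sh change above replaces the old "kill until the query appears" loop with an explicit wait_for_query_to_start poll of system.processes followed by a single KILL QUERY. A minimal sketch of that pattern, assuming the usual shell_config.sh variables ($CLICKHOUSE_CLIENT, $CLICKHOUSE_CURL, $CLICKHOUSE_URL, $CLICKHOUSE_DATABASE) and reusing the long-running window-function query from the patch; the query_id prefix is hypothetical:

    query_id="example-wait-and-kill-$CLICKHOUSE_DATABASE"
    # Start a deliberately slow query in the background.
    $CLICKHOUSE_CLIENT --query_id="$query_id" --query "SELECT sum(number) OVER (PARTITION BY number % 10 ORDER BY number DESC NULLS FIRST ROWS BETWEEN CURRENT ROW AND 99999 FOLLOWING) FROM numbers(0, 10000000) FORMAT Null" >/dev/null 2>&1 &
    client_pid=$!
    # Poll system.processes until the query is visible, so the KILL cannot race its startup.
    while [[ $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() FROM system.processes WHERE query_id = '$query_id'") == 0 ]]; do
        sleep 0.1
    done
    # Kill by query_id, scoped to the current database, then wait for the client to exit.
    $CLICKHOUSE_CLIENT --query "KILL QUERY WHERE query_id = '$query_id' AND current_database = currentDatabase() FORMAT Null"
    wait $client_pid ||:
)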
diff --git a/tests/queries/0_stateless/01586_replicated_mutations_empty_partition.sql b/tests/queries/0_stateless/01586_replicated_mutations_empty_partition.sql index b5ad6c06e96..c4a3c939c26 100644 --- a/tests/queries/0_stateless/01586_replicated_mutations_empty_partition.sql +++ b/tests/queries/0_stateless/01586_replicated_mutations_empty_partition.sql @@ -16,7 +16,7 @@ INSERT INTO replicated_mutations_empty_partitions SETTINGS insert_keeper_fault_i SELECT count(distinct value) FROM replicated_mutations_empty_partitions; -SELECT count() FROM system.zookeeper WHERE path = '/clickhouse/test/'||currentDatabase()||'/01586_replicated_mutations_empty_partitions/s1/block_numbers'; +SELECT count() FROM system.zookeeper WHERE path = '/clickhouse/test/'||currentDatabase()||'/01586_replicated_mutations_empty_partitions/'||getMacro('shard')||'/block_numbers'; ALTER TABLE replicated_mutations_empty_partitions DROP PARTITION '3'; ALTER TABLE replicated_mutations_empty_partitions DROP PARTITION '4'; @@ -24,7 +24,7 @@ ALTER TABLE replicated_mutations_empty_partitions DROP PARTITION '5'; ALTER TABLE replicated_mutations_empty_partitions DROP PARTITION '9'; -- still ten records -SELECT count() FROM system.zookeeper WHERE path = '/clickhouse/test/'||currentDatabase()||'/01586_replicated_mutations_empty_partitions/s1/block_numbers'; +SELECT count() FROM system.zookeeper WHERE path = '/clickhouse/test/'||currentDatabase()||'/01586_replicated_mutations_empty_partitions/'||getMacro('shard')||'/block_numbers'; ALTER TABLE replicated_mutations_empty_partitions MODIFY COLUMN value UInt64 SETTINGS replication_alter_partitions_sync=2; diff --git a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.sh b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.sh index f8f3ccd6dd6..2762f918d72 100755 --- a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.sh +++ b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas_long.sh @@ -7,6 +7,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=./replication.lib . 
"$CURDIR"/replication.lib +SHARD=$($CLICKHOUSE_CLIENT --query "Select getMacro('shard')") +REPLICA=$($CLICKHOUSE_CLIENT --query "Select getMacro('replica')") + REPLICAS=5 for i in $(seq $REPLICAS); do @@ -79,9 +82,9 @@ while true; do done -metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1/replicas/r11/' and name = 'metadata_version'") +metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/$SHARD/replicas/${REPLICA}1/' and name = 'metadata_version'") for i in $(seq $REPLICAS); do - replica_metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/s1/replicas/r1$i/' and name = 'metadata_version'") + replica_metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/$SHARD/replicas/${REPLICA}$i/' and name = 'metadata_version'") if [ "$metadata_version" != "$replica_metadata_version" ]; then echo "Metadata version on replica $i differs from the first replica, FAIL" diff --git a/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference b/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference index 664d8e84f27..b4eaf226106 100644 --- a/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference +++ b/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference @@ -14,5 +14,3 @@ abandonable_lock-other failed_parts last_part parallel -shared -shared diff --git a/tests/queries/0_stateless/01700_system_zookeeper_path_in.sql b/tests/queries/0_stateless/01700_system_zookeeper_path_in.sql index cf4bc7650e7..3b321d3cea5 100644 --- a/tests/queries/0_stateless/01700_system_zookeeper_path_in.sql +++ b/tests/queries/0_stateless/01700_system_zookeeper_path_in.sql @@ -8,17 +8,17 @@ CREATE TABLE sample_table ( ENGINE ReplicatedMergeTree('/clickhouse/{database}/01700_system_zookeeper_path_in/{shard}', '{replica}') ORDER BY tuple(); -SELECT name FROM system.zookeeper WHERE path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1' AND name like 'block%' ORDER BY name; -SELECT name FROM system.zookeeper WHERE path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1/replicas' AND name LIKE '%r1%' ORDER BY name; +SELECT name FROM system.zookeeper WHERE path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard') AND name like 'block%' ORDER BY name; +SELECT 'r1' FROM system.zookeeper WHERE path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard') || '/replicas' AND name LIKE '%'|| getMacro('replica') ||'%' ORDER BY name; SELECT '========'; -SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1') AND name LIKE 'block%' ORDER BY name; -SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1/replicas') AND name LIKE '%r1%' ORDER BY name; +SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard')) AND name LIKE 'block%' ORDER BY name; +SELECT 'r1' FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || 
getMacro('shard') || '/replicas') AND name LIKE '%' || getMacro('replica') || '%' ORDER BY name; SELECT '========'; -SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1', - '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1/replicas') AND name LIKE 'block%' ORDER BY name; +SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard'), + '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard') || '/replicas') AND name LIKE 'block%' ORDER BY name; SELECT '========'; -SELECT name FROM system.zookeeper WHERE path IN (SELECT concat('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1/', name) - FROM system.zookeeper WHERE (name != 'replicas' AND name NOT LIKE 'leader_election%' AND path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1')) ORDER BY name; +SELECT name FROM system.zookeeper WHERE path IN (SELECT concat('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard') || '/', name) + FROM system.zookeeper WHERE (name != 'replicas' AND name NOT LIKE 'leader_election%' AND name NOT LIKE 'zero_copy_%' AND path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard'))) ORDER BY name; DROP TABLE IF EXISTS sample_table; diff --git a/tests/queries/0_stateless/01710_normal_projection_join_plan_fix.reference b/tests/queries/0_stateless/01710_normal_projection_join_plan_fix.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01710_normal_projection_join_plan_fix.sql b/tests/queries/0_stateless/01710_normal_projection_join_plan_fix.sql new file mode 100644 index 00000000000..40847a301c2 --- /dev/null +++ b/tests/queries/0_stateless/01710_normal_projection_join_plan_fix.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (id UInt32, s String) Engine = MergeTree ORDER BY id; +CREATE TABLE t2 (id1 UInt32, id2 UInt32) Engine = MergeTree ORDER BY id1 SETTINGS index_granularity = 1; +INSERT INTO t2 SELECT number, number from numbers(100); +ALTER TABLE t2 ADD PROJECTION proj (SELECT id2 ORDER BY id2); +INSERT INTO t2 SELECT number, number from numbers(100); + +SELECT s FROM t1 as lhs LEFT JOIN (SELECT * FROM t2 WHERE id2 = 2) as rhs ON lhs.id = rhs.id2; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/tests/queries/0_stateless/01710_projection_aggregation_in_order.sql b/tests/queries/0_stateless/01710_projection_aggregation_in_order.sql index e4fb1816c89..06f192adb57 100644 --- a/tests/queries/0_stateless/01710_projection_aggregation_in_order.sql +++ b/tests/queries/0_stateless/01710_projection_aggregation_in_order.sql @@ -1,5 +1,3 @@ --- Tags: disabled --- FIXME https://github.com/ClickHouse/ClickHouse/issues/49552 -- Test that check the correctness of the result for optimize_aggregation_in_order and projections, -- not that this optimization will take place. 
@@ -20,7 +18,7 @@ CREATE TABLE normal ) ) ENGINE = MergeTree -ORDER BY (key, ts); +ORDER BY tuple(); INSERT INTO normal SELECT number, @@ -52,7 +50,7 @@ CREATE TABLE agg ) ) ENGINE = MergeTree -ORDER BY (key, ts); +ORDER BY tuple(); INSERT INTO agg SELECT 1, diff --git a/tests/queries/0_stateless/01927_query_views_log_current_database.reference b/tests/queries/0_stateless/01927_query_views_log_current_database.reference index 64bf1e501f3..9912e3b059d 100644 --- a/tests/queries/0_stateless/01927_query_views_log_current_database.reference +++ b/tests/queries/0_stateless/01927_query_views_log_current_database.reference @@ -7,7 +7,7 @@ databases: ['_table_function','default'] tables: ['_table_function.numbers','default.table_a','default.table_b','default.table_c'] views: ['default.matview_a_to_b','default.matview_b_to_c'] sleep_calls: 200 -sleep_us: 298 +sleep_us: 300 profile_select_rows: 300 profile_select_bytes: 3600 profile_insert_rows: 201 @@ -23,7 +23,7 @@ view_query: SELECT toFloat64(a) AS a, b + sleepEachRow(0.000001) AS co read_rows: 100 written_rows: 100 sleep_calls: 100 -sleep_us: 99 +sleep_us: 100 profile_select_rows: 100 profile_select_bytes: 2000 profile_insert_rows: 100 @@ -40,7 +40,7 @@ view_query: SELECT sum(a + sleepEachRow(0.000002)) AS a FROM default.t read_rows: 100 written_rows: 1 sleep_calls: 100 -sleep_us: 199 +sleep_us: 200 profile_select_rows: 100 profile_select_bytes: 800 profile_insert_rows: 1 diff --git a/tests/queries/0_stateless/01933_invalid_date.sql b/tests/queries/0_stateless/01933_invalid_date.sql index aac09c99e60..b9ea9319aea 100644 --- a/tests/queries/0_stateless/01933_invalid_date.sql +++ b/tests/queries/0_stateless/01933_invalid_date.sql @@ -1,4 +1,4 @@ -SELECT toDate('07-08-2019'); -- { serverError 6 } +SELECT toDate('07-08-2019'); -- { serverError 38 } SELECT toDate('2019-0708'); -- { serverError 38 } SELECT toDate('201907-08'); -- { serverError 38 } SELECT toDate('2019^7^8'); @@ -6,5 +6,5 @@ SELECT toDate('2019^7^8'); CREATE TEMPORARY TABLE test (d Date); INSERT INTO test VALUES ('2018-01-01'); -SELECT * FROM test WHERE d >= '07-08-2019'; -- { serverError 53 } +SELECT * FROM test WHERE d >= '07-08-2019'; -- { serverError 38 } SELECT * FROM test WHERE d >= '2019-07-08'; diff --git a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference index 9016e731106..6adb2382a6f 100644 --- a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference +++ b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference @@ -19,10 +19,8 @@ explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 Expression (Projection) LimitBy Union - Expression (Before LIMIT BY) - LimitBy - Expression ((Before LIMIT BY + (Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))))) - ReadFromStorage (SystemNumbers) + Expression ((Before LIMIT BY + (Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))))) + ReadFromStorage (SystemNumbers) Expression ReadFromRemote (Read from remote replica) explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized @@ -58,11 +56,10 @@ Expression (Projection) Expression (Before LIMIT BY) Sorting 
(Merge sorted streams for ORDER BY, without aggregation) Union - LimitBy - Expression ((Before LIMIT BY + (Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) [lifted up part])) - Sorting (Sorting for ORDER BY) - Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))) - ReadFromStorage (SystemNumbers) + Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) [lifted up part]) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))) + ReadFromStorage (SystemNumbers) ReadFromRemote (Read from remote replica) explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized Expression (Projection) diff --git a/tests/queries/0_stateless/02003_memory_limit_in_client.sh b/tests/queries/0_stateless/02003_memory_limit_in_client.sh index 2d2493828c8..4017c3771a6 100755 --- a/tests/queries/0_stateless/02003_memory_limit_in_client.sh +++ b/tests/queries/0_stateless/02003_memory_limit_in_client.sh @@ -1,4 +1,4 @@ -#!/usr/bin/bash -f +#!/usr/bin/env bash CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.reference b/tests/queries/0_stateless/02052_last_granula_adjust_logical_error.reference similarity index 100% rename from tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.reference rename to tests/queries/0_stateless/02052_last_granula_adjust_logical_error.reference diff --git a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2 b/tests/queries/0_stateless/02052_last_granula_adjust_logical_error.sql.j2 similarity index 100% rename from tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2 rename to tests/queries/0_stateless/02052_last_granula_adjust_logical_error.sql.j2 diff --git a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.reference b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.reference index 53b44764d5c..137fb0587cc 100644 --- a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.reference +++ b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.reference @@ -16,6 +16,8 @@ columns columns columns columns +creator_info +creator_info failed_parts failed_parts flags diff --git a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.reference b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.reference index ccc3064ccbd..2893c2a845f 100644 --- a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.reference +++ b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.reference @@ -7,6 +7,7 @@ block_numbers blocks columns columns +creator_info failed_parts flags host @@ -49,6 +50,7 @@ block_numbers blocks columns columns +creator_info failed_parts flags host diff --git a/tests/queries/0_stateless/02226_filesystem_cache_profile_events.reference b/tests/queries/0_stateless/02226_filesystem_cache_profile_events.reference index 2ee0f256949..c538301cbd9 100644 --- 
a/tests/queries/0_stateless/02226_filesystem_cache_profile_events.reference +++ b/tests/queries/0_stateless/02226_filesystem_cache_profile_events.reference @@ -1,15 +1,15 @@ Using storage policy: s3_cache -1 0 1 -0 1 0 -0 1 0 +0 1 1 0 1 +1 0 0 1 0 +1 0 0 1 0 0 Using storage policy: local_cache -1 0 1 -0 1 0 -0 1 0 +0 1 1 0 1 +1 0 0 1 0 +1 0 0 1 0 0 Using storage policy: azure_cache -1 0 1 -0 1 0 -0 1 0 +0 1 1 0 1 +1 0 0 1 0 +1 0 0 1 0 0 diff --git a/tests/queries/0_stateless/02226_filesystem_cache_profile_events.sh b/tests/queries/0_stateless/02226_filesystem_cache_profile_events.sh index f071a570243..02e98bbb1b0 100755 --- a/tests/queries/0_stateless/02226_filesystem_cache_profile_events.sh +++ b/tests/queries/0_stateless/02226_filesystem_cache_profile_events.sh @@ -7,11 +7,10 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh - for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do echo "Using storage policy: $STORAGE_POLICY" - clickhouse client --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiquery --multiline --query """ SET max_memory_usage='20G'; SET enable_filesystem_cache_on_write_operations = 0; @@ -24,11 +23,13 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do query="SELECT * FROM test_02226 LIMIT 10" - query_id=$(clickhouse client --query "select queryID() from ($query) limit 1" 2>&1) + query_id=$($CLICKHOUSE_CLIENT --query "select queryID() from ($query) limit 1" 2>&1) - clickhouse client --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiquery --multiline --query """ SYSTEM FLUSH LOGS; - SELECT ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, + SELECT ProfileEvents['CachedReadBufferReadFromCacheHits'] > 0 as remote_fs_cache_hit, + ProfileEvents['CachedReadBufferReadFromCacheMisses'] > 0 as remote_fs_cache_miss, + ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, ProfileEvents['CachedReadBufferReadFromCacheBytes'] > 0 as remote_fs_cache_read, ProfileEvents['CachedReadBufferCacheWriteBytes'] > 0 as remote_fs_read_and_download FROM system.query_log @@ -39,16 +40,18 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do LIMIT 1; """ - clickhouse client --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiquery --multiline --query """ set remote_filesystem_read_method = 'read'; set local_filesystem_read_method = 'pread'; """ - query_id=$(clickhouse client --query "select queryID() from ($query) limit 1" 2>&1) + query_id=$($CLICKHOUSE_CLIENT --query "select queryID() from ($query) limit 1" 2>&1) - clickhouse client --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiquery --multiline --query """ SYSTEM FLUSH LOGS; - SELECT ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, + SELECT ProfileEvents['CachedReadBufferReadFromCacheHits'] > 0 as remote_fs_cache_hit, + ProfileEvents['CachedReadBufferReadFromCacheMisses'] > 0 as remote_fs_cache_miss, + ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, ProfileEvents['CachedReadBufferReadFromCacheBytes'] > 0 as remote_fs_cache_read, ProfileEvents['CachedReadBufferCacheWriteBytes'] > 0 as remote_fs_read_and_download FROM system.query_log @@ -60,15 +63,17 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do """ - clickhouse client --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiquery --multiline --query """ set 
remote_filesystem_read_method='threadpool'; """ - query_id=$(clickhouse client --query "select queryID() from ($query) limit 1") + query_id=$($CLICKHOUSE_CLIENT --query "select queryID() from ($query) limit 1") - clickhouse client --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiquery --multiline --query """ SYSTEM FLUSH LOGS; - SELECT ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, + SELECT ProfileEvents['CachedReadBufferReadFromCacheHits'] > 0 as remote_fs_cache_hit, + ProfileEvents['CachedReadBufferReadFromCacheMisses'] > 0 as remote_fs_cache_miss, + ProfileEvents['CachedReadBufferReadFromSourceBytes'] > 0 as remote_fs_read, ProfileEvents['CachedReadBufferReadFromCacheBytes'] > 0 as remote_fs_cache_read, ProfileEvents['CachedReadBufferCacheWriteBytes'] > 0 as remote_fs_read_and_download FROM system.query_log @@ -79,7 +84,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do LIMIT 1; """ - clickhouse client --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiquery --multiline --query """ SELECT * FROM test_02226 WHERE value LIKE '%abc%' ORDER BY value LIMIT 10 FORMAT Null; SET enable_filesystem_cache_on_write_operations = 1; @@ -92,5 +97,5 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do INSERT INTO test_02226 SELECT * FROM generateRandom('key UInt32, value String') LIMIT 10000; """ - clickhouse client --query "DROP TABLE test_02226" + $CLICKHOUSE_CLIENT --query "DROP TABLE test_02226" done diff --git a/tests/queries/0_stateless/02227_union_match_by_name.reference b/tests/queries/0_stateless/02227_union_match_by_name.reference index 685b3c83b05..42b9b01a529 100644 --- a/tests/queries/0_stateless/02227_union_match_by_name.reference +++ b/tests/queries/0_stateless/02227_union_match_by_name.reference @@ -14,36 +14,29 @@ Header: avgWeighted(x, y) Nullable(Float64) Header: x_0 Nullable(UInt8) y_1 UInt8 Union - Header: NULL Nullable(UInt8) - x Nullable(UInt8) + Header: x Nullable(UInt8) y UInt8 Expression (Conversion before UNION) - Header: NULL Nullable(UInt8) - x Nullable(UInt8) + Header: x Nullable(UInt8) y UInt8 Expression (Project names) - Header: NULL Nullable(Nothing) - x UInt8 + Header: x UInt8 y UInt8 Expression (Projection) - Header: NULL_Nullable(Nothing) Nullable(Nothing) - 255_UInt8 UInt8 + Header: 255_UInt8 UInt8 1_UInt8 UInt8 Expression (Change column names to column identifiers) Header: dummy_0 UInt8 ReadFromStorage (SystemOne) Header: dummy UInt8 Expression (Conversion before UNION) - Header: NULL Nullable(UInt8) - x Nullable(UInt8) + Header: x Nullable(UInt8) y UInt8 Expression (Project names) - Header: y UInt8 - x Nullable(Nothing) + Header: x Nullable(Nothing) y UInt8 Expression (Projection) - Header: 1_UInt8 UInt8 - NULL_Nullable(Nothing) Nullable(Nothing) + Header: NULL_Nullable(Nothing) Nullable(Nothing) 1_UInt8 UInt8 Expression (Change column names to column identifiers) Header: dummy_0 UInt8 diff --git a/tests/queries/0_stateless/02233_interpolate_1.sql b/tests/queries/0_stateless/02233_interpolate_1.sql index 3d416b27f45..d589a18421b 100644 --- a/tests/queries/0_stateless/02233_interpolate_1.sql +++ b/tests/queries/0_stateless/02233_interpolate_1.sql @@ -26,7 +26,7 @@ SELECT n, source, inter FROM ( # Test INTERPOLATE with incompatible expression - should produce error SELECT n, source, inter FROM ( SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 -) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS 
inter||'inter'); -- { serverError 44 } +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS reverse(inter)); -- { serverError 44 } # Test INTERPOLATE with column from WITH FILL expression - should produce error SELECT n, source, inter FROM ( diff --git a/tests/queries/0_stateless/02302_s3_file_pruning.reference b/tests/queries/0_stateless/02302_s3_file_pruning.reference index f8d2bdd0612..7e69bdd55db 100644 --- a/tests/queries/0_stateless/02302_s3_file_pruning.reference +++ b/tests/queries/0_stateless/02302_s3_file_pruning.reference @@ -24,4 +24,14 @@ insert into test_02302 select 1 settings s3_create_new_file_on_insert = true; insert into test_02302 select 2 settings s3_create_new_file_on_insert = true; select * from test_02302 where _file like '%1'; 1 +select _file, * from test_02302 where _file like '%1'; +test_02302.1 1 +set max_rows_to_read = 2; +select * from test_02302 where (_file like '%.1' OR _file like '%.2') AND a > 1; +2 +set max_rows_to_read = 999; +select 'a1' as _file, * from test_02302 where _file like '%1' ORDER BY a; +a1 0 +a1 1 +a1 2 drop table test_02302; diff --git a/tests/queries/0_stateless/02302_s3_file_pruning.sql b/tests/queries/0_stateless/02302_s3_file_pruning.sql index 624a87506d1..93fc8a1bc25 100644 --- a/tests/queries/0_stateless/02302_s3_file_pruning.sql +++ b/tests/queries/0_stateless/02302_s3_file_pruning.sql @@ -1,5 +1,5 @@ -- Tags: no-parallel, no-fasttest --- Tag no-fasttest: Depends on AWS +-- Tag no-fasttest: Depends on S3 -- { echo } drop table if exists test_02302; @@ -32,4 +32,14 @@ insert into test_02302 select 1 settings s3_create_new_file_on_insert = true; insert into test_02302 select 2 settings s3_create_new_file_on_insert = true; select * from test_02302 where _file like '%1'; + +select _file, * from test_02302 where _file like '%1'; + +set max_rows_to_read = 2; +select * from test_02302 where (_file like '%.1' OR _file like '%.2') AND a > 1; + +set max_rows_to_read = 999; + +select 'a1' as _file, * from test_02302 where _file like '%1' ORDER BY a; + drop table test_02302; diff --git a/tests/queries/0_stateless/02389_analyzer_nested_lambda.reference b/tests/queries/0_stateless/02389_analyzer_nested_lambda.reference index 935c53358c0..68eb282a6a1 100644 --- a/tests/queries/0_stateless/02389_analyzer_nested_lambda.reference +++ b/tests/queries/0_stateless/02389_analyzer_nested_lambda.reference @@ -117,5 +117,5 @@ SELECT arrayMap(x -> concat(concat(concat(concat(concat(toString(id), '___\0____ FROM test_table WHERE concat(concat(concat(toString(id), '___\0_______\0____'), toString(id)), concat(toString(id), NULL), toString(id)); SELECT '--'; -- -SELECT arrayMap(x -> concat(toString(id), arrayMap(x -> toString(1), [NULL])), [NULL]) FROM test_table; -- { serverError 44 }; +SELECT arrayMap(x -> splitByChar(toString(id), arrayMap(x -> toString(1), [NULL])), [NULL]) FROM test_table; -- { serverError 44 }; DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql b/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql index 8f8b5537da9..48e84246d1c 100644 --- a/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql +++ b/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql @@ -122,7 +122,7 @@ FROM test_table WHERE concat(concat(concat(toString(id), '___\0_______\0____'), SELECT '--'; -SELECT arrayMap(x -> concat(toString(id), arrayMap(x -> toString(1), [NULL])), [NULL]) FROM test_table; -- { serverError 44 }; +SELECT arrayMap(x -> splitByChar(toString(id), arrayMap(x -> 
toString(1), [NULL])), [NULL]) FROM test_table; -- { serverError 44 }; DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02439_merge_selecting_partitions.sql b/tests/queries/0_stateless/02439_merge_selecting_partitions.sql index 1d01fde56d6..0142afba7f2 100644 --- a/tests/queries/0_stateless/02439_merge_selecting_partitions.sql +++ b/tests/queries/0_stateless/02439_merge_selecting_partitions.sql @@ -21,7 +21,7 @@ select sleepEachRow(3) as higher_probability_of_reproducing_the_issue format Nul system flush logs; -- it should not list unneeded partitions where we cannot merge anything -select * from system.zookeeper_log where path like '/test/02439/s1/' || currentDatabase() || '/block_numbers/%' +select * from system.zookeeper_log where path like '/test/02439/' || getMacro('shard') || '/' || currentDatabase() || '/block_numbers/%' and op_num in ('List', 'SimpleList', 'FilteredList') and path not like '%/block_numbers/1' and path not like '%/block_numbers/123' and event_time >= now() - interval 1 minute diff --git a/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.reference b/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.reference index 1f991703c7b..2ece1147d78 100644 --- a/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.reference +++ b/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.reference @@ -1,4 +1,15 @@ 1 rmt -1 rmt1 2 rmt +1 rmt1 2 rmt1 +0 +1 rmt +2 rmt +1 rmt1 +2 rmt1 +1 rmt2 +1 rmt2 +3 rmt2 +5 rmt2 +7 rmt2 +9 rmt2 diff --git a/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql b/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql index fbd90d8ab0f..52e8be236c8 100644 --- a/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql +++ b/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql @@ -24,4 +24,29 @@ insert into rmt1 values (2); system sync replica rmt; system sync replica rmt1; -select *, _table from merge(currentDatabase(), '') order by (*,), _table; +select *, _table from merge(currentDatabase(), '') order by _table, (*,); +select 0; + +create table rmt2 (n int) engine=ReplicatedMergeTree('/test/02468/{database}2', '1') order by tuple() partition by n % 2 settings replicated_max_ratio_of_wrong_parts=0, max_suspicious_broken_parts=0, max_suspicious_broken_parts_bytes=0; + +system stop cleanup rmt; +system stop merges rmt1; +insert into rmt select * from numbers(10) settings max_block_size=1; +system sync replica rmt1 lightweight; + +alter table rmt replace partition id '0' from rmt2; +alter table rmt1 move partition id '1' to table rmt2; + +detach table rmt sync; +detach table rmt1 sync; + +attach table rmt; +attach table rmt1; + +insert into rmt values (1); +insert into rmt1 values (2); +system sync replica rmt; +system sync replica rmt1; +system sync replica rmt2; + +select *, _table from merge(currentDatabase(), '') order by _table, (*,); diff --git a/tests/queries/0_stateless/02494_query_cache_events.reference b/tests/queries/0_stateless/02494_query_cache_events.reference index 9bcd2820f27..00510f3a0c6 100644 --- a/tests/queries/0_stateless/02494_query_cache_events.reference +++ b/tests/queries/0_stateless/02494_query_cache_events.reference @@ -1,7 +1,4 @@ ---- 1 -0 1 ---- 1 0 1 1 0 diff --git a/tests/queries/0_stateless/02494_query_cache_events.sql b/tests/queries/0_stateless/02494_query_cache_events.sql index 05c0acad4b8..f92e71cb50f 100644 --- a/tests/queries/0_stateless/02494_query_cache_events.sql +++ 
b/tests/queries/0_stateless/02494_query_cache_events.sql @@ -4,20 +4,7 @@ -- Start with empty query cache QC SYSTEM DROP QUERY CACHE; --- Run a query with QC on. The first execution is a QC miss. -SELECT '---'; SELECT 1 SETTINGS use_query_cache = true; - -SYSTEM FLUSH LOGS; -SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] -FROM system.query_log -WHERE type = 'QueryFinish' - AND current_database = currentDatabase() - AND query = 'SELECT 1 SETTINGS use_query_cache = true;'; - - --- Run previous query again with query cache on -SELECT '---'; SELECT 1 SETTINGS use_query_cache = true; SYSTEM FLUSH LOGS; @@ -28,4 +15,6 @@ WHERE type = 'QueryFinish' AND query = 'SELECT 1 SETTINGS use_query_cache = true;' ORDER BY event_time_microseconds; +-- (The 1st execution was a cache miss, the 2nd execution was a cache hit) + SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.reference b/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.reference index e666f54d4c4..74dcf748395 100644 --- a/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.reference +++ b/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.reference @@ -1,4 +1,8 @@ +-- query_cache_nondeterministic_function_handling = throw 0 ---- +-- query_cache_nondeterministic_function_handling = save 1 1 +-- query_cache_nondeterministic_function_handling = ignore +1 +0 diff --git a/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.sql b/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.sql index 62e0b099d7a..477655e474f 100644 --- a/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.sql +++ b/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.sql @@ -3,14 +3,21 @@ SYSTEM DROP QUERY CACHE; --- rand() is non-deterministic, the query is rejected by default -SELECT COUNT(rand(1)) SETTINGS use_query_cache = true; -- { serverError CANNOT_USE_QUERY_CACHE_WITH_NONDETERMINISTIC_FUNCTIONS } -SELECT COUNT(*) FROM system.query_cache; - -SELECT '---'; - --- Force caching using a setting -SELECT COUNT(RAND(1)) SETTINGS use_query_cache = true, query_cache_store_results_of_queries_with_nondeterministic_functions = true; -SELECT COUNT(*) FROM system.query_cache; +SELECT '-- query_cache_nondeterministic_function_handling = throw'; +SELECT count(now()) SETTINGS use_query_cache = true; -- { serverError QUERY_CACHE_USED_WITH_NONDETERMINISTIC_FUNCTIONS } +SELECT count(now()) SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling = 'throw'; -- { serverError QUERY_CACHE_USED_WITH_NONDETERMINISTIC_FUNCTIONS } +SELECT count(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +SELECT '-- query_cache_nondeterministic_function_handling = save'; +SELECT count(now()) SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling = 'save'; +SELECT count(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +SELECT '-- query_cache_nondeterministic_function_handling = ignore'; +SELECT count(now()) SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling = 'ignore'; +SELECT count(*) FROM system.query_cache; SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.reference b/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.reference index d0cf9ff680b..f646583bbd3 100644 --- 
a/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.reference +++ b/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.reference @@ -345,6 +345,18 @@ WITH SELECT x; {'argument1':'1','argument2':'2','char':'=','char2':'=','formula':'1+2=3','result':'3','string':'foo=bar'} +-- https://github.com/ClickHouse/ClickHouse/issues/56357 +WITH + extractKeyValuePairs('{"a":"1", "b":"2"}') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; +{'a':'1','b':'2'} -- check str_to_map alias (it is case-insensitive) WITH sTr_tO_mAp('name:neymar, age:31 team:psg,nationality:brazil') AS s_map, diff --git a/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.sql b/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.sql index 804ff4ce880..9277ba6d7ec 100644 --- a/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.sql +++ b/tests/queries/0_stateless/02499_extract_key_value_pairs_multiple_input.sql @@ -481,6 +481,18 @@ WITH SELECT x; +-- https://github.com/ClickHouse/ClickHouse/issues/56357 +WITH + extractKeyValuePairs('{"a":"1", "b":"2"}') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + -- check str_to_map alias (it is case-insensitive) WITH sTr_tO_mAp('name:neymar, age:31 team:psg,nationality:brazil') AS s_map, diff --git a/tests/queries/0_stateless/02503_cache_on_write_with_small_segment_size.sh b/tests/queries/0_stateless/02503_cache_on_write_with_small_segment_size.sh index 63f912c6bff..4f3fd0e54f6 100755 --- a/tests/queries/0_stateless/02503_cache_on_write_with_small_segment_size.sh +++ b/tests/queries/0_stateless/02503_cache_on_write_with_small_segment_size.sh @@ -22,6 +22,7 @@ SETTINGS min_bytes_for_wide_part = 0, type = cache, max_size = '128Mi', max_file_segment_size = '10Ki', + boundary_alignment = '5Ki', path = '${CLICKHOUSE_TEST_UNIQUE_NAME}', cache_on_write_operations = 1, enable_filesystem_query_cache_limit = 1, diff --git a/tests/queries/0_stateless/02516_projections_with_rollup.sql b/tests/queries/0_stateless/02516_projections_with_rollup.sql index 038caf59264..a87621073af 100644 --- a/tests/queries/0_stateless/02516_projections_with_rollup.sql +++ b/tests/queries/0_stateless/02516_projections_with_rollup.sql @@ -1,6 +1,3 @@ --- Tags: disabled --- FIXME https://github.com/ClickHouse/ClickHouse/issues/49552 - DROP TABLE IF EXISTS video_log; DROP TABLE IF EXISTS video_log_result__fuzz_0; DROP TABLE IF EXISTS rng; @@ -16,7 +13,8 @@ CREATE TABLE video_log ) ENGINE = MergeTree PARTITION BY toDate(datetime) -ORDER BY (user_id, device_id); +ORDER BY (user_id, device_id) +SETTINGS index_granularity_bytes=10485760, index_granularity=8192; CREATE TABLE video_log_result__fuzz_0 ( @@ -62,7 +60,7 @@ LIMIT 10; ALTER TABLE video_log ADD PROJECTION p_norm ( - SELECT + SELECT datetime, device_id, bytes, @@ -77,12 +75,12 @@ SETTINGS mutations_sync = 1; ALTER TABLE video_log ADD PROJECTION p_agg ( - SELECT + SELECT toStartOfHour(datetime) AS hour, domain, sum(bytes), avg(duration) - GROUP BY + GROUP BY hour, domain ); diff --git a/tests/queries/0_stateless/02521_analyzer_array_join_crash.reference b/tests/queries/0_stateless/02521_analyzer_array_join_crash.reference index 59da8ccad1a..5e7728e0590 100644 --- a/tests/queries/0_stateless/02521_analyzer_array_join_crash.reference +++ b/tests/queries/0_stateless/02521_analyzer_array_join_crash.reference @@ -8,4 
+8,4 @@ SELECT id, value_element, value FROM test_table ARRAY JOIN [[1,2,3]] AS value_el 0 [1,2,3] 3 SELECT value_element, value FROM test_table ARRAY JOIN [1048577] AS value_element, arrayMap(x -> value_element, ['']) AS value; 1048577 [1048577] -SELECT arrayFilter(x -> notEmpty(concat(x)), [NULL, NULL]) FROM system.one ARRAY JOIN [1048577] AS elem, arrayMap(x -> concat(x, elem, ''), ['']) AS unused; -- { serverError 44 } +SELECT arrayFilter(x -> notEmpty(concat(x)), [NULL, NULL]) FROM system.one ARRAY JOIN [1048577] AS elem, arrayMap(x -> splitByChar(x, elem), ['']) AS unused; -- { serverError 44 } diff --git a/tests/queries/0_stateless/02521_analyzer_array_join_crash.sql b/tests/queries/0_stateless/02521_analyzer_array_join_crash.sql index c7641a3bee0..53606e01ab7 100644 --- a/tests/queries/0_stateless/02521_analyzer_array_join_crash.sql +++ b/tests/queries/0_stateless/02521_analyzer_array_join_crash.sql @@ -17,7 +17,7 @@ SELECT id, value_element, value FROM test_table ARRAY JOIN [[1,2,3]] AS value_el SELECT value_element, value FROM test_table ARRAY JOIN [1048577] AS value_element, arrayMap(x -> value_element, ['']) AS value; -SELECT arrayFilter(x -> notEmpty(concat(x)), [NULL, NULL]) FROM system.one ARRAY JOIN [1048577] AS elem, arrayMap(x -> concat(x, elem, ''), ['']) AS unused; -- { serverError 44 } +SELECT arrayFilter(x -> notEmpty(concat(x)), [NULL, NULL]) FROM system.one ARRAY JOIN [1048577] AS elem, arrayMap(x -> splitByChar(x, elem), ['']) AS unused; -- { serverError 44 } -- { echoOff } diff --git a/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference b/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference index 794fe5944cd..7e43f249a74 100644 --- a/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference +++ b/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference @@ -41,6 +41,20 @@ ExpressionTransform × 2 ExpressionTransform (ReadFromMergeTree) MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 +(Expression) +ExpressionTransform × 2 + (Filter) + FilterTransform × 2 + (Aggregating) + ExpressionTransform × 2 + AggregatingTransform × 2 + Copy 1 → 2 + (Expression) + ExpressionTransform + (Expression) + ExpressionTransform + (ReadFromMergeTree) + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 ---Result--- 2023-01-05 all @@ -60,3 +74,13 @@ ExpressionTransform × 2 FilterTransform (ReadFromMergeTree) MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 +(Expression) +ExpressionTransform × 2 + (Aggregating) + ExpressionTransform × 2 + AggregatingTransform × 2 + Copy 1 → 2 + (Expression) + ExpressionTransform + (ReadFromMergeTree) + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 diff --git a/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.sql b/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.sql index 9a970674890..3d98096f52e 100644 --- a/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.sql +++ b/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.sql @@ -26,7 +26,7 @@ FROM SELECT day_, type_1 - FROM test_grouping_sets_predicate + FROM test_grouping_sets_predicate WHERE day_ = '2023-01-05' GROUP BY GROUPING SETS ( @@ -58,7 +58,30 @@ FROM (day_)) ) AS t ) -WHERE type_1 = 'all'; +WHERE type_1 = 'all' settings allow_experimental_analyzer=0; + +-- Query plan with analyzer has less Filter steps (which is more optimal) +EXPLAIN PIPELINE +SELECT * 
+FROM +( + SELECT + day_, + if(type_1 = '', 'all', type_1) AS type_1 + FROM + ( + SELECT + day_, + type_1 + FROM test_grouping_sets_predicate + WHERE day_ = '2023-01-05' + GROUP BY + GROUPING SETS ( + (day_, type_1), + (day_)) + ) AS t +) +WHERE type_1 = 'all' settings allow_experimental_analyzer=1; SELECT ''; SELECT '---Result---'; @@ -104,6 +127,28 @@ FROM (day_)) ) AS t ) -WHERE day_ = '2023-01-05'; +WHERE day_ = '2023-01-05' settings allow_experimental_analyzer=0; + +-- Query plan with analyzer has less Filter steps (which is more optimal) +EXPLAIN PIPELINE +SELECT * +FROM +( + SELECT + day_, + if(type_1 = '', 'all', type_1) AS type_1 + FROM + ( + SELECT + day_, + type_1 + FROM test_grouping_sets_predicate + GROUP BY + GROUPING SETS ( + (day_, type_1), + (day_)) + ) AS t +) +WHERE day_ = '2023-01-05' settings allow_experimental_analyzer=1; DROP TABLE test_grouping_sets_predicate; diff --git a/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.reference b/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.reference index d083e178586..60ff2d76995 100644 --- a/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.reference +++ b/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.reference @@ -87,3 +87,4 @@ QUERY id: 0 LIST id: 6, nodes: 2 COLUMN id: 7, column_name: a, result_type: Int32, source_id: 3 CONSTANT id: 8, constant_value: UInt64_2, constant_value_type: UInt8 +1 diff --git a/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql b/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql index f20ef412215..eebea322dbf 100644 --- a/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql +++ b/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql @@ -24,3 +24,5 @@ EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a = 3 AND b = 'an SELECT * FROM 02668_logical_optimizer WHERE a = 2 AND 2 = a; EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a = 2 AND 2 = a; + +SELECT (k = 3) OR ( (k = 1) OR (k = 2) OR ( (NULL OR 1) = k ) ) FROM ( SELECT materialize(1) AS k ); diff --git a/tests/queries/0_stateless/02735_parquet_encoder.reference b/tests/queries/0_stateless/02735_parquet_encoder.reference index a7ee82bc67f..143fde3093f 100644 --- a/tests/queries/0_stateless/02735_parquet_encoder.reference +++ b/tests/queries/0_stateless/02735_parquet_encoder.reference @@ -36,11 +36,11 @@ ipv6 Nullable(FixedString(16)) 1 1000000 1 3914219105369203805 4 1000000 1 -(1000000,0,NULL,'100','299') -(1000000,0,NULL,'0','-1294970296') -(1000000,0,NULL,'-2147483296','2147481000') +(1000000,NULL,NULL,'100','299') +(1000000,NULL,NULL,'0','-1294970296') +(1000000,NULL,NULL,'-2147483296','2147481000') (100000,900000,NULL,'100009','999999') -[(2,0,NULL,'','[]')] +[(2,NULL,NULL,'','[]')] 1 1 0 1 5090915589685802007 diff --git a/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.reference b/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.reference index abe891cbb9b..81f1bdda20c 100644 --- a/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.reference +++ b/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.reference @@ -1,4 +1,4 @@ -test_dictionary_hashed 1000000 0.4768 33558760 -test_dictionary_hashed_load_factor 1000000 0.9537 16781544 -test_dictionary_sparse_hashed 1000000 0.4768 20975848 -test_dictionary_sparse_hashed_load_factor 1000000 0.9537 
10490088 +test_dictionary_hashed 1000000 0.4768 34000000 +test_dictionary_hashed_load_factor 1000000 0.9537 17000000 +test_dictionary_sparse_hashed 1000000 0.4768 21000000 +test_dictionary_sparse_hashed_load_factor 1000000 0.9537 10000000 diff --git a/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.sql.j2 b/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.sql.j2 index 870acd54514..41d68216412 100644 --- a/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.sql.j2 +++ b/tests/queries/0_stateless/02741_hashed_dictionary_load_factor.sql.j2 @@ -31,7 +31,7 @@ LIFETIME(0); SYSTEM RELOAD DICTIONARY test_dictionary_{{layout}}; SYSTEM RELOAD DICTIONARY test_dictionary_{{layout}}_load_factor; -SELECT name, element_count, round(load_factor, 4), bytes_allocated FROM system.dictionaries WHERE database = currentDatabase() ORDER BY name; +SELECT name, element_count, round(load_factor, 4), round(bytes_allocated, -6) FROM system.dictionaries WHERE database = currentDatabase() ORDER BY name; DROP DICTIONARY IF EXISTS test_dictionary_{{layout}}; DROP DICTIONARY IF EXISTS test_dictionary_{{layout}}_load_factor; diff --git a/tests/queries/0_stateless/02763_row_policy_storage_merge.reference b/tests/queries/0_stateless/02763_row_policy_storage_merge.reference new file mode 100644 index 00000000000..9fa5612e7cd --- /dev/null +++ b/tests/queries/0_stateless/02763_row_policy_storage_merge.reference @@ -0,0 +1,314 @@ +SELECT * FROM 02763_merge_log_1 ORDER BY x +1 11 +2 12 +3 13 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge) ORDER BY x +1 11 +1 11 +1 11 +1 11 +2 12 +2 12 +2 12 +2 12 +3 13 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +4 14 +SETTINGS optimize_move_to_prewhere= 0 +SELECT * FROM 02763_merge_log_1 +3 13 +SELECT * FROM merge(currentDatabase(), 02763_merge_log_1) +3 13 +SELECT * FROM merge(currentDatabase(), 02763_merge_log) +1 11 +2 12 +3 13 +3 13 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>2 +3 13 +3 13 +4 14 +SELECT * FROM 02763_merge_merge_1 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge_1) +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge) +1 11 +2 12 +3 13 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>2 +3 13 +4 14 +4 14 +SELECT * FROM engine_merge_12 WHERE x>2 +3 13 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge) +1 11 +1 11 +2 12 +2 12 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge) WHERE x>2 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +aaa 6 39 +aaa 6 39 +aaa 6 39 +aaa 8 42 +aaa 8 42 +aaa 8 42 +3 +3 +3 +4 +4 +4 +SELECT * FROM merge(...) LEFT JOIN merge(...) +3 13 13 +3 13 13 +4 14 14 +4 14 14 +SELECT * FROM merge(...) UNION ALL SELECT * FROM merge(...) +1 11 +1 11 +2 12 +2 12 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +SELECT x, SUM(x) FROM (SELECT * FROM merge(...) UNION ALL ...) 
GROUP BY x +1 22 +2 24 +3 39 +4 42 +1 11 0 +2 12 0 +3 13 0 +4 14 1 +4 14 1 +SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>1 -- with y>12 +2 12 +3 13 +3 13 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>1 -- with y>12 +2 12 +3 13 +3 13 +4 14 +4 14 +2 12 0 +3 13 1 +3 13 1 +4 14 1 +4 14 1 +SELECT y from merge(currentDatabase(), 02763_merge) +11 +11 +12 +12 +13 +13 +13 +13 +14 +14 +14 +14 +02763_merge_fancycols +SELECT * +SELECT x, lc +SELECT * +1 11 111 111 42 +1 11 111 111 42 +SELECT x, lc +1 111 +1 111 +SELECT x, lc, cnst +1 111 42 +1 111 42 +SELECT x, y from merge(currentDatabase(), 02763_merge +1 11 +1 11 +1 11 +1 11 +2 12 +2 12 +3 13 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +4 14 +SETTINGS optimize_move_to_prewhere= 1 +SELECT * FROM 02763_merge_log_1 +3 13 +SELECT * FROM merge(currentDatabase(), 02763_merge_log_1) +3 13 +SELECT * FROM merge(currentDatabase(), 02763_merge_log) +1 11 +2 12 +3 13 +3 13 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>2 +3 13 +3 13 +4 14 +SELECT * FROM 02763_merge_merge_1 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge_1) +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge) +1 11 +2 12 +3 13 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>2 +3 13 +4 14 +4 14 +SELECT * FROM engine_merge_12 WHERE x>2 +3 13 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge) +1 11 +1 11 +2 12 +2 12 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge) WHERE x>2 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +aaa 6 39 +aaa 6 39 +aaa 6 39 +aaa 8 42 +aaa 8 42 +aaa 8 42 +3 +3 +3 +4 +4 +4 +SELECT * FROM merge(...) LEFT JOIN merge(...) +3 13 13 +3 13 13 +4 14 14 +4 14 14 +SELECT * FROM merge(...) UNION ALL SELECT * FROM merge(...) +1 11 +1 11 +2 12 +2 12 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +SELECT x, SUM(x) FROM (SELECT * FROM merge(...) UNION ALL ...) 
GROUP BY x +1 22 +2 24 +3 39 +4 42 +1 11 0 +2 12 0 +3 13 0 +4 14 1 +4 14 1 +SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>1 -- with y>12 +2 12 +3 13 +3 13 +4 14 +4 14 +SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>1 -- with y>12 +2 12 +3 13 +3 13 +4 14 +4 14 +2 12 0 +3 13 1 +3 13 1 +4 14 1 +4 14 1 +SELECT y from merge(currentDatabase(), 02763_merge) +11 +11 +12 +12 +13 +13 +13 +13 +14 +14 +14 +14 +02763_merge_fancycols +SELECT * +SELECT x, lc +SELECT * +1 11 111 111 42 +1 11 111 111 42 +SELECT x, lc +1 111 +1 111 +SELECT x, lc, cnst +1 111 42 +1 111 42 +SELECT x, y from merge(currentDatabase(), 02763_merge +1 11 +1 11 +1 11 +1 11 +2 12 +2 12 +3 13 +3 13 +3 13 +3 13 +4 14 +4 14 +4 14 +4 14 diff --git a/tests/queries/0_stateless/02763_row_policy_storage_merge.sql.j2 b/tests/queries/0_stateless/02763_row_policy_storage_merge.sql.j2 new file mode 100644 index 00000000000..0263e1a974f --- /dev/null +++ b/tests/queries/0_stateless/02763_row_policy_storage_merge.sql.j2 @@ -0,0 +1,143 @@ +DROP TABLE IF EXISTS 02763_merge_log_1; +DROP TABLE IF EXISTS 02763_merge_log_2; +DROP TABLE IF EXISTS 02763_merge_merge_1; +DROP TABLE IF EXISTS 02763_merge_merge_2; +DROP TABLE IF EXISTS 02763_merge_fancycols; +DROP ROW POLICY IF EXISTS 02763_filter_1 ON 02763_merge_log_1; +DROP ROW POLICY IF EXISTS 02763_filter_2 ON 02763_merge_merge_1; +DROP ROW POLICY IF EXISTS 02763_filter_3 ON 02763_merge_log_1; +DROP ROW POLICY IF EXISTS 02763_filter_4 ON 02763_merge_merge_1; +DROP ROW POLICY IF EXISTS 02763_filter_5 ON 02763_merge_fancycols; +DROP ROW POLICY IF EXISTS 02763_filter_6 ON 02763_merge_fancycols; + + +CREATE TABLE 02763_merge_log_1 (x UInt8, y UInt64) ENGINE = Log; +CREATE TABLE 02763_merge_log_2 (x UInt8, y UInt64) ENGINE = Log; + +CREATE TABLE 02763_merge_merge_1 (x UInt8, y UInt64) ENGINE = MergeTree ORDER BY x; +CREATE TABLE 02763_merge_merge_2 (x UInt8, y UInt64) ENGINE = MergeTree ORDER BY x; + +CREATE TABLE 02763_engine_merge_12 (x UInt8, y UInt64) ENGINE = Merge(currentDatabase(), '02763_merge_merge'); + +INSERT INTO 02763_merge_log_1 VALUES (1, 11), (2, 12), (3, 13), (4, 14); +INSERT INTO 02763_merge_log_2 VALUES (1, 11), (2, 12), (3, 13), (4, 14); +INSERT INTO 02763_merge_merge_1 VALUES (1, 11), (2, 12), (3, 13), (4, 14); +INSERT INTO 02763_merge_merge_2 VALUES (1, 11), (2, 12), (3, 13), (4, 14); + +SELECT 'SELECT * FROM 02763_merge_log_1 ORDER BY x'; +SELECT * FROM 02763_merge_log_1 ORDER BY x; + +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge) ORDER BY x'; +SELECT * FROM merge(currentDatabase(), '02763_merge') ORDER BY x; + + +{% for prew in [0 , 1] -%} + +SELECT 'SETTINGS optimize_move_to_prewhere= {{prew}}'; + +CREATE ROW POLICY 02763_filter_1 ON 02763_merge_log_1 USING x=3 AS permissive TO ALL; + +SELECT 'SELECT * FROM 02763_merge_log_1'; +SELECT * FROM 02763_merge_log_1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_log_1)'; +SELECT * FROM merge(currentDatabase(), '02763_merge_log_1') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_log)'; +SELECT * FROM merge(currentDatabase(), '02763_merge_log') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>2'; +SELECT * FROM merge(currentDatabase(), '02763_merge_log') WHERE x>2 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +CREATE ROW POLICY 02763_filter_2 ON 
02763_merge_merge_1 USING x=4 AS permissive TO ALL; + +SELECT 'SELECT * FROM 02763_merge_merge_1'; +SELECT * FROM 02763_merge_merge_1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_merge_1)'; +SELECT * FROM merge(currentDatabase(), '02763_merge_merge_1') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_merge)'; +SELECT * FROM merge(currentDatabase(), '02763_merge_merge') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>2'; +SELECT * FROM merge(currentDatabase(), '02763_merge_merge') WHERE x>2 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + + +SELECT 'SELECT * FROM engine_merge_12 WHERE x>2'; +SELECT * FROM 02763_engine_merge_12 WHERE x>2 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + + +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge)'; +SELECT * FROM merge(currentDatabase(), '02763_merge') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge) WHERE x>2'; +SELECT * FROM merge(currentDatabase(), '02763_merge') WHERE x>2 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +SELECT 'aaa', x*2 as x_2, y*3 as y_3 FROM merge(currentDatabase(), '02763_merge') WHERE x>2 ORDER BY x_2 SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT x FROM (SELECT * FROM merge(currentDatabase(), '02763_merge') WHERE x IN (3,4)) ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +SELECT 'SELECT * FROM merge(...) LEFT JOIN merge(...)'; +SELECT * FROM merge(currentDatabase(), '02763_merge.*1') as a +LEFT JOIN +merge(currentDatabase(), '02763_merge.*2') as b +USING (x) +ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +SELECT 'SELECT * FROM merge(...) UNION ALL SELECT * FROM merge(...)'; +SELECT * FROM +( +SELECT * FROM merge(currentDatabase(), '02763_merge.*1') +UNION ALL +SELECT * FROM merge(currentDatabase(), '02763_merge.*2') +) +ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +SELECT 'SELECT x, SUM(x) FROM (SELECT * FROM merge(...) UNION ALL ...) 
GROUP BY x'; +SELECT x, SUM(y) FROM +(SELECT * FROM merge(currentDatabase(), '02763_merge.*1') +UNION ALL +SELECT * FROM merge(currentDatabase(), '02763_merge.*2')) +GROUP BY x +ORDER BY x; + +SELECT *, x=4 FROM merge(currentDatabase(), '02763_merge_merge') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +CREATE ROW POLICY 02763_filter_3 ON 02763_merge_log_1 USING y>12 AS permissive TO ALL; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>1 -- with y>12'; +SELECT * FROM merge(currentDatabase(), '02763_merge_log') WHERE x>1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +CREATE ROW POLICY 02763_filter_4 ON 02763_merge_merge_1 USING y>12 AS permissive TO ALL; +SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>1 -- with y>12'; +SELECT * FROM merge(currentDatabase(), '02763_merge_merge') WHERE x>1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +SELECT *, (x=4 OR y>12) FROM merge(currentDatabase(), '02763_merge_merge') WHERE x>1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +SELECT 'SELECT y from merge(currentDatabase(), 02763_merge)'; +SELECT y from merge(currentDatabase(), '02763_merge') ORDER BY y SETTINGS optimize_move_to_prewhere= {{prew}}; + +SELECT '02763_merge_fancycols'; +CREATE TABLE 02763_merge_fancycols (x UInt8, y Nullable(UInt64), z String DEFAULT CONCAT(toString(x), toString(y)), lc LowCardinality(String) DEFAULT z, cnst UInt32 MATERIALIZED 42) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO 02763_merge_fancycols (x, y) SELECT x, y from merge(currentDatabase(), '02763_merge'); + +CREATE ROW POLICY 02763_filter_5 ON 02763_merge_fancycols USING cnst<>42 AS permissive TO ALL; +SELECT 'SELECT *'; +SELECT * from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT x, lc'; +SELECT x, lc from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +CREATE ROW POLICY 02763_filter_6 ON 02763_merge_fancycols USING lc='111' AS permissive TO ALL; +SELECT 'SELECT *'; +SELECT * from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT x, lc'; +SELECT x, lc from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT x, lc, cnst'; +SELECT x, lc, cnst from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT x, y from merge(currentDatabase(), 02763_merge'; +SELECT x, y from merge(currentDatabase(), '02763_merge') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +DROP TABLE 02763_merge_fancycols; + +DROP ROW POLICY 02763_filter_1 ON 02763_merge_log_1; +DROP ROW POLICY 02763_filter_2 ON 02763_merge_merge_1; + +DROP ROW POLICY 02763_filter_3 ON 02763_merge_log_1; +DROP ROW POLICY 02763_filter_4 ON 02763_merge_merge_1; + +DROP ROW POLICY 02763_filter_5 ON 02763_merge_fancycols; +DROP ROW POLICY 02763_filter_6 ON 02763_merge_fancycols; + +{% endfor %} diff --git a/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.reference b/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.reference new file mode 100644 index 00000000000..56bfdbe0b18 --- /dev/null +++ b/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.reference @@ -0,0 +1,49 @@ +02763_merge_aliases +x, y, z FROM 02763_a_merge +3 13 16 +4 14 18 +* FROM 02763_a_merge +3 13 16 +4 
14 18 +x, y FROM 02763_a_merge +3 13 +4 14 +SELECT x, y FROM merge(currentDatabase(), 02763_alias) +3 13 +4 14 +SELECT x, y FROM merge(currentDatabase(), 02763_alias) +2 12 +3 13 +4 14 +SELECT x FROM merge(currentDatabase(), 02763_alias) +12 +13 +14 +SELECT y FROM merge(currentDatabase(), 02763_alias) +2 +3 +4 +x, y, z FROM 02763_a_merge +3 13 16 +4 14 18 +* FROM 02763_a_merge +3 13 16 +4 14 18 +x, y FROM 02763_a_merge +3 13 +4 14 +SELECT x, y FROM merge(currentDatabase(), 02763_alias) +3 13 +4 14 +SELECT x, y FROM merge(currentDatabase(), 02763_alias) +2 12 +3 13 +4 14 +SELECT x FROM merge(currentDatabase(), 02763_alias) +12 +13 +14 +SELECT y FROM merge(currentDatabase(), 02763_alias) +2 +3 +4 diff --git a/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.sql.j2 b/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.sql.j2 new file mode 100644 index 00000000000..bdd456951dd --- /dev/null +++ b/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.sql.j2 @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS 02763_alias; +DROP TABLE IF EXISTS 02763_a_merge; + + +SELECT '02763_merge_aliases'; +CREATE TABLE 02763_alias (x UInt8, y UInt64, z UInt64 ALIAS plus(x,y)) ENGINE = MergeTree ORDER BY x; +INSERT INTO 02763_alias VALUES (1, 11), (2, 12), (3, 13), (4, 14); + +CREATE ROW POLICY 02763_filter_7 ON 02763_alias USING z>15 AS permissive TO ALL; + +CREATE TABLE 02763_a_merge (x UInt8, y UInt64, z UInt64) ENGINE = Merge(currentDatabase(), '02763_alias'); + +{% for prew in [0 , 1] -%} + + + +SELECT 'x, y, z FROM 02763_a_merge'; +SELECT x, y, z FROM 02763_a_merge ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT '* FROM 02763_a_merge'; +SELECT * FROM 02763_a_merge ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'x, y FROM 02763_a_merge'; +SELECT x, y FROM 02763_a_merge ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT x, y FROM merge(currentDatabase(), 02763_alias)'; +SELECT x, y FROM merge(currentDatabase(), '02763_alias') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +CREATE ROW POLICY 02763_filter_8 ON 02763_alias USING y>11 AS permissive TO ALL; + +SELECT 'SELECT x, y FROM merge(currentDatabase(), 02763_alias)'; +SELECT x, y FROM merge(currentDatabase(), '02763_alias') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT x FROM merge(currentDatabase(), 02763_alias)'; +SELECT y FROM merge(currentDatabase(), '02763_alias') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; +SELECT 'SELECT y FROM merge(currentDatabase(), 02763_alias)'; +SELECT x FROM merge(currentDatabase(), '02763_alias') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}}; + +DROP ROW POLICY 02763_filter_8 ON 02763_alias; +{% endfor %} + +DROP TABLE 02763_alias; +DROP TABLE 02763_a_merge; + +DROP ROW POLICY 02763_filter_7 ON 02763_alias; diff --git a/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql b/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql index 89073bd2943..3bbcbb1a535 100644 --- a/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql +++ b/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql @@ -5,7 +5,7 @@ -- Tests the output of SHOW COLUMNS when called through the ClickHouse protocol. 
-- ----------------------------------------------------------------------------------- --- Please keep this test in-sync with 02775_show_columns_called_through_mysql.sql +-- Please keep this test in-sync with 02775_show_columns_called_from_clickhouse.expect -- ----------------------------------------------------------------------------------- DROP TABLE IF EXISTS tab; diff --git a/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect b/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect index bef5bd10ff3..8ba5774820e 100755 --- a/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect +++ b/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect @@ -6,7 +6,7 @@ # Tests the output of SHOW COLUMNS when called through the MySQL protocol. # ----------------------------------------------------------------------------------- -# Please keep this test in-sync with 02775_show_columns_called_through_clickhouse.sql +# Please keep this test in-sync with 02775_show_columns_called_from_clickhouse.sql # ----------------------------------------------------------------------------------- set basedir [file dirname $argv0] diff --git a/tests/queries/0_stateless/02813_series_period_detect.reference b/tests/queries/0_stateless/02813_series_period_detect.reference new file mode 100644 index 00000000000..f72e8498f31 --- /dev/null +++ b/tests/queries/0_stateless/02813_series_period_detect.reference @@ -0,0 +1,5 @@ +14 +3 +3 +3 +0 diff --git a/tests/queries/0_stateless/02813_series_period_detect.sql b/tests/queries/0_stateless/02813_series_period_detect.sql new file mode 100644 index 00000000000..e860fd75923 --- /dev/null +++ b/tests/queries/0_stateless/02813_series_period_detect.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest + +SELECT seriesPeriodDetectFFT([139, 87, 110, 68, 54, 50, 51, 53, 133, 86, 141, 97, 156, 94, 149, 95, 140, 77, 61, 50, 54, 47, 133, 72, 152, 94, 148, 105, 162, 101, 160, 87, 63, 53, 55, 54, 151, 103, 189, 108, 183, 113, 175, 113, 178, 90, 71, 62, 62, 65, 165, 109, 181, 115, 182, 121, 178, 114, 170]); +SELECT seriesPeriodDetectFFT([10,20,30,10,20,30,10,20,30, 10,20,30,10,20,30,10,20,30,10,20,30]); +SELECT seriesPeriodDetectFFT([10.1, 20.45, 40.34, 10.1, 20.45, 40.34,10.1, 20.45, 40.34,10.1, 20.45, 40.34,10.1, 20.45, 40.34,10.1, 20.45, 40.34,10.1, 20.45, 40.34, 10.1, 20.45, 40.34]); +SELECT seriesPeriodDetectFFT([10.1, 10, 400, 10.1, 10, 400, 10.1, 10, 400,10.1, 10, 400,10.1, 10, 400,10.1, 10, 400,10.1, 10, 400,10.1, 10, 400]); +SELECT seriesPeriodDetectFFT([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]); +SELECT seriesPeriodDetectFFT([1,2,3]); -- { serverError BAD_ARGUMENTS} +SELECT seriesPeriodDetectFFT(); --{ serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT seriesPeriodDetectFFT([]); -- { serverError ILLEGAL_COLUMN} +SELECT seriesPeriodDetectFFT([NULL, NULL, NULL]); -- { serverError ILLEGAL_COLUMN} +SELECT seriesPeriodDetectFFT([10,20,30,10,202,30,NULL]); -- { serverError ILLEGAL_COLUMN } \ No newline at end of file diff --git a/tests/queries/0_stateless/02861_filter_pushdown_const_bug.reference b/tests/queries/0_stateless/02861_filter_pushdown_const_bug.reference index 428ba88bff0..df8198bc856 100644 --- a/tests/queries/0_stateless/02861_filter_pushdown_const_bug.reference +++ b/tests/queries/0_stateless/02861_filter_pushdown_const_bug.reference @@ -6,3 +6,5 @@ 1 1 1 1 +1 1 +1 1 diff --git a/tests/queries/0_stateless/02861_filter_pushdown_const_bug.sql b/tests/queries/0_stateless/02861_filter_pushdown_const_bug.sql index 
a5ddf830d48..a299e50984f 100644 --- a/tests/queries/0_stateless/02861_filter_pushdown_const_bug.sql +++ b/tests/queries/0_stateless/02861_filter_pushdown_const_bug.sql @@ -15,4 +15,8 @@ SELECT key FROM ( SELECT key FROM t1 ) AS t1 JOIN ( SELECT key FROM t1 ) AS t2 O SELECT key FROM ( SELECT 1 AS key ) AS t1 JOIN ( SELECT 1 AS key ) AS t2 ON t1.key = t2.key WHERE key; SELECT * FROM ( SELECT 1 AS key GROUP BY NULL ) AS t1 INNER JOIN (SELECT 1 AS key) AS t2 ON t1.key = t2.key WHERE t1.key ORDER BY key; +SET join_algorithm = 'grace_hash'; + +SELECT * FROM (SELECT key AS a FROM t1 ) t1 INNER JOIN (SELECT key AS c FROM t1 ) t2 ON c = a WHERE a; + DROP TABLE IF EXISTS t1; diff --git a/tests/queries/0_stateless/02867_nullable_primary_key_final.reference b/tests/queries/0_stateless/02867_nullable_primary_key_final.reference index 2e55b120f6e..035932e1bb4 100644 --- a/tests/queries/0_stateless/02867_nullable_primary_key_final.reference +++ b/tests/queries/0_stateless/02867_nullable_primary_key_final.reference @@ -1,2 +1,3 @@ 2023-09-01 2500000000 2023-09-01 166167 +10 diff --git a/tests/queries/0_stateless/02867_nullable_primary_key_final.sql b/tests/queries/0_stateless/02867_nullable_primary_key_final.sql index 05677789459..773a6d35b8d 100644 --- a/tests/queries/0_stateless/02867_nullable_primary_key_final.sql +++ b/tests/queries/0_stateless/02867_nullable_primary_key_final.sql @@ -55,3 +55,11 @@ WHERE f2 = 'x' GROUP BY 1; DROP TABLE t; + +CREATE TABLE t (o Nullable(String), p Nullable(String)) ENGINE = ReplacingMergeTree ORDER BY (p, o) SETTINGS allow_nullable_key = 1, index_granularity = 2; + +INSERT INTO t SELECT number, NULL FROM numbers(10); + +SELECT count() FROM t FINAL; + +DROP TABLE t; diff --git a/tests/queries/0_stateless/02868_distinct_to_count_optimization.reference b/tests/queries/0_stateless/02868_distinct_to_count_optimization.reference index b2b15f92199..a2c441fa460 100644 --- a/tests/queries/0_stateless/02868_distinct_to_count_optimization.reference +++ b/tests/queries/0_stateless/02868_distinct_to_count_optimization.reference @@ -125,18 +125,13 @@ QUERY id: 0 QUERY id: 3, is_subquery: 1 PROJECTION COLUMNS a UInt8 - sum(b) UInt64 PROJECTION - LIST id: 4, nodes: 2 + LIST id: 4, nodes: 1 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 - FUNCTION id: 7, function_name: sum, function_type: aggregate, result_type: UInt64 - ARGUMENTS - LIST id: 8, nodes: 1 - COLUMN id: 9, column_name: b, result_type: UInt8, source_id: 6 JOIN TREE TABLE id: 6, table_name: default.test_rewrite_uniq_to_count GROUP BY - LIST id: 10, nodes: 1 + LIST id: 7, nodes: 1 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 SETTINGS allow_experimental_analyzer=1 6. test group by with subquery alias @@ -162,18 +157,13 @@ QUERY id: 0 QUERY id: 3, alias: t, is_subquery: 1 PROJECTION COLUMNS a UInt8 - sum(b) UInt64 PROJECTION - LIST id: 4, nodes: 2 + LIST id: 4, nodes: 1 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 - FUNCTION id: 7, function_name: sum, function_type: aggregate, result_type: UInt64 - ARGUMENTS - LIST id: 8, nodes: 1 - COLUMN id: 9, column_name: b, result_type: UInt8, source_id: 6 JOIN TREE TABLE id: 6, table_name: default.test_rewrite_uniq_to_count GROUP BY - LIST id: 10, nodes: 1 + LIST id: 7, nodes: 1 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 SETTINGS allow_experimental_analyzer=1 7. 
test group by with compound column name @@ -199,18 +189,13 @@ QUERY id: 0 QUERY id: 3, alias: t, is_subquery: 1 PROJECTION COLUMNS alias_of_a UInt8 - sum(b) UInt64 PROJECTION - LIST id: 4, nodes: 2 + LIST id: 4, nodes: 1 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 - FUNCTION id: 7, function_name: sum, function_type: aggregate, result_type: UInt64 - ARGUMENTS - LIST id: 8, nodes: 1 - COLUMN id: 9, column_name: b, result_type: UInt8, source_id: 6 JOIN TREE TABLE id: 6, table_name: default.test_rewrite_uniq_to_count GROUP BY - LIST id: 10, nodes: 1 + LIST id: 7, nodes: 1 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 SETTINGS allow_experimental_analyzer=1 8. test group by with select expression alias @@ -236,17 +221,12 @@ QUERY id: 0 QUERY id: 3, alias: t, is_subquery: 1 PROJECTION COLUMNS alias_of_a UInt8 - sum(b) UInt64 PROJECTION - LIST id: 4, nodes: 2 + LIST id: 4, nodes: 1 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 - FUNCTION id: 7, function_name: sum, function_type: aggregate, result_type: UInt64 - ARGUMENTS - LIST id: 8, nodes: 1 - COLUMN id: 9, column_name: b, result_type: UInt8, source_id: 6 JOIN TREE TABLE id: 6, table_name: default.test_rewrite_uniq_to_count GROUP BY - LIST id: 10, nodes: 1 + LIST id: 7, nodes: 1 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 SETTINGS allow_experimental_analyzer=1 diff --git a/tests/queries/0_stateless/02869_gcd_codec_test_incorrect_type.sql b/tests/queries/0_stateless/02869_gcd_codec_test_incorrect_type.sql deleted file mode 100644 index 61a964a288f..00000000000 --- a/tests/queries/0_stateless/02869_gcd_codec_test_incorrect_type.sql +++ /dev/null @@ -1,2 +0,0 @@ -DROP TABLE IF EXISTS table_gcd_codec; -CREATE TABLE table_gcd_codec (str String CODEC(GCD, LZ4)) ENGINE = Memory; -- { serverError 36 } diff --git a/tests/queries/0_stateless/02872_gcd_codec.reference b/tests/queries/0_stateless/02872_gcd_codec.reference index 1dd1b67e047..a8224b85b3c 100644 --- a/tests/queries/0_stateless/02872_gcd_codec.reference +++ b/tests/queries/0_stateless/02872_gcd_codec.reference @@ -1002,3 +1002,203 @@ 0 0 0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/02872_gcd_codec.sql b/tests/queries/0_stateless/02872_gcd_codec.sql index 245a1211052..3020bbc44ac 100644 --- a/tests/queries/0_stateless/02872_gcd_codec.sql +++ b/tests/queries/0_stateless/02872_gcd_codec.sql @@ -103,8 +103,24 @@ SELECT * FROM table_gcd_codec_datetime; SELECT * FROM table_gcd_codec_datetime64; --- A column with all 0 values can be compressed/decompressed +-- A column with all zero values can be compressed/decompressed -CREATE TEMPORARY TABLE table_gcd_codec_only_zero_values (n UInt8 CODEC(GCD, LZ4)) ENGINE = Memory; -INSERT INTO table_gcd_codec_only_zero_values VALUES (0), (0), (0); -SELECT * FROM table_gcd_codec_only_zero_values; +CREATE TEMPORARY TABLE table_gcd_codec_only_zeros (n UInt8 CODEC(GCD, LZ4)) ENGINE = Memory; +INSERT INTO 
table_gcd_codec_only_zeros VALUES (0), (0), (0); +SELECT * FROM table_gcd_codec_only_zeros; + +-- Tests for Bug #56672: + +DROP TABLE IF EXISTS table_gcd_codec_one_hundred_zeros; +DROP TABLE IF EXISTS table_gcd_codec_one_hundred_ones; + +CREATE TABLE table_gcd_codec_one_hundred_zeros (a Nullable(Int64) CODEC (GCD,LZ4)) ENGINE=MergeTree ORDER BY (); +INSERT INTO table_gcd_codec_one_hundred_zeros SELECT 0 FROM numbers(100); +SELECT * FROM table_gcd_codec_one_hundred_zeros; + +CREATE TABLE table_gcd_codec_one_hundred_ones (a Nullable(Int64) CODEC (GCD,LZ4)) ENGINE=MergeTree Order by (); +INSERT INTO table_gcd_codec_one_hundred_ones SELECT 1 FROM numbers(100); +SELECT * FROM table_gcd_codec_one_hundred_ones; + +DROP TABLE table_gcd_codec_one_hundred_zeros; +DROP TABLE table_gcd_codec_one_hundred_ones; diff --git a/tests/queries/0_stateless/02884_parallel_window_functions.reference b/tests/queries/0_stateless/02884_parallel_window_functions.reference new file mode 100644 index 00000000000..bac15838dc2 --- /dev/null +++ b/tests/queries/0_stateless/02884_parallel_window_functions.reference @@ -0,0 +1,100 @@ +1 +-- { echoOn } + +SELECT + nw, + sum(WR) AS R, + sumIf(WR, uniq_rows = 1) AS UNR +FROM +( + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_funtion_threading + GROUP BY ac, nw +) +GROUP BY nw +ORDER BY nw ASC, R DESC +LIMIT 10; +0 2 0 +1 2 0 +2 2 0 +SELECT + nw, + sum(WR) AS R, + sumIf(WR, uniq_rows = 1) AS UNR +FROM +( + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_funtion_threading + GROUP BY ac, nw +) +GROUP BY nw +ORDER BY nw ASC, R DESC +LIMIT 10 +SETTINGS max_threads = 1; +0 2 0 +1 2 0 +2 2 0 +SELECT + nw, + sum(WR) AS R, + sumIf(WR, uniq_rows = 1) AS UNR +FROM +( + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_funtion_threading + WHERE (ac % 4) = 0 + GROUP BY + ac, + nw + UNION ALL + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_funtion_threading + WHERE (ac % 4) = 1 + GROUP BY + ac, + nw + UNION ALL + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_funtion_threading + WHERE (ac % 4) = 2 + GROUP BY + ac, + nw + UNION ALL + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_funtion_threading + WHERE (ac % 4) = 3 + GROUP BY + ac, + nw +) +GROUP BY nw +ORDER BY nw ASC, R DESC +LIMIT 10; +0 2 0 +1 2 0 +2 2 0 diff --git a/tests/queries/0_stateless/02884_parallel_window_functions.sql b/tests/queries/0_stateless/02884_parallel_window_functions.sql new file mode 100644 index 00000000000..3151b42f896 --- /dev/null +++ b/tests/queries/0_stateless/02884_parallel_window_functions.sql @@ -0,0 +1,119 @@ +CREATE TABLE window_funtion_threading +Engine = MergeTree +ORDER BY (ac, nw) +AS SELECT + toUInt64(toFloat32(number % 2) % 20000000) as ac, + toFloat32(1) as wg, + toUInt16(toFloat32(number % 3) % 400) as nw +FROM numbers_mt(10000000); + +SELECT count() FROM (EXPLAIN PIPELINE SELECT + nw, + sum(WR) AS R, + sumIf(WR, uniq_rows = 1) AS UNR +FROM +( + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_funtion_threading + GROUP BY ac, nw +) +GROUP BY nw +ORDER BY nw ASC, R DESC +LIMIT 10) where explain ilike '%ScatterByPartitionTransform%' SETTINGS max_threads = 4; + +-- { echoOn } + +SELECT + nw, + sum(WR) AS R, + sumIf(WR, uniq_rows = 1) AS UNR +FROM +( + SELECT 
+ uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_funtion_threading + GROUP BY ac, nw +) +GROUP BY nw +ORDER BY nw ASC, R DESC +LIMIT 10; + +SELECT + nw, + sum(WR) AS R, + sumIf(WR, uniq_rows = 1) AS UNR +FROM +( + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_funtion_threading + GROUP BY ac, nw +) +GROUP BY nw +ORDER BY nw ASC, R DESC +LIMIT 10 +SETTINGS max_threads = 1; + +SELECT + nw, + sum(WR) AS R, + sumIf(WR, uniq_rows = 1) AS UNR +FROM +( + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_funtion_threading + WHERE (ac % 4) = 0 + GROUP BY + ac, + nw + UNION ALL + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_funtion_threading + WHERE (ac % 4) = 1 + GROUP BY + ac, + nw + UNION ALL + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_funtion_threading + WHERE (ac % 4) = 2 + GROUP BY + ac, + nw + UNION ALL + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_funtion_threading + WHERE (ac % 4) = 3 + GROUP BY + ac, + nw +) +GROUP BY nw +ORDER BY nw ASC, R DESC +LIMIT 10; diff --git a/tests/queries/0_stateless/02884_parquet_new_encodings.reference b/tests/queries/0_stateless/02884_parquet_new_encodings.reference new file mode 100644 index 00000000000..1034f208e18 --- /dev/null +++ b/tests/queries/0_stateless/02884_parquet_new_encodings.reference @@ -0,0 +1 @@ +SWEEP SWETT 00459 \N ('20221206100111','+0100') ('20221206100111','+0100') ('20221206100111','+0100') 3 11 T \N diff --git a/tests/queries/0_stateless/02884_parquet_new_encodings.sh b/tests/queries/0_stateless/02884_parquet_new_encodings.sh new file mode 100755 index 00000000000..496ed126e23 --- /dev/null +++ b/tests/queries/0_stateless/02884_parquet_new_encodings.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_parquet/delta_length_byte_array_encoding.parquet')" + diff --git a/tests/queries/0_stateless/02888_obsolete_settings.reference b/tests/queries/0_stateless/02888_obsolete_settings.reference index 6ee5216cd73..63553092c0c 100644 --- a/tests/queries/0_stateless/02888_obsolete_settings.reference +++ b/tests/queries/0_stateless/02888_obsolete_settings.reference @@ -42,6 +42,7 @@ optimize_duplicate_order_by_and_distinct optimize_fuse_sum_count_avg parallel_replicas_min_number_of_granules_to_enable partial_merge_join_optimizations +query_cache_store_results_of_queries_with_nondeterministic_functions query_plan_optimize_projection replication_alter_columns_timeout restore_threads diff --git a/tests/queries/0_stateless/02888_replicated_merge_tree_creation.reference b/tests/queries/0_stateless/02888_replicated_merge_tree_creation.reference new file mode 100644 index 00000000000..98fb6a68656 --- /dev/null +++ b/tests/queries/0_stateless/02888_replicated_merge_tree_creation.reference @@ -0,0 +1,4 @@ +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh b/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh new file mode 100755 index 00000000000..b9603e75d2e --- /dev/null +++ b/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# Tags: zookeeper, no-parallel, no-ordinary-database + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS test_exception_replicated SYNC" + +UUID=$(${CLICKHOUSE_CLIENT} --query "SELECT reinterpretAsUUID(currentDatabase())") + +#### 1 - There is only one replica + +${CLICKHOUSE_CLIENT} --create_replicated_merge_tree_fault_injection_probability=1 \ + -q "CREATE TABLE test_exception_replicated UUID '$UUID' (date Date) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/recreate', 'r1') ORDER BY date" 2>&1 | grep -cm1 "Fault injected" + +# We will see that the replica is empty and throw the same 'Fault injected' exception as before +${CLICKHOUSE_CLIENT} --create_replicated_merge_tree_fault_injection_probability=1 \ + -q "CREATE TABLE test_exception_replicated UUID '$UUID' (date Date) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/recreate', 'r1') ORDER BY date" 2>&1 | grep -cm1 "Fault injected" + +# We will succeed +${CLICKHOUSE_CLIENT} \ + -q "CREATE TABLE test_exception_replicated UUID '$UUID' (date Date) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/recreate', 'r1') ORDER BY date" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE test_exception_replicated SYNC" + +#### 2 - There are two replicas + +${CLICKHOUSE_CLIENT} --create_replicated_merge_tree_fault_injection_probability=1 \ + -q "CREATE TABLE test_exception_replicated UUID '$UUID' (date Date) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/recreate', 'r1') ORDER BY date" 2>&1 | grep -cm1 "Fault injected" +${CLICKHOUSE_CLIENT} --create_replicated_merge_tree_fault_injection_probability=1 \ + -q "CREATE TABLE test_exception_replicated_2 (date Date) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/recreate', 'r2') ORDER BY date" 2>&1 | grep -cm1 "Fault injected" + +# We will succeed +${CLICKHOUSE_CLIENT} \ + -q "CREATE TABLE test_exception_replicated UUID '$UUID' 
(date Date) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/recreate', 'r1') ORDER BY date" + +# The trash from the second replica creation will not prevent us from dropping the table fully, so we delete it separately +${CLICKHOUSE_CLIENT} -q "SYSTEM DROP REPLICA 'r2' FROM TABLE test_exception_replicated" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE test_exception_replicated SYNC" \ No newline at end of file diff --git a/tests/queries/0_stateless/02892_rocksdb_trivial_count.reference b/tests/queries/0_stateless/02892_rocksdb_trivial_count.reference index 9289ddcee34..4598404dd40 100644 --- a/tests/queries/0_stateless/02892_rocksdb_trivial_count.reference +++ b/tests/queries/0_stateless/02892_rocksdb_trivial_count.reference @@ -1 +1,10 @@ +-- { echoOn } +SELECT count() FROM dict SETTINGS optimize_trivial_approximate_count_query = 0, max_rows_to_read = 1; -- { serverError TOO_MANY_ROWS } +SELECT count() FROM dict SETTINGS optimize_trivial_approximate_count_query = 1, max_rows_to_read = 1; 121 +SET optimize_trivial_approximate_count_query = 1; +-- needs more data to see total_bytes or just detach and attach the table +DETACH TABLE dict SYNC; +ATTACH TABLE dict; +SELECT total_rows, total_bytes > 0 FROM system.tables WHERE database = currentDatabase() AND name = 'dict' FORMAT CSV; +121,1 diff --git a/tests/queries/0_stateless/02892_rocksdb_trivial_count.sql b/tests/queries/0_stateless/02892_rocksdb_trivial_count.sql index 0cdf2d1b2b2..a770b153760 100644 --- a/tests/queries/0_stateless/02892_rocksdb_trivial_count.sql +++ b/tests/queries/0_stateless/02892_rocksdb_trivial_count.sql @@ -2,5 +2,11 @@ CREATE TABLE dict (key UInt64, value String) ENGINE = EmbeddedRocksDB PRIMARY KEY key; INSERT INTO dict SELECT number, toString(number) FROM numbers(121); +-- { echoOn } SELECT count() FROM dict SETTINGS optimize_trivial_approximate_count_query = 0, max_rows_to_read = 1; -- { serverError TOO_MANY_ROWS } SELECT count() FROM dict SETTINGS optimize_trivial_approximate_count_query = 1, max_rows_to_read = 1; +SET optimize_trivial_approximate_count_query = 1; +-- needs more data to see total_bytes or just detach and attach the table +DETACH TABLE dict SYNC; +ATTACH TABLE dict; +SELECT total_rows, total_bytes > 0 FROM system.tables WHERE database = currentDatabase() AND name = 'dict' FORMAT CSV; diff --git a/tests/queries/0_stateless/02895_npy_format.reference b/tests/queries/0_stateless/02895_npy_format.reference index 0c90fbfd418..76c8a7a2abf 100644 --- a/tests/queries/0_stateless/02895_npy_format.reference +++ b/tests/queries/0_stateless/02895_npy_format.reference @@ -84,3 +84,8 @@ c 0 0 1 +[2.199219,1.099609,3.300781] +[4.25,3.34961,6.628906] +inf +nan +0 diff --git a/tests/queries/0_stateless/02895_npy_format.sh b/tests/queries/0_stateless/02895_npy_format.sh index 1dbf62ceaa2..c4fb2e2f67d 100755 --- a/tests/queries/0_stateless/02895_npy_format.sh +++ b/tests/queries/0_stateless/02895_npy_format.sh @@ -56,3 +56,7 @@ $CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_str.npy', Npy $CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/one_dim_unicode.npy', Npy, 'value Float32')" 2>&1 | grep -c "BAD_ARGUMENTS" $CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/complex.npy')" 2>&1 | grep -c "BAD_ARGUMENTS" + +$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/float_16.npy')" + +$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_npy/npy_inf_nan_null.npy')" diff --git a/tests/queries/0_stateless/02896_memory_accounting_for_user.sh 
b/tests/queries/0_stateless/02896_memory_accounting_for_user.sh index 72f4be1475d..f3016671420 100755 --- a/tests/queries/0_stateless/02896_memory_accounting_for_user.sh +++ b/tests/queries/0_stateless/02896_memory_accounting_for_user.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-parallel, long +# Tags: no-parallel, long, no-random-settings CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/02899_distributed_limit_by.reference b/tests/queries/0_stateless/02899_distributed_limit_by.reference new file mode 100644 index 00000000000..c20ecbcc4e4 --- /dev/null +++ b/tests/queries/0_stateless/02899_distributed_limit_by.reference @@ -0,0 +1,52 @@ +Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=0,distributed_push_down_limit=1 +0 +0 +Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=0,distributed_push_down_limit=0 +0 +0 +Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=1,distributed_push_down_limit=1 +0 +0 +0 +0 +Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=1,distributed_push_down_limit=0 +0 +0 +0 +0 +Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=2,distributed_push_down_limit=1 +0 +0 +0 +0 +Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=2,distributed_push_down_limit=0 +0 +0 +0 +0 +Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=0,distributed_push_down_limit=1 +0 +0 +Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=0,distributed_push_down_limit=0 +0 +0 +Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=1,distributed_push_down_limit=1 +0 +0 +0 +0 +Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=1,distributed_push_down_limit=0 +0 +0 +0 +0 +Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=2,distributed_push_down_limit=1 +0 +0 +0 +0 +Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=2,distributed_push_down_limit=0 +0 +0 +0 +0 diff --git a/tests/queries/0_stateless/02899_distributed_limit_by.sql.j2 b/tests/queries/0_stateless/02899_distributed_limit_by.sql.j2 new file mode 100644 index 00000000000..4f885ef2b6c --- /dev/null +++ b/tests/queries/0_stateless/02899_distributed_limit_by.sql.j2 @@ -0,0 +1,26 @@ +{# +Randomize settings: +- prefer_localhost_replica +- distributed_group_by_no_merge (0 = WithMergeableState, 1 = Complete, 2 = WithMergeableStateAfterAggregation/WithMergeableStateAfterAggregationAndLimit) +- distributed_push_down_limit (0/1 = dis/allows WithMergeableStateAfterAggregationAndLimit +#} +{% for settings in product( + [ + 'prefer_localhost_replica=0', + 'prefer_localhost_replica=1', + ], + [ + 'distributed_group_by_no_merge=0', + 'distributed_group_by_no_merge=1', + 'distributed_group_by_no_merge=2', + ], + [ + 'distributed_push_down_limit=1', + 'distributed_push_down_limit=0', + ], +) %} +{% set settings = settings | join(',') %} +select 'Used settings: {{ settings }}'; +select dummy from remote('127.{1,1}', system.one) where dummy + dummy >= 0 limit 1 by dummy + dummy + 0 as l settings {{ settings }}; +select dummy from (select dummy + dummy + 0 as l, dummy from remote('127.{1,1}', system.one) where dummy + dummy >= 0 limit 1 by l) settings {{ settings }}; +{% endfor %} diff --git a/tests/queries/0_stateless/02900_limit_by_query_stage.reference b/tests/queries/0_stateless/02900_limit_by_query_stage.reference new file mode 
100644 index 00000000000..b01fb1ca5b0 --- /dev/null +++ b/tests/queries/0_stateless/02900_limit_by_query_stage.reference @@ -0,0 +1,3 @@ +0 0 +0 0 +0 0 diff --git a/tests/queries/0_stateless/02900_limit_by_query_stage.sh b/tests/queries/0_stateless/02900_limit_by_query_stage.sh new file mode 100755 index 00000000000..d34d0d81bcd --- /dev/null +++ b/tests/queries/0_stateless/02900_limit_by_query_stage.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --stage with_mergeable_state --query 'SELECT dummy FROM system.one WHERE (dummy + dummy) >= 0 LIMIT 1 BY (dummy + dummy) + 0 AS l' +$CLICKHOUSE_CLIENT --stage with_mergeable_state_after_aggregation --query 'SELECT dummy FROM system.one WHERE (dummy + dummy) >= 0 LIMIT 1 BY (dummy + dummy) + 0 AS l' +$CLICKHOUSE_CLIENT --stage with_mergeable_state_after_aggregation_and_limit --query 'SELECT dummy FROM system.one WHERE (dummy + dummy) >= 0 LIMIT 1 BY (dummy + dummy) + 0 AS l' diff --git a/tests/queries/0_stateless/02903_client_insert_in_background.reference b/tests/queries/0_stateless/02903_client_insert_in_background.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02903_client_insert_in_background.sh b/tests/queries/0_stateless/02903_client_insert_in_background.sh new file mode 100755 index 00000000000..d5fc56752f6 --- /dev/null +++ b/tests/queries/0_stateless/02903_client_insert_in_background.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "drop table if exists test" +$CLICKHOUSE_CLIENT -q "create table test (x UInt64) engine=Memory" +nohup $CLICKHOUSE_CLIENT -q "insert into test values (42)" 2> $CLICKHOUSE_TEST_UNIQUE_NAME.out +tail -n +2 $CLICKHOUSE_TEST_UNIQUE_NAME.out +$CLICKHOUSE_CLIENT -q "drop table test" +rm $CLICKHOUSE_TEST_UNIQUE_NAME.out + diff --git a/tests/queries/0_stateless/02903_parameterized_view_explain_ast.reference b/tests/queries/0_stateless/02903_parameterized_view_explain_ast.reference new file mode 100644 index 00000000000..6ee8d0c3d23 --- /dev/null +++ b/tests/queries/0_stateless/02903_parameterized_view_explain_ast.reference @@ -0,0 +1,12 @@ +CreateQuery numbers_pv (children 2) + Identifier numbers_pv + SelectWithUnionQuery (children 1) + ExpressionList (children 1) + SelectQuery (children 3) + ExpressionList (children 1) + Asterisk + TablesInSelectQuery (children 1) + TablesInSelectQueryElement (children 1) + TableExpression (children 1) + TableIdentifier numbers + QueryParameter amount:UInt8 diff --git a/tests/queries/0_stateless/02903_parameterized_view_explain_ast.sql b/tests/queries/0_stateless/02903_parameterized_view_explain_ast.sql new file mode 100644 index 00000000000..6af6dab2f4e --- /dev/null +++ b/tests/queries/0_stateless/02903_parameterized_view_explain_ast.sql @@ -0,0 +1,3 @@ +EXPLAIN AST +CREATE VIEW numbers_pv AS +SELECT * FROM numbers LIMIT {amount:UInt8}; \ No newline at end of file diff --git a/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh b/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh index 074a3a6725e..095239954f4 100755 --- a/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh +++ b/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh @@ -10,7 +10,12 @@ CUR_DIR=$(cd "$(dirname 
"${BASH_SOURCE[0]}")" && pwd) # (i.e. "No active replica has part X or covering part") # does not appears as errors (level=Error), only as info message (level=Information). -$CLICKHOUSE_CLIENT -nm -q " +cluster=default +if [[ $($CLICKHOUSE_CLIENT -q "select count()>0 from system.clusters where cluster = 'test_cluster_database_replicated'") = 1 ]]; then + cluster=test_cluster_database_replicated +fi + +$CLICKHOUSE_CLIENT -nm --distributed_ddl_output_mode=none -q " drop table if exists rmt1; drop table if exists rmt2; @@ -21,7 +26,12 @@ $CLICKHOUSE_CLIENT -nm -q " insert into rmt1 values (2); system sync replica rmt1; - system stop pulling replication log rmt2; + -- SYSTEM STOP PULLING REPLICATION LOG does not waits for the current pull, + -- trigger it explicitly to 'avoid race' (though proper way will be to wait + -- for current pull in the StorageReplicatedMergeTree::getActionLock()) + system sync replica rmt2; + -- NOTE: CLICKHOUSE_DATABASE is required + system stop pulling replication log on cluster $cluster $CLICKHOUSE_DATABASE.rmt2; optimize table rmt1 final settings alter_sync=0, optimize_throw_if_noop=1; " || exit 1 diff --git a/tests/queries/0_stateless/02906_orc_tuple_field_prune.reference b/tests/queries/0_stateless/02906_orc_tuple_field_prune.reference new file mode 100644 index 00000000000..dfdd38f5e8e --- /dev/null +++ b/tests/queries/0_stateless/02906_orc_tuple_field_prune.reference @@ -0,0 +1,108 @@ +int64_column Nullable(Int64) +string_column Nullable(String) +float64_column Nullable(Float64) +tuple_column Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)) +array_tuple_column Array(Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64))) +map_tuple_column Map(String, Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64))) +-- { echoOn } +-- Test primitive types +select int64_column, string_column, float64_column from file('02906.orc') where int64_column % 15 = 0; +0 0 0 +15 15 15 +30 30 30 +45 45 45 +60 60 60 +75 75 75 +90 90 90 +-- Test tuple type with names +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64))') where int64_column % 15 = 0; +(NULL,NULL,NULL) +('15',15,15) +(NULL,NULL,NULL) +('45',45,45) +(NULL,NULL,NULL) +('75',75,75) +(NULL,NULL,NULL) +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(c Nullable(Int64))') where int64_column % 15 = 0; +(NULL) +(15) +(NULL) +(45) +(NULL) +(75) +(NULL) +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(c Nullable(Int64), d Nullable(String))') where int64_column % 15 = 0; +(NULL,NULL) +(15,NULL) +(NULL,NULL) +(45,NULL) +(NULL,NULL) +(75,NULL) +(NULL,NULL) +-- Test tuple type without names +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(Nullable(String), Nullable(Float64), Nullable(Int64))') where int64_column % 15 = 0; +(NULL,NULL,NULL) +('15',15,15) +(NULL,NULL,NULL) +('45',45,45) +(NULL,NULL,NULL) +('75',75,75) +(NULL,NULL,NULL) +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(Nullable(String), Nullable(Float64))') where int64_column % 15 = 0; +(NULL,NULL) +('15',15) +(NULL,NULL) +('45',45) +(NULL,NULL) +('75',75) +(NULL,NULL) +-- Test tuple nested in array +select array_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, array_tuple_column Array(Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)))') 
where int64_column % 15 = 0; +[(NULL,NULL,NULL)] +[('15',15,15)] +[(NULL,NULL,NULL)] +[('45',45,45)] +[(NULL,NULL,NULL)] +[('75',75,75)] +[(NULL,NULL,NULL)] +select array_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, array_tuple_column Array(Tuple(b Nullable(Float64), c Nullable(Int64)))') where int64_column % 15 = 0; +[(NULL,NULL)] +[(15,15)] +[(NULL,NULL)] +[(45,45)] +[(NULL,NULL)] +[(75,75)] +[(NULL,NULL)] +select array_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, array_tuple_column Array(Tuple(b Nullable(Float64), c Nullable(Int64), d Nullable(String)))') where int64_column % 15 = 0; +[(NULL,NULL,NULL)] +[(15,15,NULL)] +[(NULL,NULL,NULL)] +[(45,45,NULL)] +[(NULL,NULL,NULL)] +[(75,75,NULL)] +[(NULL,NULL,NULL)] +-- Test tuple nested in map +select map_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, map_tuple_column Map(String, Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)))') where int64_column % 15 = 0; +{'0':(NULL,NULL,NULL)} +{'15':('15',15,15)} +{'30':(NULL,NULL,NULL)} +{'45':('45',45,45)} +{'60':(NULL,NULL,NULL)} +{'75':('75',75,75)} +{'90':(NULL,NULL,NULL)} +select map_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, map_tuple_column Map(String, Tuple(b Nullable(Float64), c Nullable(Int64)))') where int64_column % 15 = 0; +{'0':(NULL,NULL)} +{'15':(15,15)} +{'30':(NULL,NULL)} +{'45':(45,45)} +{'60':(NULL,NULL)} +{'75':(75,75)} +{'90':(NULL,NULL)} +select map_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, map_tuple_column Map(String, Tuple(b Nullable(Float64), c Nullable(Int64), d Nullable(String)))') where int64_column % 15 = 0; +{'0':(NULL,NULL,NULL)} +{'15':(15,15,NULL)} +{'30':(NULL,NULL,NULL)} +{'45':(45,45,NULL)} +{'60':(NULL,NULL,NULL)} +{'75':(75,75,NULL)} +{'90':(NULL,NULL,NULL)} diff --git a/tests/queries/0_stateless/02906_orc_tuple_field_prune.sql b/tests/queries/0_stateless/02906_orc_tuple_field_prune.sql new file mode 100644 index 00000000000..5428abc40de --- /dev/null +++ b/tests/queries/0_stateless/02906_orc_tuple_field_prune.sql @@ -0,0 +1,40 @@ +-- Tags: no-fasttest, no-parallel + +set engine_file_truncate_on_insert = 1; +set flatten_nested = 0; + +insert into function file('02906.orc', 'ORC') +select + number::Int64 as int64_column, + number::String as string_column, + number::Float64 as float64_column, + cast(if(number % 10 = 0, tuple(null, null, null), tuple(number::String, number::Float64, number::Int64)) as Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64))) as tuple_column, + cast(if(number % 10 = 0, array(tuple(null, null, null)), array(tuple(number::String, number::Float64, number::Int64))) as Array(Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)))) as array_tuple_column, + cast(if(number % 10 = 0, map(number::String, tuple(null, null, null)), map(number::String, tuple(number::String, number::Float64, number::Int64))) as Map(String, Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)))) as map_tuple_column + from numbers(100); + +desc file('02906.orc'); + +-- { echoOn } +-- Test primitive types +select int64_column, string_column, float64_column from file('02906.orc') where int64_column % 15 = 0; + +-- Test tuple type with names +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64))') where int64_column % 15 = 0; +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(c 
Nullable(Int64))') where int64_column % 15 = 0; +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(c Nullable(Int64), d Nullable(String))') where int64_column % 15 = 0; + +-- Test tuple type without names +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(Nullable(String), Nullable(Float64), Nullable(Int64))') where int64_column % 15 = 0; +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(Nullable(String), Nullable(Float64))') where int64_column % 15 = 0; + +-- Test tuple nested in array +select array_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, array_tuple_column Array(Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)))') where int64_column % 15 = 0; +select array_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, array_tuple_column Array(Tuple(b Nullable(Float64), c Nullable(Int64)))') where int64_column % 15 = 0; +select array_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, array_tuple_column Array(Tuple(b Nullable(Float64), c Nullable(Int64), d Nullable(String)))') where int64_column % 15 = 0; + +-- Test tuple nested in map +select map_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, map_tuple_column Map(String, Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)))') where int64_column % 15 = 0; +select map_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, map_tuple_column Map(String, Tuple(b Nullable(Float64), c Nullable(Int64)))') where int64_column % 15 = 0; +select map_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, map_tuple_column Map(String, Tuple(b Nullable(Float64), c Nullable(Int64), d Nullable(String)))') where int64_column % 15 = 0; +-- { echoOff } diff --git a/tests/queries/0_stateless/02907_fromDaysSinceYearZero.reference b/tests/queries/0_stateless/02907_fromDaysSinceYearZero.reference index ac0f4662db2..3603ebe3e0d 100644 --- a/tests/queries/0_stateless/02907_fromDaysSinceYearZero.reference +++ b/tests/queries/0_stateless/02907_fromDaysSinceYearZero.reference @@ -1,22 +1,25 @@ -- negative tests --- const and non-const arguments -719527 2149-06-06 2149-06-06 -719528 1970-01-01 1970-01-01 -719529 1970-01-02 1970-01-02 -785062 2149-06-05 2149-06-05 -785063 2149-06-06 2149-06-06 -785064 1970-01-01 1970-01-01 -693960 2299-12-31 2299-12-31 -693961 1900-01-01 1900-01-01 -693962 1900-01-02 1900-01-02 -840056 2299-12-30 2299-12-30 -840057 2299-12-31 2299-12-31 -840058 2299-12-31 2299-12-31 --- integer types != UInt32 -255 1974-06-12 2299-12-31 -65535 1973-09-29 2299-12-31 -719529 1970-01-02 1970-01-02 +-- UInt32 and Int32 arguments, both const and non-const +719527 719527 2149-06-06 2149-06-06 2149-06-06 2149-06-06 +719528 719528 1970-01-01 1970-01-01 1970-01-01 1970-01-01 +719529 719529 1970-01-02 1970-01-02 1970-01-02 1970-01-02 +785062 785062 2149-06-05 2149-06-05 2149-06-05 2149-06-05 +785063 785063 2149-06-06 2149-06-06 2149-06-06 2149-06-06 +785064 785064 1970-01-01 1970-01-01 1970-01-01 1970-01-01 +693960 693960 2299-12-31 2299-12-31 2299-12-31 2299-12-31 +693961 693961 1900-01-01 1900-01-01 1900-01-01 1900-01-01 +693962 693962 1900-01-02 1900-01-02 1900-01-02 1900-01-02 +840056 840056 2299-12-30 2299-12-30 2299-12-30 2299-12-30 +840057 840057 2299-12-31 2299-12-31 2299-12-31 2299-12-31 +840058 840058 2299-12-31 2299-12-31 2299-12-31 2299-12-31 +-- integer types != (U)Int32 +255 127 1974-06-12 2299-12-31 1974-02-04 2299-12-31 +65535 
32767 1973-09-29 2299-12-31 2063-06-17 2299-12-31 +719529 719529 1970-01-02 1970-01-02 1970-01-02 1970-01-02 -- NULL handling \N \N +-- ubsan bugs +2299-12-31 +2299-12-31 -- Alias 1973-10-01 diff --git a/tests/queries/0_stateless/02907_fromDaysSinceYearZero.sql b/tests/queries/0_stateless/02907_fromDaysSinceYearZero.sql index 83cfa01d5ed..9f356080fe8 100644 --- a/tests/queries/0_stateless/02907_fromDaysSinceYearZero.sql +++ b/tests/queries/0_stateless/02907_fromDaysSinceYearZero.sql @@ -7,32 +7,35 @@ SELECT fromDaysSinceYearZero(1, 2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_ SELECT fromDaysSinceYearZero32(1, 2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } SELECT fromDaysSinceYearZero('needs a number'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } SELECT fromDaysSinceYearZero32('needs a number'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } -SELECT fromDaysSinceYearZero(-3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } -SELECT fromDaysSinceYearZero32(-3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT fromDaysSinceYearZero(-3); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT fromDaysSinceYearZero32(-3); -- { serverError ARGUMENT_OUT_OF_BOUND } -SELECT '-- const and non-const arguments'; +SELECT '-- UInt32 and Int32 arguments, both const and non-const'; +SELECT 719527 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); -- outside Date's range +SELECT 719528 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); +SELECT 719529 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); +SELECT 785062 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); +SELECT 785063 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); +SELECT 785064 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); -- outside Date's range -SELECT 719527 AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero(materialize(x)); -- outside Date's range -SELECT 719528 AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero(materialize(x)); -SELECT 719529 AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero(materialize(x)); -SELECT 785062 AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero(materialize(x)); -SELECT 785063 AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero(materialize(x)); -SELECT 785064 AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero(materialize(x)); -- outside Date's range +SELECT 693960 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); -- outside Date32's range +SELECT 693961 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); +SELECT 693962 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); +SELECT 840056 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), 
fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); +SELECT 840057 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); +SELECT 840058 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); -- outside Date32's range -SELECT 693960 AS x, fromDaysSinceYearZero32(x), fromDaysSinceYearZero32(materialize(x)); -- outside Date32's range -SELECT 693961 AS x, fromDaysSinceYearZero32(x), fromDaysSinceYearZero32(materialize(x)); -SELECT 693962 AS x, fromDaysSinceYearZero32(x), fromDaysSinceYearZero32(materialize(x)); -SELECT 840056 AS x, fromDaysSinceYearZero32(x), fromDaysSinceYearZero32(materialize(x)); -SELECT 840057 AS x, fromDaysSinceYearZero32(x), fromDaysSinceYearZero32(materialize(x)); -SELECT 840058 AS x, fromDaysSinceYearZero32(x), fromDaysSinceYearZero32(materialize(x)); -- outside Date32's range - -SELECT '-- integer types != UInt32'; -SELECT toUInt8(255) AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero32(x); -- outside Date's range for all UInt8-s -SELECT toUInt16(65535) AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero32(x); -- outside Date's range for all UInt16-s -SELECT toUInt64(719529) AS x, fromDaysSinceYearZero(x), fromDaysSinceYearZero32(x); -- something useful +SELECT '-- integer types != (U)Int32'; +SELECT toUInt8(255) AS u, toInt8(127) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero32(u), fromDaysSinceYearZero(s), fromDaysSinceYearZero32(s); -- outside Date's range for all (U)Int8-s +SELECT toUInt16(65535) AS u, toInt16(32767) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero32(u), fromDaysSinceYearZero(s), fromDaysSinceYearZero32(s); -- outside Date's range for all (U)Int16-s +SELECT toUInt64(719529) AS u, toInt64(719529) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero32(u), fromDaysSinceYearZero(s), fromDaysSinceYearZero32(s); -- something useful SELECT '-- NULL handling'; SELECT fromDaysSinceYearZero(NULL), fromDaysSinceYearZero32(NULL); +SELECT '-- ubsan bugs'; +SELECT fromDaysSinceYearZero32(2147483648); +SELECT fromDaysSinceYearZero32(3); + SELECT '-- Alias'; SELECT FROM_DAYS(1); diff --git a/tests/queries/0_stateless/02908_Npy_files_caching.reference b/tests/queries/0_stateless/02908_Npy_files_caching.reference new file mode 100644 index 00000000000..db9adf2d9c1 --- /dev/null +++ b/tests/queries/0_stateless/02908_Npy_files_caching.reference @@ -0,0 +1,9 @@ +3 +3 +3 +array Int64 +3 +1000000 +1000000 +array Int64 +1000000 diff --git a/tests/queries/0_stateless/02908_Npy_files_caching.sh b/tests/queries/0_stateless/02908_Npy_files_caching.sh new file mode 100755 index 00000000000..4845f740972 --- /dev/null +++ b/tests/queries/0_stateless/02908_Npy_files_caching.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/one_dim.npy') settings optimize_count_from_files=0" +$CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/one_dim.npy') settings optimize_count_from_files=1" +$CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/one_dim.npy', auto, 'array Int64') settings optimize_count_from_files=1" +$CLICKHOUSE_LOCAL -nm -q " +desc file('$CURDIR/data_npy/one_dim.npy'); +select number_of_rows from system.schema_inference_cache where format='Npy'; +" +$CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/npy_big.npy') settings optimize_count_from_files=0" +$CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/npy_big.npy') settings optimize_count_from_files=1" +$CLICKHOUSE_LOCAL -nm -q " +desc file('$CURDIR/data_npy/npy_big.npy'); +select number_of_rows from system.schema_inference_cache where format='Npy'; +" diff --git a/tests/queries/0_stateless/02908_alter_column_alias.reference b/tests/queries/0_stateless/02908_alter_column_alias.reference new file mode 100644 index 00000000000..e44df6e9ff6 --- /dev/null +++ b/tests/queries/0_stateless/02908_alter_column_alias.reference @@ -0,0 +1 @@ +CREATE TABLE default.t\n(\n `c0` DateTime,\n `c1` DateTime,\n `a` DateTime ALIAS c1\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02908_alter_column_alias.sql b/tests/queries/0_stateless/02908_alter_column_alias.sql new file mode 100644 index 00000000000..fd98339e8b5 --- /dev/null +++ b/tests/queries/0_stateless/02908_alter_column_alias.sql @@ -0,0 +1,8 @@ +CREATE TABLE t ( + c0 DateTime, + c1 DateTime, + a DateTime alias toStartOfFifteenMinutes(c0) +) ENGINE = MergeTree() ORDER BY tuple(); + +ALTER TABLE t MODIFY COLUMN a DateTime ALIAS c1; +SHOW CREATE t; diff --git a/tests/queries/0_stateless/02908_empty_named_collection.reference b/tests/queries/0_stateless/02908_empty_named_collection.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02908_empty_named_collection.sql b/tests/queries/0_stateless/02908_empty_named_collection.sql new file mode 100644 index 00000000000..6aab83858e8 --- /dev/null +++ b/tests/queries/0_stateless/02908_empty_named_collection.sql @@ -0,0 +1,5 @@ +-- Tags: no-parallel + +CREATE NAMED COLLECTION foobar03 AS a = 1; +ALTER NAMED COLLECTION foobar03 DELETE b; -- { serverError BAD_ARGUMENTS } +DROP NAMED COLLECTION foobar03; diff --git a/tests/queries/0_stateless/02908_filesystem_cache_as_collection.reference b/tests/queries/0_stateless/02908_filesystem_cache_as_collection.reference new file mode 100644 index 00000000000..f5e0af6d507 --- /dev/null +++ b/tests/queries/0_stateless/02908_filesystem_cache_as_collection.reference @@ -0,0 +1,2 @@ +1048576 10000000 33554432 4194304 0 0 0 0 /var/lib/clickhouse/filesystem_caches/collection_sql 2 0 1 +1048576 10000000 33554432 4194304 0 0 0 0 /var/lib/clickhouse/filesystem_caches/collection 2 0 1 diff --git a/tests/queries/0_stateless/02908_filesystem_cache_as_collection.sql b/tests/queries/0_stateless/02908_filesystem_cache_as_collection.sql new file mode 100644 index 00000000000..c7216833bc9 --- /dev/null +++ b/tests/queries/0_stateless/02908_filesystem_cache_as_collection.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest, no-replicated-database + +CREATE NAMED COLLECTION IF NOT EXISTS cache_collection_sql AS path = 'collection_sql', max_size = '1Mi'; +DROP TABLE IF EXISTS test; +CREATE TABLE test (a 
Int32, b String) +ENGINE = MergeTree() ORDER BY a SETTINGS disk = disk(type = cache, disk = 'local_disk', name = '$CLICHOUSE_TEST_UNIQUE_NAME', cache_name='cache_collection_sql'); +DESCRIBE FILESYSTEM CACHE '$CLICHOUSE_TEST_UNIQUE_NAME'; +CREATE TABLE test2 (a Int32, b String) +ENGINE = MergeTree() ORDER BY a SETTINGS disk = disk(type = cache, disk = 'local_disk', name = '$CLICHOUSE_TEST_UNIQUE_NAME_2', cache_name='cache_collection'); +DESCRIBE FILESYSTEM CACHE '$CLICHOUSE_TEST_UNIQUE_NAME_2'; diff --git a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.reference b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.reference new file mode 100644 index 00000000000..af0e50ec332 --- /dev/null +++ b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.reference @@ -0,0 +1,5 @@ +Creating 300 tables +Making 200 requests to system.replicas +Query system.replicas while waiting for other concurrent requests to finish +0 +900 diff --git a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh new file mode 100755 index 00000000000..f93175529c0 --- /dev/null +++ b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Tags: long, zookeeper, no-parallel, no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +set -e + +NUM_TABLES=300 +CONCURRENCY=200 + +echo "Creating $NUM_TABLES tables" + +function init_table() +{ + set -e + i=$1 + curl $CLICKHOUSE_URL --silent --fail --show-error --data "CREATE TABLE test_02908_r1_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r1') ORDER BY tuple()" 2>&1 + curl $CLICKHOUSE_URL --silent --fail --show-error --data "CREATE TABLE test_02908_r2_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r2') ORDER BY tuple()" 2>&1 + curl $CLICKHOUSE_URL --silent --fail --show-error --data "CREATE TABLE test_02908_r3_$i (a UInt64) ENGINE=ReplicatedMergeTree('/02908/{database}/test_$i', 'r3') ORDER BY tuple()" 2>&1 + + curl $CLICKHOUSE_URL --silent --fail --show-error --data "INSERT INTO test_02908_r1_$i SELECT rand64() FROM numbers(5);" 2>&1 +} + +export init_table; + +for i in `seq 1 $NUM_TABLES`; +do + init_table $i & +done + +wait; + + +echo "Making $CONCURRENCY requests to system.replicas" + +for i in `seq 1 $CONCURRENCY`; +do + curl $CLICKHOUSE_URL --silent --fail --show-error --data "SELECT * FROM system.replicas WHERE database=currentDatabase() FORMAT Null;" 2>&1 || echo "query $i failed" & +done + +echo "Query system.replicas while waiting for other concurrent requests to finish" +# lost_part_count column is read from ZooKeeper +curl $CLICKHOUSE_URL --silent --fail --show-error --data "SELECT sum(lost_part_count) FROM system.replicas WHERE database=currentDatabase();" 2>&1; +# is_leader column is filled without ZooKeeper +curl $CLICKHOUSE_URL --silent --fail --show-error --data "SELECT sum(is_leader) FROM system.replicas WHERE database=currentDatabase();" 2>&1; + +wait; diff --git a/tests/queries/0_stateless/02908_table_ttl_dependency.reference b/tests/queries/0_stateless/02908_table_ttl_dependency.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02908_table_ttl_dependency.sh b/tests/queries/0_stateless/02908_table_ttl_dependency.sh new file mode 100755 index 00000000000..70136b4a42b --- /dev/null +++ 
b/tests/queries/0_stateless/02908_table_ttl_dependency.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# Tags: no-ordinary-database +# Tag no-ordinary-database: requires UUID + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -nm -q " + DROP TABLE IF EXISTS 02908_dependent; + DROP TABLE IF EXISTS 02908_main; + + CREATE TABLE 02908_main (a UInt32) ENGINE = MergeTree ORDER BY a; + CREATE TABLE 02908_dependent (a UInt32, ts DateTime) ENGINE = MergeTree ORDER BY a TTL ts + 1 WHERE a IN (SELECT a FROM ${CLICKHOUSE_DATABASE}.02908_main); +" + +$CLICKHOUSE_CLIENT -nm -q " + DROP TABLE 02908_main; +" 2>&1 | grep -F -q "HAVE_DEPENDENT_OBJECTS" + +$CLICKHOUSE_CLIENT -nm -q " + DROP TABLE 02908_dependent; + DROP TABLE 02908_main; +" diff --git a/tests/queries/0_stateless/02910_bad_logs_level_in_local.reference b/tests/queries/0_stateless/02910_bad_logs_level_in_local.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02910_bad_logs_level_in_local.sh b/tests/queries/0_stateless/02910_bad_logs_level_in_local.sh new file mode 100755 index 00000000000..badf7232a95 --- /dev/null +++ b/tests/queries/0_stateless/02910_bad_logs_level_in_local.sh @@ -0,0 +1,15 @@ +#!/usr/bin/expect -f + +log_user 0 +set timeout 60 +match_max 100000 + +spawn bash -c "clickhouse-local" + +expect ":) " +send -- "SET send_logs_level = 't'\r" +expect "Exception on client:" +expect ":) " +send -- "exit\r" +expect eof + diff --git a/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.reference b/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.sql b/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.sql new file mode 100644 index 00000000000..84250059c58 --- /dev/null +++ b/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.sql @@ -0,0 +1,17 @@ +CREATE TABLE t_r1 +( + `id` UInt64, + `val` SimpleAggregateFunction(max, Nullable(String)) +) +ENGINE = ReplicatedAggregatingMergeTree('/tables/{database}/t', 'r1') +ORDER BY id +SETTINGS index_granularity = 8192; + +CREATE TABLE t_r2 +( + `id` UInt64, + `val` SimpleAggregateFunction(anyLast, Nullable(String)) +) +ENGINE = ReplicatedAggregatingMergeTree('/tables/{database}/t', 'r2') +ORDER BY id +SETTINGS index_granularity = 8192; -- { serverError INCOMPATIBLE_COLUMNS } diff --git a/tests/queries/0_stateless/02910_rocksdb_optimize.reference b/tests/queries/0_stateless/02910_rocksdb_optimize.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02910_rocksdb_optimize.sql b/tests/queries/0_stateless/02910_rocksdb_optimize.sql new file mode 100644 index 00000000000..575ba6db212 --- /dev/null +++ b/tests/queries/0_stateless/02910_rocksdb_optimize.sql @@ -0,0 +1,5 @@ +-- Tags: use-rocksdb + +CREATE TABLE dict (key UInt64, value String) ENGINE = EmbeddedRocksDB PRIMARY KEY key; +INSERT INTO dict SELECT number, toString(number) FROM numbers(1e3); +OPTIMIZE TABLE dict; diff --git a/tests/queries/0_stateless/02911_add_index_and_materialize_index.reference b/tests/queries/0_stateless/02911_add_index_and_materialize_index.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02911_add_index_and_materialize_index.sql 
b/tests/queries/0_stateless/02911_add_index_and_materialize_index.sql new file mode 100644 index 00000000000..f8785ec9a38 --- /dev/null +++ b/tests/queries/0_stateless/02911_add_index_and_materialize_index.sql @@ -0,0 +1,18 @@ +-- Tags: no-replicated-database + +DROP TABLE IF EXISTS index_test; + +CREATE TABLE index_test +( + x UInt32, + y UInt32, + z UInt32 +) ENGINE = MergeTree order by x; + +ALTER TABLE index_test + ADD INDEX i_x mortonDecode(2, z).1 TYPE minmax GRANULARITY 1, + ADD INDEX i_y mortonDecode(2, z).2 TYPE minmax GRANULARITY 1, + MATERIALIZE INDEX i_x, + MATERIALIZE INDEX i_y; + +drop table index_test; diff --git a/tests/queries/0_stateless/02911_analyzer_explain_estimate.reference b/tests/queries/0_stateless/02911_analyzer_explain_estimate.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02911_analyzer_explain_estimate.sql b/tests/queries/0_stateless/02911_analyzer_explain_estimate.sql new file mode 100644 index 00000000000..b082f2f33b2 --- /dev/null +++ b/tests/queries/0_stateless/02911_analyzer_explain_estimate.sql @@ -0,0 +1,5 @@ +-- Tags: distributed + +SET allow_experimental_analyzer = 1; + +EXPLAIN ESTIMATE SELECT 0 = 1048577, NULL, groupBitmapOr(bitmapBuild([toInt32(65537)])) FROM cluster(test_cluster_two_shards) WHERE NULL = 1048575; diff --git a/tests/queries/0_stateless/02911_analyzer_remove_unused_projection_columns.reference b/tests/queries/0_stateless/02911_analyzer_remove_unused_projection_columns.reference new file mode 100644 index 00000000000..405d3348775 --- /dev/null +++ b/tests/queries/0_stateless/02911_analyzer_remove_unused_projection_columns.reference @@ -0,0 +1,8 @@ +0 +0 +0 +0 +0 +0 +0 +0 diff --git a/tests/queries/0_stateless/02911_analyzer_remove_unused_projection_columns.sql b/tests/queries/0_stateless/02911_analyzer_remove_unused_projection_columns.sql new file mode 100644 index 00000000000..70de63c592d --- /dev/null +++ b/tests/queries/0_stateless/02911_analyzer_remove_unused_projection_columns.sql @@ -0,0 +1,22 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE = MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, 'Value_0'); + +SET max_columns_to_read = 1; + +SELECT id FROM (SELECT * FROM test_table); +SELECT id FROM (SELECT * FROM (SELECT * FROM test_table)); +SELECT id FROM (SELECT * FROM test_table UNION ALL SELECT * FROM test_table); + +SELECT id FROM (SELECT id, value FROM test_table); +SELECT id FROM (SELECT id, value FROM (SELECT id, value FROM test_table)); +SELECT id FROM (SELECT id, value FROM test_table UNION ALL SELECT id, value FROM test_table); + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02911_arrow_large_list.reference b/tests/queries/0_stateless/02911_arrow_large_list.reference new file mode 100644 index 00000000000..a6fbcce8c06 --- /dev/null +++ b/tests/queries/0_stateless/02911_arrow_large_list.reference @@ -0,0 +1,4 @@ +a +Array(Nullable(String)) +['00000','00001','00002'] +['10000','10001','10002'] diff --git a/tests/queries/0_stateless/02911_arrow_large_list.sh b/tests/queries/0_stateless/02911_arrow_large_list.sh new file mode 100755 index 00000000000..9b1c9a9d0ed --- /dev/null +++ b/tests/queries/0_stateless/02911_arrow_large_list.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Tags: no-fasttest +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +# ## generate arrow file with python +# import pyarrow as pa +# schema = pa.schema([ pa.field('a', pa.large_list(pa.utf8())) ]) +# a = pa.array([["00000", "00001", "00002"], ["10000", "10001", "10002"]]) +# with pa.OSFile('arraydata.arrow', 'wb') as sink: +# with pa.ipc.new_file(sink, schema=schema) as writer: +# batch = pa.record_batch([a], schema=schema) +# writer.write(batch) + +# cat arraydata.arrow | base64 + +cat < /dev/null + +$CLICKHOUSE_CLIENT -q "DROP DATABASE $database_name SYNC;" + +for i in $(seq 1 3); do + $CLICKHOUSE_CLIENT -q "SELECT count() FROM $database_name.02911_backup_restore_keeper_map$i;" 2>&1 | grep -Fq "UNKNOWN_DATABASE" && echo 'OK' || echo 'ERROR' +done + +$CLICKHOUSE_CLIENT -q "RESTORE DATABASE $database_name FROM Disk('backups', '$backup_path');" > /dev/null + +for i in $(seq 1 3); do + $CLICKHOUSE_CLIENT -q "SELECT count() FROM $database_name.02911_backup_restore_keeper_map$i;" +done + +$CLICKHOUSE_CLIENT -q "DROP TABLE $database_name.02911_backup_restore_keeper_map3 SYNC;" + +$CLICKHOUSE_CLIENT -q "SELECT count() FROM $database_name.02911_backup_restore_keeper_map3;" 2>&1 | grep -Fq "UNKNOWN_TABLE" && echo 'OK' || echo 'ERROR' + +$CLICKHOUSE_CLIENT -q "RESTORE TABLE $database_name.02911_backup_restore_keeper_map3 FROM Disk('backups', '$backup_path');" > /dev/null + +for i in $(seq 1 3); do + $CLICKHOUSE_CLIENT -q "SELECT count() FROM $database_name.02911_backup_restore_keeper_map$i;" +done + +$CLICKHOUSE_CLIENT -q "DROP DATABASE $database_name SYNC;" \ No newline at end of file diff --git a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference new file mode 100644 index 00000000000..976c1503b02 --- /dev/null +++ b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference @@ -0,0 +1,25 @@ +-- { echoOn } +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR (t1.x IS NULL AND t2.x IS NULL)) ORDER BY t1.x NULLS LAST; +2 2 2 2 +3 3 3 33 +\N \N \N \N +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR t1.x IS NULL AND t1.y <=> t2.y AND t2.x IS NULL) ORDER BY t1.x NULLS LAST; +1 42 4 42 +2 2 2 2 +3 3 3 33 +\N \N \N \N +SELECT * FROM t1 JOIN t2 ON (t1.x = t2.x OR t1.x IS NULL AND t2.x IS NULL) AND t1.y <=> t2.y ORDER BY t1.x NULLS LAST; +2 2 2 2 +\N \N \N \N +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR t1.y <=> t2.y OR (t1.x IS NULL AND t1.y IS NULL AND t2.x IS NULL AND t2.y IS NULL)) ORDER BY t1.x NULLS LAST; +1 42 4 42 +2 2 2 2 +3 3 3 33 +\N \N \N \N +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR (t1.x IS NULL AND t2.x IS NULL)) AND (t1.y == t2.y OR (t1.y IS NULL AND t2.y IS NULL)) AND COALESCE(t1.x, 0) != 2 ORDER BY t1.x NULLS LAST; +\N \N \N \N +SELECT x = y OR (x IS NULL AND y IS NULL) FROM t1 ORDER BY x NULLS LAST; +0 +1 +1 +1 diff --git a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql new file mode 100644 index 00000000000..6a98a7bb57b --- /dev/null +++ b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (x Nullable(Int64), y Nullable(UInt64)) ENGINE = TinyLog; +CREATE TABLE t2 (x Nullable(Int64), y Nullable(UInt64)) ENGINE = TinyLog; + +INSERT INTO t1 VALUES (1,42), (2,2), (3,3), (NULL,NULL); +INSERT INTO t2 VALUES (NULL,NULL), (2,2), (3,33), (4,42); + +SET allow_experimental_analyzer = 1; + +-- { echoOn } +SELECT * FROM t1 JOIN t2 ON (t1.x <=> 
t2.x OR (t1.x IS NULL AND t2.x IS NULL)) ORDER BY t1.x NULLS LAST; + +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR t1.x IS NULL AND t1.y <=> t2.y AND t2.x IS NULL) ORDER BY t1.x NULLS LAST; + +SELECT * FROM t1 JOIN t2 ON (t1.x = t2.x OR t1.x IS NULL AND t2.x IS NULL) AND t1.y <=> t2.y ORDER BY t1.x NULLS LAST; + +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR t1.y <=> t2.y OR (t1.x IS NULL AND t1.y IS NULL AND t2.x IS NULL AND t2.y IS NULL)) ORDER BY t1.x NULLS LAST; + +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR (t1.x IS NULL AND t2.x IS NULL)) AND (t1.y == t2.y OR (t1.y IS NULL AND t2.y IS NULL)) AND COALESCE(t1.x, 0) != 2 ORDER BY t1.x NULLS LAST; + +SELECT x = y OR (x IS NULL AND y IS NULL) FROM t1 ORDER BY x NULLS LAST; +-- { echoOff } + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; diff --git a/tests/queries/0_stateless/02911_row_policy_on_cluster.reference b/tests/queries/0_stateless/02911_row_policy_on_cluster.reference new file mode 100644 index 00000000000..c13b599bea6 --- /dev/null +++ b/tests/queries/0_stateless/02911_row_policy_on_cluster.reference @@ -0,0 +1,6 @@ +localhost 9000 0 0 0 +localhost 9000 0 0 0 +localhost 9000 0 0 0 +localhost 9000 0 0 0 +localhost 9000 0 0 0 +localhost 9000 0 0 0 diff --git a/tests/queries/0_stateless/02911_row_policy_on_cluster.sql b/tests/queries/0_stateless/02911_row_policy_on_cluster.sql new file mode 100644 index 00000000000..0c60bb5a6b9 --- /dev/null +++ b/tests/queries/0_stateless/02911_row_policy_on_cluster.sql @@ -0,0 +1,11 @@ +-- Tags: no-parallel, zookeeper, no-replicated-database +-- Tag no-replicated-database: distributed_ddl_output_mode is none + +DROP ROW POLICY IF EXISTS 02911_rowpolicy ON default.* ON CLUSTER test_shard_localhost; +DROP USER IF EXISTS 02911_user ON CLUSTER test_shard_localhost; + +CREATE USER 02911_user ON CLUSTER test_shard_localhost; +CREATE ROW POLICY 02911_rowpolicy ON CLUSTER test_shard_localhost ON default.* USING 1 TO 02911_user; + +DROP ROW POLICY 02911_rowpolicy ON default.* ON CLUSTER test_shard_localhost; +DROP USER 02911_user ON CLUSTER test_shard_localhost; diff --git a/tests/queries/0_stateless/02912_ingestion_mv_deduplication.reference b/tests/queries/0_stateless/02912_ingestion_mv_deduplication.reference index 946897a4fe3..335b55f05c8 100644 --- a/tests/queries/0_stateless/02912_ingestion_mv_deduplication.reference +++ b/tests/queries/0_stateless/02912_ingestion_mv_deduplication.reference @@ -1,4 +1,4 @@ --- Original issue with max_insert_delayed_streams_for_parallel_write = 1 +-- Original issue with max_insert_delayed_streams_for_parallel_write <= 1 -- Landing 2022-09-01 12:23:34 42 2023-09-01 12:23:34 42 diff --git a/tests/queries/0_stateless/02912_ingestion_mv_deduplication.sql b/tests/queries/0_stateless/02912_ingestion_mv_deduplication.sql index 68901b67c91..f206f0d7775 100644 --- a/tests/queries/0_stateless/02912_ingestion_mv_deduplication.sql +++ b/tests/queries/0_stateless/02912_ingestion_mv_deduplication.sql @@ -1,7 +1,7 @@ --- Tags: replica +-- Tags: zookeeper SET session_timezone = 'UTC'; -SELECT '-- Original issue with max_insert_delayed_streams_for_parallel_write = 1'; +SELECT '-- Original issue with max_insert_delayed_streams_for_parallel_write <= 1'; /* This is the expected behavior when mv deduplication is set to false. 
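For context, a minimal sketch of what the 02911_join_on_nullsafe_optimization test above exercises, using hypothetical tables lhs/rhs instead of the test's t1/t2 and assuming the analyzer is enabled as the test does: an ON clause written as an equality plus a "both sides IS NULL" disjunction is expected to behave like the null-safe comparison operator <=> (isNotDistinctFrom), so rows whose join keys are NULL still match each other.

SET allow_experimental_analyzer = 1;

CREATE TABLE lhs (x Nullable(Int64)) ENGINE = TinyLog;
CREATE TABLE rhs (x Nullable(Int64)) ENGINE = TinyLog;
INSERT INTO lhs VALUES (1), (NULL);
INSERT INTO rhs VALUES (1), (NULL);

-- Null-safe operator: the NULL rows match each other.
SELECT * FROM lhs JOIN rhs ON lhs.x <=> rhs.x ORDER BY lhs.x NULLS LAST;

-- Hand-written disjunction: expected to give the same result, which is the
-- equivalence the reference output above checks for t1/t2.
SELECT * FROM lhs JOIN rhs ON lhs.x = rhs.x OR (lhs.x IS NULL AND rhs.x IS NULL) ORDER BY lhs.x NULLS LAST;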
@@ -11,7 +11,7 @@ SELECT '-- Original issue with max_insert_delayed_streams_for_parallel_write = 1 - 2nd insert gets both blocks inserted in mv table */ -SET deduplicate_blocks_in_dependent_materialized_views = 0, max_insert_delayed_streams_for_parallel_write = 1; +SET deduplicate_blocks_in_dependent_materialized_views = 0, max_insert_delayed_streams_for_parallel_write = 0; CREATE TABLE landing ( @@ -48,7 +48,7 @@ SELECT '-- Original issue with deduplicate_blocks_in_dependent_materialized_view This is the unexpected behavior due to setting max_insert_delayed_streams_for_parallel_write > 1. This unexpected behavior was present since version 21.9 or earlier but due to this PR https://github.com/ClickHouse/ClickHouse/pull/34780 - when max_insert_delayed_streams_for_parallel_write setting it to 1 by default the issue was mitigated. + when max_insert_delayed_streams_for_parallel_write gets disabled by default the issue was mitigated. This is what happens: @@ -57,7 +57,7 @@ SELECT '-- Original issue with deduplicate_blocks_in_dependent_materialized_view - 2nd insert is not inserting anything in mv table due to a bug computing blocks to be discarded */ -SET deduplicate_blocks_in_dependent_materialized_views = 0, max_insert_delayed_streams_for_parallel_write = 10; +SET deduplicate_blocks_in_dependent_materialized_views = 0, max_insert_delayed_streams_for_parallel_write = 1000; CREATE TABLE landing ( @@ -85,14 +85,13 @@ SELECT * FROM landing FINAL ORDER BY time; SELECT '-- MV'; SELECT * FROM mv FINAL ORDER BY hour; -SET max_insert_delayed_streams_for_parallel_write = 1; DROP TABLE IF EXISTS landing SYNC; DROP TABLE IF EXISTS mv SYNC; SELECT '-- Original issue with deduplicate_blocks_in_dependent_materialized_views = 1 AND max_insert_delayed_streams_for_parallel_write > 1'; /* - By setting deduplicate_blocks_in_dependent_materialized_views = 1 we can make the code go through a different path getting an expected + By setting deduplicate_blocks_in_dependent_materialized_views = 1 we can make the code go through a different path getting an expected behavior again, even with max_insert_delayed_streams_for_parallel_write > 1. This is what happens now: @@ -101,7 +100,7 @@ SELECT '-- Original issue with deduplicate_blocks_in_dependent_materialized_view - 2nd insert gets first block 20220901 deduplicated and second one inserted for landing and mv tables */ -SET deduplicate_blocks_in_dependent_materialized_views = 1, max_insert_delayed_streams_for_parallel_write = 10; +SET deduplicate_blocks_in_dependent_materialized_views = 1, max_insert_delayed_streams_for_parallel_write = 1000; CREATE TABLE landing ( @@ -129,7 +128,6 @@ SELECT * FROM landing FINAL ORDER BY time; SELECT '-- MV'; SELECT * FROM mv FINAL ORDER BY hour; -SET max_insert_delayed_streams_for_parallel_write = 1; DROP TABLE IF EXISTS landing SYNC; DROP TABLE IF EXISTS mv SYNC; @@ -142,6 +140,7 @@ SELECT '-- Regression introduced in https://github.com/ClickHouse/ClickHouse/pul max_insert_delayed_streams_for_parallel_write > 1 but it ended up adding a new regression. 
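As a rough, self-contained sketch of the deduplicate_blocks_in_dependent_materialized_views = 1 path that the comment above describes (the table names, engine arguments and inserted values here are illustrative placeholders, not the definitions used by the test):

SET deduplicate_blocks_in_dependent_materialized_views = 1,
    max_insert_delayed_streams_for_parallel_write = 1000;

CREATE TABLE landing_sketch (time DateTime, number Int64)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/landing_sketch', 'r1')
ORDER BY time;

CREATE MATERIALIZED VIEW mv_sketch
ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{database}/mv_sketch', 'r1')
ORDER BY hour
AS SELECT toStartOfHour(time) AS hour, sum(number) AS sum_number
FROM landing_sketch
GROUP BY hour;

-- The second, identical insert is deduplicated in landing_sketch; with the setting
-- above the corresponding block is expected to be skipped in mv_sketch as well,
-- rather than being inserted there a second time.
INSERT INTO landing_sketch VALUES ('2022-09-01 12:23:34', 42);
INSERT INTO landing_sketch VALUES ('2022-09-01 12:23:34', 42);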
*/ +SET deduplicate_blocks_in_dependent_materialized_views = 0, max_insert_delayed_streams_for_parallel_write = 0; CREATE TABLE landing ( diff --git a/tests/queries/0_stateless/02915_analyzer_fuzz_6.reference b/tests/queries/0_stateless/02915_analyzer_fuzz_6.reference new file mode 100644 index 00000000000..b5c035d8576 --- /dev/null +++ b/tests/queries/0_stateless/02915_analyzer_fuzz_6.reference @@ -0,0 +1,2 @@ +[(0,0)] +[(1,1)] diff --git a/tests/queries/0_stateless/02915_analyzer_fuzz_6.sql b/tests/queries/0_stateless/02915_analyzer_fuzz_6.sql new file mode 100644 index 00000000000..b4eb1b4aff4 --- /dev/null +++ b/tests/queries/0_stateless/02915_analyzer_fuzz_6.sql @@ -0,0 +1,19 @@ +set allow_suspicious_low_cardinality_types=1; +set allow_experimental_analyzer=1; + +create table tab (x LowCardinality(Nullable(Float64))) engine = MergeTree order by x settings allow_nullable_key=1; +insert into tab select number from numbers(2); +SELECT [(arrayJoin([x]), x)] AS row FROM tab; + + +CREATE TABLE t__fuzz_307 (`k1` DateTime, `k2` LowCardinality(Nullable(Float64)), `v` Nullable(UInt32)) ENGINE = + ReplacingMergeTree ORDER BY (k1, k2) settings allow_nullable_key=1; + insert into t__fuzz_307 select * from generateRandom() limit 10; + SELECT arrayJoin([tuple([(toNullable(NULL), -9223372036854775808, toNullable(3.4028234663852886e38), arrayJoin( +[tuple([(toNullable(NULL), 2147483647, toNullable(0.5), k2)])]), k2)])]) AS row, arrayJoin([(1024, k2)]), -9223372036854775807, 256, tupleElement(row, 1048576, 1024) AS k FROM t__fuzz_307 FINAL ORDER BY (toNullable('655.36'), 2, toNullable +('0.2147483648'), k2) ASC, toNullable('102.3') DESC NULLS FIRST, '10.25' DESC, k ASC NULLS FIRST format Null; + +CREATE TABLE t__fuzz_282 (`k1` DateTime, `k2` LowCardinality(Nullable(Float64)), `v` Nullable(UInt32)) ENGINE = ReplacingMergeTree ORDER BY (k1, k2) SETTINGS allow_nullable_key = 1; +INSERT INTO t__fuzz_282 VALUES (1, 2, 3) (1, 2, 4) (2, 3, 4), (2, 3, 5); + +SELECT arrayJoin([tuple([(toNullable(NULL), -9223372036854775808, toNullable(3.4028234663852886e38), arrayJoin([tuple([(toNullable(NULL), 2147483647, toNullable(0.5), k2)])]), k2)])]) AS row, arrayJoin([(1024, k2)]), -9223372036854775807, 256, tupleElement(row, 1048576, 1024) AS k FROM t__fuzz_282 FINAL ORDER BY (toNullable('655.36'), 2, toNullable('0.2147483648'), k2) ASC, toNullable('102.3') DESC NULLS FIRST, '10.25' DESC, k ASC NULLS FIRST format Null; diff --git a/tests/queries/0_stateless/02915_fpc_overflow.reference b/tests/queries/0_stateless/02915_fpc_overflow.reference new file mode 100644 index 00000000000..73011ecb641 --- /dev/null +++ b/tests/queries/0_stateless/02915_fpc_overflow.reference @@ -0,0 +1,2 @@ +Exc +Exc diff --git a/tests/queries/0_stateless/02915_fpc_overflow.sh b/tests/queries/0_stateless/02915_fpc_overflow.sh new file mode 100755 index 00000000000..a10543ed8c4 --- /dev/null +++ b/tests/queries/0_stateless/02915_fpc_overflow.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +echo -ne 'checksumchecksum\x98\x90\x00\x00\x00\x11\x11\x11\x11\x04\x0f\x51 ' | + ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&decompress=1&http_native_compression_disable_checksumming_on_decompress=1" --data-binary @- 2>&1 | grep -oF 'Exc' + +echo -ne 'checksumchecksum\x98\x90\x00\x00\x00\x11\x11\x11\x11\x04\x0f\x16 ' | + ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&decompress=1&http_native_compression_disable_checksumming_on_decompress=1" --data-binary @- 2>&1 | grep -oF 'Exc' diff --git a/tests/queries/0_stateless/02915_input_table_function_in_subquery.reference b/tests/queries/0_stateless/02915_input_table_function_in_subquery.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02915_input_table_function_in_subquery.sh b/tests/queries/0_stateless/02915_input_table_function_in_subquery.sh new file mode 100755 index 00000000000..80e38338751 --- /dev/null +++ b/tests/queries/0_stateless/02915_input_table_function_in_subquery.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# Tags: no-random-merge-tree-settings + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -nm -q " +CREATE TABLE IF NOT EXISTS ts_data_double_raw +( + device_id UInt32 NOT NULL CODEC(ZSTD), + data_item_id UInt32 NOT NULL CODEC(ZSTD), + data_time DateTime64(3, 'UTC') NOT NULL CODEC(Delta, ZSTD), + data_value Float64 NOT NULL CODEC(Delta, ZSTD), + is_deleted Bool CODEC(ZSTD), + ingestion_time DateTime64(3, 'UTC') NOT NULL CODEC(Delta, ZSTD) +) +ENGINE = ReplacingMergeTree +PARTITION BY toYYYYMM(data_time) +ORDER BY (device_id, data_item_id, data_time) +SETTINGS index_granularity = 8192; + + +CREATE VIEW ts_data_double AS +SELECT + device_id, + data_item_id, + data_time, + argMax(data_value, ingestion_time) data_value, + max(ingestion_time) version, + argMax(is_deleted, ingestion_time) is_deleted +FROM ts_data_double_raw +GROUP BY device_id, data_item_id, data_time +HAVING is_deleted = 0; + +INSERT INTO ts_data_double_raw VALUES (100, 1, fromUnixTimestamp64Milli(1697547086760), 3.6, false, fromUnixTimestamp64Milli(1)), (100, 1, fromUnixTimestamp64Milli(1697547086761), 4.6, false, fromUnixTimestamp64Milli(1)); +INSERT INTO ts_data_double_raw VALUES (100, 1, fromUnixTimestamp64Milli(1697547086760), 3.6, true, fromUnixTimestamp64Milli(5)), (100, 1, fromUnixTimestamp64Milli(1697547086761), 4.6, false, fromUnixTimestamp64Milli(4)); +" + +$CLICKHOUSE_CLIENT -q "select 1697547086760 format RowBinary" | ${CLICKHOUSE_CURL} -sS 
"${CLICKHOUSE_URL}&query=INSERT%20INTO%20ts_data_double_raw%20%28device_id%2C%20data_item_id%2C%20data_time%2C%20data_value%2C%20is_deleted%2C%20ingestion_time%29%0ASELECT%0A%20%20%20device_id%2C%0A%20%20%20data_item_id%2C%0A%20%20%20data_time%2C%0A%20%20%20data_value%2C%0A%20%20%201%2C%20%20--%20mark%20as%20deleted%0A%20%20%20fromUnixTimestamp64Milli%281697547088995%2C%20%27UTC%27%29%20--%20all%20inserted%20records%20have%20new%20ingestion%20time%0AFROM%20ts_data_double%0AWHERE%20%28device_id%20%3D%20100%29%20AND%20%28data_item_id%20%3D%201%29%0A%20%20%20%20AND%20%28data_time%20%3E%3D%20fromUnixTimestamp64Milli%280%2C%20%27UTC%27%29%29%0A%20%20%20%20AND%20%28data_time%20%3C%3D%20fromUnixTimestamp64Milli%281697547086764%2C%20%27UTC%27%29%29%0A%20%20%20%20AND%20version%20%3C%20fromUnixTimestamp64Milli%281697547088995%2C%20%27UTC%27%29%0A%20%20%20%20AND%20%28toUnixTimestamp64Milli%28data_time%29%20IN%20%28SELECT%20timestamp%20FROM%20input%28%27timestamp%20UInt64%27%29%29%29%20SETTINGS%20insert_quorum%3D1%0A%20FORMAT%20RowBinary" --data-binary @- diff --git a/tests/queries/0_stateless/02915_move_partition_inactive_replica.reference b/tests/queries/0_stateless/02915_move_partition_inactive_replica.reference new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/tests/queries/0_stateless/02915_move_partition_inactive_replica.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/02915_move_partition_inactive_replica.sql b/tests/queries/0_stateless/02915_move_partition_inactive_replica.sql new file mode 100644 index 00000000000..3b30a2b6c2c --- /dev/null +++ b/tests/queries/0_stateless/02915_move_partition_inactive_replica.sql @@ -0,0 +1,57 @@ +-- Tags: no-parallel + +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists shard_0.from_0; +drop table if exists shard_1.from_0; +drop table if exists shard_0.from_1; +drop table if exists shard_1.from_1; +drop table if exists shard_0.to; +drop table if exists shard_1.to; + +create table shard_0.from_0 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_0_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; +create table shard_1.from_0 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_0_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; + +create table shard_0.from_1 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_1_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; +create table shard_1.from_1 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_1_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; + +insert into shard_0.from_0 select number from numbers(10); +insert into shard_0.from_0 select number + 10 from numbers(10); + +insert into shard_0.from_1 select number + 20 from numbers(10); +insert into shard_0.from_1 select number + 30 from numbers(10); + +system sync replica shard_1.from_0; +system sync replica shard_1.from_1; + + +create table shard_0.to (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/to_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; + +create table shard_1.to (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/to_' || 
currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; + +detach table shard_1.to; + +alter table shard_0.from_0 on cluster test_cluster_two_shards_different_databases move partition tuple() to table shard_0.to format Null settings distributed_ddl_output_mode='never_throw', distributed_ddl_task_timeout = 1; + +alter table shard_0.from_1 on cluster test_cluster_two_shards_different_databases move partition tuple() to table shard_0.to format Null settings distributed_ddl_output_mode='never_throw', distributed_ddl_task_timeout = 1; + +OPTIMIZE TABLE shard_0.from_0; +OPTIMIZE TABLE shard_1.from_0; +OPTIMIZE TABLE shard_0.from_1; +OPTIMIZE TABLE shard_1.from_1; +OPTIMIZE TABLE shard_0.to; + +system restart replica shard_0.to; + +select sleep(2); + +attach table shard_1.to; + +drop table if exists shard_0.from_0; +drop table if exists shard_1.from_0; +drop table if exists shard_0.from_1; +drop table if exists shard_1.from_1; +drop table if exists shard_0.to; +drop table if exists shard_1.to; + diff --git a/tests/queries/0_stateless/02915_sleep_large_uint.reference b/tests/queries/0_stateless/02915_sleep_large_uint.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02915_sleep_large_uint.sql b/tests/queries/0_stateless/02915_sleep_large_uint.sql new file mode 100644 index 00000000000..f7c04ab6d1f --- /dev/null +++ b/tests/queries/0_stateless/02915_sleep_large_uint.sql @@ -0,0 +1,7 @@ +SELECT sleep(3.40282e+44); -- { serverError BAD_ARGUMENTS } +SELECT sleep((pow(2, 64) / 1000000) - 1); -- { serverError BAD_ARGUMENTS } +SELECT sleepEachRow(184467440737095516) from numbers(10000); -- { serverError BAD_ARGUMENTS } +SELECT sleepEachRow(pow(2, 31)) from numbers(9007199254740992) settings function_sleep_max_microseconds_per_block = 8589934592000000000; -- { serverError TOO_SLOW } + +-- Another corner case, but it requires lots of memory to run (huge block size) +-- SELECT sleepEachRow(pow(2, 31)) from numbers(17179869184) settings max_block_size = 17179869184, function_sleep_max_microseconds_per_block = 8589934592000000000; -- { serverError TOO_SLOW } diff --git a/tests/queries/0_stateless/02916_addcolumn_nested.reference b/tests/queries/0_stateless/02916_addcolumn_nested.reference new file mode 100644 index 00000000000..7d79cd8731f --- /dev/null +++ b/tests/queries/0_stateless/02916_addcolumn_nested.reference @@ -0,0 +1,4 @@ +CREATE TABLE default.nested_table\n(\n `id` UInt64,\n `first` Nested(a Int8, b String)\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 8192 +CREATE TABLE default.nested_table\n(\n `id` UInt64,\n `second.c` Array(Int8),\n `second.d` Array(String),\n `first` Nested(a Int8, b String)\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 8192 +CREATE TABLE default.nested_table\n(\n `third` Nested(e Int8, f String),\n `id` UInt64,\n `second.c` Array(Int8),\n `second.d` Array(String),\n `first` Nested(a Int8, b String)\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 8192 +CREATE TABLE default.nested_table\n(\n `third` Nested(e Int8, f String),\n `id` UInt64,\n `second.c` Array(Int8),\n `second.d` Array(String),\n `first` Nested(a Int8, b String),\n `fourth.g` Array(Int8),\n `fourth.h` Array(String)\n)\nENGINE = MergeTree\nORDER BY id\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02916_addcolumn_nested.sql b/tests/queries/0_stateless/02916_addcolumn_nested.sql new file mode 100644 index 
00000000000..1e64fca6a15 --- /dev/null +++ b/tests/queries/0_stateless/02916_addcolumn_nested.sql @@ -0,0 +1,22 @@ +SET flatten_nested = 0; + +DROP TABLE IF EXISTS nested_table; +CREATE TABLE nested_table (id UInt64, first Nested(a Int8, b String)) ENGINE = MergeTree() ORDER BY id; +SHOW CREATE nested_table; + +SET flatten_nested = 1; + +ALTER TABLE nested_table ADD COLUMN second Nested(c Int8, d String) AFTER id; +SHOW CREATE nested_table; + +SET flatten_nested = 0; + +ALTER TABLE nested_table ADD COLUMN third Nested(e Int8, f String) FIRST; +SHOW CREATE nested_table; + +SET flatten_nested = 1; + +ALTER TABLE nested_table ADD COLUMN fourth Nested(g Int8, h String); +SHOW CREATE nested_table; + +DROP TABLE nested_table; diff --git a/tests/queries/0_stateless/02916_analyzer_set_in_join.reference b/tests/queries/0_stateless/02916_analyzer_set_in_join.reference new file mode 100644 index 00000000000..a063ea39893 --- /dev/null +++ b/tests/queries/0_stateless/02916_analyzer_set_in_join.reference @@ -0,0 +1,2 @@ +1 0 +42 1 diff --git a/tests/queries/0_stateless/02916_analyzer_set_in_join.sql b/tests/queries/0_stateless/02916_analyzer_set_in_join.sql new file mode 100644 index 00000000000..cae17d74a97 --- /dev/null +++ b/tests/queries/0_stateless/02916_analyzer_set_in_join.sql @@ -0,0 +1,11 @@ + +SELECT 1, b +FROM numbers(1) +ARRAY JOIN [materialize(3) IN (SELECT 42)] AS b +; + +SELECT * +FROM (SELECT materialize(42) as a) as t1 +JOIN (SELECT materialize(1) as a) as t2 +ON t1.a IN (SELECT 42) = t2.a +; diff --git a/tests/queries/0_stateless/02916_date_text_parsing.reference b/tests/queries/0_stateless/02916_date_text_parsing.reference new file mode 100644 index 00000000000..2ec123200d0 --- /dev/null +++ b/tests/queries/0_stateless/02916_date_text_parsing.reference @@ -0,0 +1,5 @@ +2020-01-02 SomeString +2020-01-02 SomeString +2020-01-02 SomeString +2020-01-02 SomeString +2020-01-02 SomeString diff --git a/tests/queries/0_stateless/02916_date_text_parsing.sql b/tests/queries/0_stateless/02916_date_text_parsing.sql new file mode 100644 index 00000000000..d895ccece19 --- /dev/null +++ b/tests/queries/0_stateless/02916_date_text_parsing.sql @@ -0,0 +1,25 @@ +select * from format(CSV, 'd Date, s String', 'abcdefgh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2bcdefgh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '20cdefgh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '202defgh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020efgh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '20200fgh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '202001gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020010h,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '20200102,SomeString'); +select * from format(CSV, 'd Date, s String', 'abcd-ef-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2bcd-ef-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '20cd-ef-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '202d-ef-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from 
format(CSV, 'd Date, s String', '2020-ef-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-f-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-f-g,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-0f-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-01-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-01-h,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-1-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-1-h,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-01-02,SomeString'); +select * from format(CSV, 'd Date, s String', '2020-01-2,SomeString'); +select * from format(CSV, 'd Date, s String', '2020-1-2,SomeString'); +select * from format(CSV, 'd Date, s String', '2020-1-02,SomeString'); diff --git a/tests/queries/0_stateless/02916_glogal_in_cancel.reference b/tests/queries/0_stateless/02916_glogal_in_cancel.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02916_glogal_in_cancel.sql b/tests/queries/0_stateless/02916_glogal_in_cancel.sql new file mode 100644 index 00000000000..ad54f1ecdec --- /dev/null +++ b/tests/queries/0_stateless/02916_glogal_in_cancel.sql @@ -0,0 +1,2 @@ +set max_execution_time = 0.5, timeout_overflow_mode = 'break'; +SELECT number FROM remote('127.0.0.{3|2}', numbers(1)) WHERE number GLOBAL IN (SELECT number FROM numbers(10000000000.)) format Null; diff --git a/tests/queries/0_stateless/02916_joinget_dependency.reference b/tests/queries/0_stateless/02916_joinget_dependency.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/02916_joinget_dependency.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/02916_joinget_dependency.sh b/tests/queries/0_stateless/02916_joinget_dependency.sh new file mode 100755 index 00000000000..6477ae8c967 --- /dev/null +++ b/tests/queries/0_stateless/02916_joinget_dependency.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +# We test the dependency on the DROP + +$CLICKHOUSE_CLIENT -nm -q " + DROP TABLE IF EXISTS Sub_distributed; + DROP TABLE IF EXISTS Sub; + DROP TABLE IF EXISTS Mapping; + + CREATE TABLE Mapping (Id UInt64, RegionId UInt64) ENGINE = Join(ANY,LEFT,Id); + INSERT INTO Mapping VALUES (1,1); + CREATE TABLE Sub (Id UInt64, PropertyId UInt64) ENGINE = MergeTree() PRIMARY KEY (Id) ORDER BY (Id); + CREATE TABLE Sub_distributed (Id UInt64, PropertyId UInt64)ENGINE = Distributed('test_shard_localhost', $CLICKHOUSE_DATABASE, Sub, joinGet('$CLICKHOUSE_DATABASE.Mapping','RegionId',PropertyId));" + +$CLICKHOUSE_CLIENT -q " + DROP TABLE Mapping; +" 2>&1 | grep -cm1 "HAVE_DEPENDENT_OBJECTS" + +$CLICKHOUSE_CLIENT -nm -q " + DROP TABLE Sub_distributed; + DROP TABLE Sub; + DROP TABLE Mapping; +" \ No newline at end of file diff --git a/tests/queries/0_stateless/02916_local_insert_into_function.reference b/tests/queries/0_stateless/02916_local_insert_into_function.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02916_local_insert_into_function.sh b/tests/queries/0_stateless/02916_local_insert_into_function.sh new file mode 100755 index 00000000000..2eca2c2ce0a --- /dev/null +++ b/tests/queries/0_stateless/02916_local_insert_into_function.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL -q "insert into function file('/dev/null', CSV, 'c1 UInt32') values (42)" + diff --git a/tests/queries/0_stateless/02916_replication_protocol_wait_for_part.reference b/tests/queries/0_stateless/02916_replication_protocol_wait_for_part.reference new file mode 100644 index 00000000000..0cfbf08886f --- /dev/null +++ b/tests/queries/0_stateless/02916_replication_protocol_wait_for_part.reference @@ -0,0 +1 @@ +2 diff --git a/tests/queries/0_stateless/02916_replication_protocol_wait_for_part.sql b/tests/queries/0_stateless/02916_replication_protocol_wait_for_part.sql new file mode 100644 index 00000000000..010e29a34e8 --- /dev/null +++ b/tests/queries/0_stateless/02916_replication_protocol_wait_for_part.sql @@ -0,0 +1,26 @@ +-- Tags: no-replicated-database, no-fasttest +-- Tag no-replicated-database: different number of replicas + +create table tableIn (n int) + engine=ReplicatedMergeTree('/test/02916/{database}/table', '1') + order by tuple() + settings + storage_policy='s3_cache', + allow_remote_fs_zero_copy_replication=1, + sleep_before_commit_local_part_in_replicated_table_ms=5000; +create table tableOut (n int) + engine=ReplicatedMergeTree('/test/02916/{database}/table', '2') + order by tuple() + settings + storage_policy='s3_cache', + allow_remote_fs_zero_copy_replication=1; + +SET send_logs_level='error'; + +insert into tableIn values(1); +insert into tableIn values(2); +system sync replica tableOut; +select count() from tableOut; + +drop table tableIn; +drop table tableOut; diff --git a/tests/queries/0_stateless/02916_set_formatting.reference b/tests/queries/0_stateless/02916_set_formatting.reference new file mode 100644 index 00000000000..34ff52365f9 --- /dev/null +++ b/tests/queries/0_stateless/02916_set_formatting.reference @@ -0,0 +1,11 @@ +SET additional_table_filters = {\'kjsnckjn\':\'ksanmn\', \'dkm\':\'dd\'} +SELECT v FROM t1 SETTINGS additional_table_filters = {\'default.t1\':\'s\'} +Row 1: +────── +statement: CREATE VIEW default.v1 +( + `v` UInt64 +) AS +SELECT v +FROM default.t1 +SETTINGS 
additional_table_filters = {'default.t1':'s != \'s1%\''} diff --git a/tests/queries/0_stateless/02916_set_formatting.sql b/tests/queries/0_stateless/02916_set_formatting.sql new file mode 100644 index 00000000000..10b875293f1 --- /dev/null +++ b/tests/queries/0_stateless/02916_set_formatting.sql @@ -0,0 +1,13 @@ +SELECT formatQuerySingleLine('set additional_table_filters = {\'kjsnckjn\': \'ksanmn\', \'dkm\': \'dd\'}'); +SELECT formatQuerySingleLine('SELECT v FROM t1 SETTINGS additional_table_filters = {\'default.t1\': \'s\'}'); + +DROP TABLE IF EXISTS t1; +DROP VIEW IF EXISTS v1; + +CREATE TABLE t1 (v UInt64, s String) ENGINE=MergeTree() ORDER BY v; +CREATE VIEW v1 (v UInt64) AS SELECT v FROM t1 SETTINGS additional_table_filters = {'default.t1': 's != \'s1%\''}; + +SHOW CREATE TABLE v1 FORMAT Vertical; + +DROP VIEW v1; +DROP TABLE t1; diff --git a/tests/queries/0_stateless/02917_transform_tsan.reference b/tests/queries/0_stateless/02917_transform_tsan.reference new file mode 100644 index 00000000000..896ae5f0269 --- /dev/null +++ b/tests/queries/0_stateless/02917_transform_tsan.reference @@ -0,0 +1,4 @@ +\N +\N +\N +\N diff --git a/tests/queries/0_stateless/02917_transform_tsan.sql b/tests/queries/0_stateless/02917_transform_tsan.sql new file mode 100644 index 00000000000..dac79f83d6a --- /dev/null +++ b/tests/queries/0_stateless/02917_transform_tsan.sql @@ -0,0 +1,2 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/56815 +SELECT transform(arrayJoin([NULL, NULL]), [NULL, NULL], [NULL]) GROUP BY GROUPING SETS (('0.1'), ('-0.2147483647')); diff --git a/tests/queries/0_stateless/02918_analyzer_to_ast_crash.reference b/tests/queries/0_stateless/02918_analyzer_to_ast_crash.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02918_analyzer_to_ast_crash.sql b/tests/queries/0_stateless/02918_analyzer_to_ast_crash.sql new file mode 100644 index 00000000000..274f74d6ad1 --- /dev/null +++ b/tests/queries/0_stateless/02918_analyzer_to_ast_crash.sql @@ -0,0 +1,5 @@ +WITH + x AS (SELECT in((SELECT * FROM y))), + y AS (SELECT 1) +SELECT * FROM x; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + diff --git a/tests/queries/0_stateless/02918_gorilla_invalid_file.reference b/tests/queries/0_stateless/02918_gorilla_invalid_file.reference new file mode 100644 index 00000000000..2574a09f166 --- /dev/null +++ b/tests/queries/0_stateless/02918_gorilla_invalid_file.reference @@ -0,0 +1 @@ +Exc diff --git a/tests/queries/0_stateless/02918_gorilla_invalid_file.sh b/tests/queries/0_stateless/02918_gorilla_invalid_file.sh new file mode 100755 index 00000000000..b877e59b483 --- /dev/null +++ b/tests/queries/0_stateless/02918_gorilla_invalid_file.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +echo -ne 'checksumchecksum\x95\xd3\x02\x00\x00\x01\x00\x00\x00\x0800\xff\xff\xff\xff\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08\x02\x03\x04\x05\x06\x07\x08' | + ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&decompress=1&http_native_compression_disable_checksumming_on_decompress=1" --data-binary @- 2>&1 | grep -oF 'Exc' diff --git a/tests/queries/0_stateless/02918_implicit_sign_column_constraints_for_collapsing_engine.reference b/tests/queries/0_stateless/02918_implicit_sign_column_constraints_for_collapsing_engine.reference new file mode 100644 index 00000000000..323b12c173a --- /dev/null +++ b/tests/queries/0_stateless/02918_implicit_sign_column_constraints_for_collapsing_engine.reference @@ -0,0 +1,4 @@ +1 2504 1 +ok +1 200 1 1 +ok diff --git 
a/tests/queries/0_stateless/02918_implicit_sign_column_constraints_for_collapsing_engine.sh b/tests/queries/0_stateless/02918_implicit_sign_column_constraints_for_collapsing_engine.sh new file mode 100755 index 00000000000..43594a45a1e --- /dev/null +++ b/tests/queries/0_stateless/02918_implicit_sign_column_constraints_for_collapsing_engine.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +EXCEPTION_TEXT="VIOLATED_CONSTRAINT" +EXCEPTION_SUCCESS_TEXT=ok + +# CollapsingSortedAlgorithm::merge() also has a check for sign column value +# optimize_on_insert = 0 is required to avoid this automatic merge behavior +$CLICKHOUSE_CLIENT --query="SET optimize_on_insert=0;" + + +# CollapsingMergeTree +$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS collapsing_merge_tree;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE collapsing_merge_tree +( + Key UInt32, + Count UInt16, + Sign Int8 +) +ENGINE=CollapsingMergeTree(Sign) ORDER BY Key +SETTINGS add_implicit_sign_column_constraint_for_collapsing_engine=1;" + +# Should succeed +$CLICKHOUSE_CLIENT --query="INSERT INTO collapsing_merge_tree VALUES (1, 2504, 1);" +$CLICKHOUSE_CLIENT --query="SELECT * FROM collapsing_merge_tree;" + +# Should throw an exception +$CLICKHOUSE_CLIENT --query="INSERT INTO collapsing_merge_tree VALUES (1, 2504, 5);" 2>&1 \ + | grep -q "$EXCEPTION_TEXT" && echo "$EXCEPTION_SUCCESS_TEXT" || echo "Did not throw an exception" + +$CLICKHOUSE_CLIENT --query="DROP TABLE collapsing_merge_tree;" + + +# VersionedCollapsingMergeTree +$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS versioned_collapsing_merge_tree;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE versioned_collapsing_merge_tree +( + Key UInt32, + Count UInt8, + Sign Int8, + Version UInt8 +) +ENGINE=VersionedCollapsingMergeTree(Sign, Version) ORDER BY Key +SETTINGS add_implicit_sign_column_constraint_for_collapsing_engine=1;" + +# Should succeed +$CLICKHOUSE_CLIENT --query="INSERT INTO versioned_collapsing_merge_tree VALUES (1, 2504, 1, 1);" +$CLICKHOUSE_CLIENT --query="SELECT * FROM versioned_collapsing_merge_tree;" + +# Should throw an exception +$CLICKHOUSE_CLIENT --query="INSERT INTO versioned_collapsing_merge_tree VALUES (1, 2504, 5, 1);" 2>&1 \ + | grep -q "$EXCEPTION_TEXT" && echo "$EXCEPTION_SUCCESS_TEXT" || echo "Did not throw an exception" + +$CLICKHOUSE_CLIENT --query="DROP TABLE versioned_collapsing_merge_tree;" diff --git a/tests/queries/0_stateless/02918_join_pm_lc_crash.reference b/tests/queries/0_stateless/02918_join_pm_lc_crash.reference new file mode 100644 index 00000000000..7523f1c1774 --- /dev/null +++ b/tests/queries/0_stateless/02918_join_pm_lc_crash.reference @@ -0,0 +1,12 @@ +0 + +0 +0 + +0 +0 + +\N +0 + +\N diff --git a/tests/queries/0_stateless/02918_join_pm_lc_crash.sql b/tests/queries/0_stateless/02918_join_pm_lc_crash.sql new file mode 100644 index 00000000000..123208ee981 --- /dev/null +++ b/tests/queries/0_stateless/02918_join_pm_lc_crash.sql @@ -0,0 +1,31 @@ + +SET joined_subquery_requires_alias = 0, join_algorithm = 'partial_merge'; + +SET allow_experimental_analyzer = 0, join_use_nulls = 0; + +SELECT * FROM (SELECT dummy AS val FROM system.one) +JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val +FROM system.one GROUP BY val WITH TOTALS) +USING (val); + +SET allow_experimental_analyzer = 0, join_use_nulls = 1; + +SELECT * FROM (SELECT dummy AS val FROM system.one) +JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val +FROM 
system.one GROUP BY val WITH TOTALS) +USING (val); + +SET allow_experimental_analyzer = 1, join_use_nulls = 0; + +SELECT * FROM (SELECT dummy AS val FROM system.one) +JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val +FROM system.one GROUP BY val WITH TOTALS) +USING (val); + +SET allow_experimental_analyzer = 1, join_use_nulls = 1; + +SELECT * FROM (SELECT dummy AS val FROM system.one) +JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val +FROM system.one GROUP BY val WITH TOTALS) +USING (val); + diff --git a/tests/queries/0_stateless/02918_sqlite_path_check.reference b/tests/queries/0_stateless/02918_sqlite_path_check.reference new file mode 100644 index 00000000000..56b832a6469 --- /dev/null +++ b/tests/queries/0_stateless/02918_sqlite_path_check.reference @@ -0,0 +1,2 @@ +SQLite database file path '/etc/passwd' must be inside 'user_files' directory. (PATH_ACCESS_DENIED) +SQLite database file path '../../../../etc/passwd' must be inside 'user_files' directory. (PATH_ACCESS_DENIED) diff --git a/tests/queries/0_stateless/02918_sqlite_path_check.sh b/tests/queries/0_stateless/02918_sqlite_path_check.sh new file mode 100755 index 00000000000..fa74b9ecfc8 --- /dev/null +++ b/tests/queries/0_stateless/02918_sqlite_path_check.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# Tags: no-fasttest +# Tag no-fasttest: Fast tests don't build external libraries (SQLite) + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +function get_exception_message() +{ + $CLICKHOUSE_CLIENT --query "$1" |& head -n1 | sed 's/.*DB::Exception: \(.*\) (version.*/\1/g' +} + +get_exception_message "Select * from sqlite('/etc/passwd', 'something');" +get_exception_message "Select * from sqlite('../../../../etc/passwd', 'something');" diff --git a/tests/queries/0_stateless/02918_wrong_dictionary_source.reference b/tests/queries/0_stateless/02918_wrong_dictionary_source.reference new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/tests/queries/0_stateless/02918_wrong_dictionary_source.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/02918_wrong_dictionary_source.sql b/tests/queries/0_stateless/02918_wrong_dictionary_source.sql new file mode 100644 index 00000000000..e729ef74c61 --- /dev/null +++ b/tests/queries/0_stateless/02918_wrong_dictionary_source.sql @@ -0,0 +1,11 @@ +DROP DICTIONARY IF EXISTS id_value_dictionary; +DROP TABLE IF EXISTS source_table; + +CREATE TABLE source_table(id UInt64, value String) ENGINE = MergeTree ORDER BY tuple(); + +-- There is no "CLICKHOUSEX" dictionary source, so the next query must fail even if `dictionaries_lazy_load` is enabled. 
+CREATE DICTIONARY id_value_dictionary(id UInt64, value String) PRIMARY KEY id SOURCE(CLICKHOUSEX(TABLE 'source_table')) LIFETIME(MIN 0 MAX 1000) LAYOUT(FLAT()); -- { serverError UNKNOWN_ELEMENT_IN_CONFIG } + +SELECT count() FROM system.dictionaries WHERE name=='id_value_dictionary' AND database==currentDatabase(); + +DROP TABLE source_table; diff --git a/tests/queries/0_stateless/02920_capnp_protobuf_auto_schema_nested.reference b/tests/queries/0_stateless/02920_capnp_protobuf_auto_schema_nested.reference new file mode 100644 index 00000000000..9874bc57142 --- /dev/null +++ b/tests/queries/0_stateless/02920_capnp_protobuf_auto_schema_nested.reference @@ -0,0 +1,52 @@ + +message Message +{ + message H + { + uint32 k = 1; + } + H h = 1; + message A + { + uint32 g = 1; + message B + { + uint32 c = 1; + uint32 f = 2; + message D + { + uint32 e = 1; + } + D d = 3; + } + B b = 2; + } + A a = 2; +} +46 (45,(42,44,43)) + +struct Message +{ + struct H + { + k @0 : UInt8; + } + h @0 : H; + struct A + { + g @0 : UInt8; + struct B + { + c @0 : UInt8; + f @1 : UInt8; + struct D + { + e @0 : UInt8; + } + d @2 : D; + } + b @1 : B; + } + a @1 : A; +} +(46) (45,(42,44,(43))) diff --git a/tests/queries/0_stateless/02920_capnp_protobuf_auto_schema_nested.sh b/tests/queries/0_stateless/02920_capnp_protobuf_auto_schema_nested.sh new file mode 100755 index 00000000000..aee6b866719 --- /dev/null +++ b/tests/queries/0_stateless/02920_capnp_protobuf_auto_schema_nested.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +SCHEMA_FILE=$CLICKHOUSE_TEST_UNIQUE_NAME-schema +FILE=$CLICKHOUSE_TEST_UNIQUE_NAME + +$CLICKHOUSE_LOCAL -q "select 42 as \`a.b.c\`, 43 as \`a.b.d.e\`, 44 as \`a.b.f\`, 45 as \`a.g\`, 46 as \`h.k\` format Protobuf settings output_format_schema='$SCHEMA_FILE.proto'" > $FILE.pb +tail -n +2 $SCHEMA_FILE.proto +$CLICKHOUSE_LOCAL -q "select * from file('$FILE.pb') settings format_schema='$SCHEMA_FILE:Message'" + +$CLICKHOUSE_LOCAL -q "select 42 as a_b_c, 43 as a_b_d_e, 44 as a_b_f, 45 as a_g, 46 as h_k format CapnProto settings output_format_schema='$SCHEMA_FILE.capnp'" > $FILE.capnp +tail -n +2 $SCHEMA_FILE.capnp +$CLICKHOUSE_LOCAL -q "select * from file('$FILE.capnp') settings format_schema='$SCHEMA_FILE:Message'" + +rm $SCHEMA_FILE* +rm $FILE.* + diff --git a/tests/queries/0_stateless/02921_bit_hamming_distance_big_int.reference b/tests/queries/0_stateless/02921_bit_hamming_distance_big_int.reference new file mode 100644 index 00000000000..62245f5d176 --- /dev/null +++ b/tests/queries/0_stateless/02921_bit_hamming_distance_big_int.reference @@ -0,0 +1,9 @@ +314776434768051644139306697240981192872 0 74 74 +14776434768051644139306697240981192872314776434768051644139306697240981192872 0 141 141 +314776434768051644139306697240981192872 14776434768051644139306697240981192872314776434768051644139306697240981192872 115 115 +-25505932152886819324067910190787018584 0 74 74 +14776434768051644139306697240981192872314776434768051644139306697240981192872 0 141 141 +-25505932152886819324067910190787018584 14776434768051644139306697240981192872314776434768051644139306697240981192872 99 99 +314776434768051644139306697240981192872 0 74 74 +14776434768051644139306697240981192872314776434768051644139306697240981192872 0 141 141 +314776434768051644139306697240981192872 14776434768051644139306697240981192872314776434768051644139306697240981192872 115 115 diff --git 
a/tests/queries/0_stateless/02921_bit_hamming_distance_big_int.sql b/tests/queries/0_stateless/02921_bit_hamming_distance_big_int.sql new file mode 100644 index 00000000000..6f241e104b6 --- /dev/null +++ b/tests/queries/0_stateless/02921_bit_hamming_distance_big_int.sql @@ -0,0 +1,12 @@ +SELECT 314776434768051644139306697240981192872::UInt128 AS x, 0::UInt128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 14776434768051644139306697240981192872314776434768051644139306697240981192872::UInt256 AS x, 0::UInt128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 314776434768051644139306697240981192872::UInt128 AS x, 14776434768051644139306697240981192872314776434768051644139306697240981192872::UInt256 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; + +SELECT 314776434768051644139306697240981192872::Int128 AS x, 0::UInt128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 14776434768051644139306697240981192872314776434768051644139306697240981192872::Int256 AS x, 0::UInt128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 314776434768051644139306697240981192872::Int128 AS x, 14776434768051644139306697240981192872314776434768051644139306697240981192872::UInt256 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; + +SELECT 314776434768051644139306697240981192872::UInt128 AS x, 0::Int128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 14776434768051644139306697240981192872314776434768051644139306697240981192872::UInt256 AS x, 0::Int128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 314776434768051644139306697240981192872::UInt128 AS x, 14776434768051644139306697240981192872314776434768051644139306697240981192872::Int256 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; + diff --git a/tests/queries/0_stateless/02921_database_filesystem_path_check.reference b/tests/queries/0_stateless/02921_database_filesystem_path_check.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02921_database_filesystem_path_check.sql b/tests/queries/0_stateless/02921_database_filesystem_path_check.sql new file mode 100644 index 00000000000..d62b629df7b --- /dev/null +++ b/tests/queries/0_stateless/02921_database_filesystem_path_check.sql @@ -0,0 +1,2 @@ +create database db_filesystem ENGINE=Filesystem('/etc'); -- { serverError BAD_ARGUMENTS } +create database db_filesystem ENGINE=Filesystem('../../../../../../../../etc'); -- { serverError BAD_ARGUMENTS } \ No newline at end of file diff --git a/tests/queries/0_stateless/02921_file_engine_size_virtual_column.reference b/tests/queries/0_stateless/02921_file_engine_size_virtual_column.reference new file mode 100644 index 00000000000..2f319dfb812 --- /dev/null +++ b/tests/queries/0_stateless/02921_file_engine_size_virtual_column.reference @@ -0,0 +1,12 @@ +2 +3 +4 +2 +3 +4 +2 +3 +4 +2 +3 +4 diff --git a/tests/queries/0_stateless/02921_file_engine_size_virtual_column.sh b/tests/queries/0_stateless/02921_file_engine_size_virtual_column.sh new file mode 100755 index 00000000000..5dd58ec0d7f --- /dev/null +++ b/tests/queries/0_stateless/02921_file_engine_size_virtual_column.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +echo "1" > $CLICKHOUSE_TEST_UNIQUE_NAME.data1.tsv +echo "12" > $CLICKHOUSE_TEST_UNIQUE_NAME.data2.tsv +echo "123" > $CLICKHOUSE_TEST_UNIQUE_NAME.data3.tsv + +$CLICKHOUSE_LOCAL -q "select _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.data{1,2,3}.tsv') order by _size" +# Run this query twice to check correct behaviour when cache is used +$CLICKHOUSE_LOCAL -q "select _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.data{1,2,3}.tsv') order by _size" + +# Test the same fils in archive +tar -cf $CLICKHOUSE_TEST_UNIQUE_NAME.archive.tar $CLICKHOUSE_TEST_UNIQUE_NAME.data1.tsv $CLICKHOUSE_TEST_UNIQUE_NAME.data2.tsv $CLICKHOUSE_TEST_UNIQUE_NAME.data3.tsv + +$CLICKHOUSE_LOCAL -q "select _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.archive.tar :: $CLICKHOUSE_TEST_UNIQUE_NAME.data{1,2,3}.tsv') order by _size" +$CLICKHOUSE_LOCAL -q "select _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.archive.tar :: $CLICKHOUSE_TEST_UNIQUE_NAME.data{1,2,3}.tsv') order by _size" + +rm $CLICKHOUSE_TEST_UNIQUE_NAME.* + diff --git a/tests/queries/0_stateless/02921_fuzzbits_with_array_join.reference b/tests/queries/0_stateless/02921_fuzzbits_with_array_join.reference new file mode 100644 index 00000000000..39443245b6c --- /dev/null +++ b/tests/queries/0_stateless/02921_fuzzbits_with_array_join.reference @@ -0,0 +1,4 @@ +12 1 +12 2 +100 1 +100 2 diff --git a/tests/queries/0_stateless/02921_fuzzbits_with_array_join.sql b/tests/queries/0_stateless/02921_fuzzbits_with_array_join.sql new file mode 100644 index 00000000000..5d80a5fbea6 --- /dev/null +++ b/tests/queries/0_stateless/02921_fuzzbits_with_array_join.sql @@ -0,0 +1,2 @@ +SELECT length(fuzzBits('stringstring', 0.5)), a FROM numbers(1) ARRAY JOIN [1, 2] AS a; +SELECT length(fuzzBits('stringstring'::FixedString(100), 0.5)), a FROM numbers(1) ARRAY JOIN [1, 2] AS a \ No newline at end of file diff --git a/tests/queries/0_stateless/02922_server_exit_code.reference b/tests/queries/0_stateless/02922_server_exit_code.reference new file mode 100644 index 00000000000..7326d960397 --- /dev/null +++ b/tests/queries/0_stateless/02922_server_exit_code.reference @@ -0,0 +1 @@ +Ok diff --git a/tests/queries/0_stateless/02922_server_exit_code.sh b/tests/queries/0_stateless/02922_server_exit_code.sh new file mode 100755 index 00000000000..60049902410 --- /dev/null +++ b/tests/queries/0_stateless/02922_server_exit_code.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +# We will check that the server's exit code corresponds to the exception code if it was terminated after exception. +# In this example, we provide an invalid path to the server's config, ignore its logs and check the exit code. +# The exception code is 400 = CANNOT_STAT, so the exit code will be 400 % 256. + +${CLICKHOUSE_SERVER_BINARY} -- --path /dev/null 2>/dev/null; [[ "$?" 
== "$((400 % 256))" ]] && echo 'Ok' || echo 'Fail' diff --git a/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.reference b/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.reference new file mode 100644 index 00000000000..369837adcbb --- /dev/null +++ b/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.reference @@ -0,0 +1,12 @@ +a.tsv 24 +b.tsv 33 +c.tsv 33 +a.tsv 24 +b.tsv 33 +c.tsv 33 +a.tsv 24 +b.tsv 33 +c.tsv 33 +a.tsv 24 +b.tsv 33 +c.tsv 33 diff --git a/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.sh b/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.sh new file mode 100755 index 00000000000..51de2117dca --- /dev/null +++ b/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "select _file, _size from url('http://localhost:11111/test/{a,b,c}.tsv', 'One') order by _file" +$CLICKHOUSE_CLIENT -q "select _file, _size from url('http://localhost:11111/test/{a,b,c}.tsv', 'One') order by _file" + +$CLICKHOUSE_CLIENT -q "select _file, _size from s3('http://localhost:11111/test/{a,b,c}.tsv', 'One') order by _file" +$CLICKHOUSE_CLIENT -q "select _file, _size from s3('http://localhost:11111/test/{a,b,c}.tsv', 'One') order by _file" + diff --git a/tests/queries/0_stateless/02923_cte_equality_disjunction.reference b/tests/queries/0_stateless/02923_cte_equality_disjunction.reference new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/tests/queries/0_stateless/02923_cte_equality_disjunction.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/02923_cte_equality_disjunction.sql b/tests/queries/0_stateless/02923_cte_equality_disjunction.sql new file mode 100644 index 00000000000..288bed9e491 --- /dev/null +++ b/tests/queries/0_stateless/02923_cte_equality_disjunction.sql @@ -0,0 +1,12 @@ +--https://github.com/ClickHouse/ClickHouse/issues/5323 +CREATE TABLE test_bug_optimization +( + `path` String +) +ENGINE = MergeTree +ORDER BY path; + +WITH (path = 'test1') OR match(path, 'test2') OR (match(path, 'test3') AND match(path, 'test2')) OR match(path, 'test4') OR (path = 'test5') OR (path = 'test6') AS alias_in_error +SELECT count(1) +FROM test_bug_optimization +WHERE alias_in_error; diff --git a/tests/queries/0_stateless/02923_explain_expired_context.reference b/tests/queries/0_stateless/02923_explain_expired_context.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02923_explain_expired_context.sql b/tests/queries/0_stateless/02923_explain_expired_context.sql new file mode 100644 index 00000000000..68277508eb2 --- /dev/null +++ b/tests/queries/0_stateless/02923_explain_expired_context.sql @@ -0,0 +1,3 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/51321 +EXPLAIN ESTIMATE SELECT any(toTypeName(s)) FROM (SELECT 'bbbbbbbb', toTypeName(s), CAST('', 'LowCardinality(String)'), NULL, CAST('\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0', 'String') AS s) AS t1 FULL OUTER JOIN (SELECT CAST('bbbbb\0\0bbb\0bb\0bb', 'LowCardinality(String)'), CAST(CAST('a', 'String'), 'LowCardinality(String)') AS s GROUP BY CoNnEcTiOn_Id()) AS t2 USING (s) WITH TOTALS; +EXPLAIN ESTIMATE SELECT any(s) FROM (SELECT '' AS s) AS t1 JOIN (SELECT '' AS s GROUP BY connection_id()) AS t2 USING (s); diff --git 
a/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.reference b/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.reference new file mode 100644 index 00000000000..bc42121fb39 --- /dev/null +++ b/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.reference @@ -0,0 +1,6 @@ +2 +3 +4 +2 +3 +4 diff --git a/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.sh b/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.sh new file mode 100755 index 00000000000..dc01687772f --- /dev/null +++ b/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, use-hdfs + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data1.tsv') select 1 settings hdfs_truncate_on_insert=1;" +$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data2.tsv') select 11 settings hdfs_truncate_on_insert=1;" +$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data3.tsv') select 111 settings hdfs_truncate_on_insert=1;" + + +$CLICKHOUSE_CLIENT -q "select _size from hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data*.tsv', auto, 'x UInt64') order by _size" +$CLICKHOUSE_CLIENT -q "select _size from hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data*.tsv', auto, 'x UInt64') order by _size" + diff --git a/tests/queries/0_stateless/02923_join_use_nulls_modulo.reference b/tests/queries/0_stateless/02923_join_use_nulls_modulo.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02923_join_use_nulls_modulo.sql b/tests/queries/0_stateless/02923_join_use_nulls_modulo.sql new file mode 100644 index 00000000000..4134a42c599 --- /dev/null +++ b/tests/queries/0_stateless/02923_join_use_nulls_modulo.sql @@ -0,0 +1,22 @@ +--https://github.com/ClickHouse/ClickHouse/issues/47366 +SELECT + id % 255, + toTypeName(d.id) +FROM +( + SELECT + toLowCardinality(1048577) AS id, + toLowCardinality(9223372036854775807) AS value + GROUP BY + GROUPING SETS ( + (toLowCardinality(1024)), + (id % 10.0001), + ((id % 2147483646) != -9223372036854775807), + ((id % -1) != 255)) + ) AS a + SEMI LEFT JOIN +( + SELECT toLowCardinality(9223372036854775807) AS id + WHERE (id % 2147483646) != NULL +) AS d USING (id) +SETTINGS join_use_nulls=1; diff --git a/tests/queries/0_stateless/02930_client_file_log_comment.reference b/tests/queries/0_stateless/02930_client_file_log_comment.reference new file mode 100644 index 00000000000..09639302c0f --- /dev/null +++ b/tests/queries/0_stateless/02930_client_file_log_comment.reference @@ -0,0 +1,4 @@ +42 +select 42\n /dev/stdin +4242 +select 4242\n foo diff --git a/tests/queries/0_stateless/02930_client_file_log_comment.sh b/tests/queries/0_stateless/02930_client_file_log_comment.sh new file mode 100755 index 00000000000..c425f28ecbe --- /dev/null +++ b/tests/queries/0_stateless/02930_client_file_log_comment.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --queries-file /dev/stdin <<<'select 42' +$CLICKHOUSE_CLIENT -nm -q " + system flush logs; + select query, log_comment from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and event_date >= yesterday() and query = 'select 42\n' and type != 'QueryStart'; +" + +$CLICKHOUSE_CLIENT --log_comment foo --queries-file /dev/stdin <<<'select 4242' +$CLICKHOUSE_CLIENT -nm -q " + system flush logs; + select query, log_comment from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and event_date >= yesterday() and query = 'select 4242\n' and type != 'QueryStart'; +" diff --git a/tests/queries/0_stateless/data_npy/float_16.npy b/tests/queries/0_stateless/data_npy/float_16.npy new file mode 100644 index 00000000000..3224d1e57e3 Binary files /dev/null and b/tests/queries/0_stateless/data_npy/float_16.npy differ diff --git a/tests/queries/0_stateless/data_npy/npy_big.npy b/tests/queries/0_stateless/data_npy/npy_big.npy new file mode 100644 index 00000000000..7dc3c2bf600 Binary files /dev/null and b/tests/queries/0_stateless/data_npy/npy_big.npy differ diff --git a/tests/queries/0_stateless/data_npy/npy_inf_nan_null.npy b/tests/queries/0_stateless/data_npy/npy_inf_nan_null.npy new file mode 100644 index 00000000000..12ee359f665 Binary files /dev/null and b/tests/queries/0_stateless/data_npy/npy_inf_nan_null.npy differ diff --git a/tests/queries/0_stateless/data_parquet/delta_length_byte_array_encoding.parquet b/tests/queries/0_stateless/data_parquet/delta_length_byte_array_encoding.parquet new file mode 100644 index 00000000000..cf785d97dc7 Binary files /dev/null and b/tests/queries/0_stateless/data_parquet/delta_length_byte_array_encoding.parquet differ diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index 4e1184cc9a5..ec44a1e1de9 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -9,11 +9,7 @@ if (ENABLE_CLICKHOUSE_SELF_EXTRACTING) add_subdirectory (self-extracting-executable) endif () -# Utils used in package -add_subdirectory (config-processor) -add_subdirectory (report) - -# Not used in package +# Not used in packages if (ENABLE_UTILS) add_subdirectory (compressor) add_subdirectory (corrector_utf8) diff --git a/utils/backup/print_backup_info.py b/utils/backup/print_backup_info.py deleted file mode 100755 index 54e5c745a8c..00000000000 --- a/utils/backup/print_backup_info.py +++ /dev/null @@ -1,208 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: UTF-8 -*- -""" -print_backup_info: Extract information about a backup from ".backup" file -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Usage: print_backup_info -""" -import sys -import os -import xml.etree.ElementTree as ET - - -def main(): - if len(sys.argv) != 2: - print(__doc__) - sys.exit(1) - backup_xml = sys.argv[1] - - if not os.path.isfile(backup_xml): - print("error: {} does not exist".format(backup_xml)) - sys.exit(1) - - # Process the file line-by-line - tree = ET.parse(backup_xml) - root = tree.getroot() - contents = root.find("contents") - - version_node = root.find("version") - version = int(version_node.text) if (version_node != None) else None - - timestamp_node = root.find("timestamp") - timestamp = timestamp_node.text if (timestamp_node != None) else None - - base_backup_node = root.find("base_backup") - base_backup = base_backup_node.text if (base_backup_node != None) else None - - number_of_files = 0 - size_of_files = 0 - number_of_files_from_base_backup = 0 - size_of_files_from_base_backup = 0 - 
databases = set() - tables = {} - - for file in contents: - name = file.find("name").text - size = int(file.find("size").text) - - use_base_node = file.find("use_base") - use_base = (use_base_node.text == "true") if (use_base_node != None) else False - - if use_base: - base_size_node = file.find("base_size") - base_size = int(base_size_node.text) if (base_size_node != None) else size - else: - base_size = 0 - - data_file_node = file.find("data_file") - data_file = data_file_node.text if (data_file_node != None) else name - - has_data_file = name == data_file - - if has_data_file: - if size > base_size: - number_of_files += 1 - size_of_files += size - base_size - if base_size > 0: - number_of_files_from_base_backup += 1 - size_of_files_from_base_backup += base_size - - table_name = extract_table_name_from_path(name) - if table_name: - if table_name not in tables: - tables[table_name] = [0, 0, 0, 0] - if not name.endswith(".sql") and has_data_file: - table_info = tables[table_name] - if size > base_size: - table_info[0] += 1 - table_info[1] += size - base_size - if base_size > 0: - table_info[2] += 1 - table_info[3] += base_size - tables[table_name] = table_info - - database_name = extract_database_name_from_path(name) - if database_name: - databases.add(database_name) - - size_of_backup = size_of_files + os.path.getsize(backup_xml) - - print(f"version={version}") - print(f"timestamp={timestamp}") - print(f"base_backup={base_backup}") - print(f"size_of_backup={size_of_backup}") - print(f"number_of_files={number_of_files}") - print(f"size_of_files={size_of_files}") - print(f"number_of_files_from_base_backup={number_of_files_from_base_backup}") - print(f"size_of_files_from_base_backup={size_of_files_from_base_backup}") - print(f"number_of_databases={len(databases)}") - print(f"number_of_tables={len(tables)}") - - print() - - print(f"{len(databases)} database(s):") - for database_name in sorted(databases): - print(database_name) - - print() - - print(f"{len(tables)} table(s):") - table_info_format = "{:>70} | {:>20} | {:>20} | {:>26} | {:>30}" - table_info_separator_line = ( - "{:->70}-+-{:->20}-+-{:->20}-+-{:->26}-+-{:->30}".format("", "", "", "", "") - ) - table_info_title_line = table_info_format.format( - "table name", - "num_files", - "size_of_files", - "num_files_from_base_backup", - "size_of_files_from_base_backup", - ) - print(table_info_title_line) - print(table_info_separator_line) - for table_name in sorted(tables): - table_info = tables[table_name] - print( - table_info_format.format( - table_name, table_info[0], table_info[1], table_info[2], table_info[3] - ) - ) - - -# Extracts a table name from a path inside a backup. -# For example, extracts 'default.tbl' from 'shards/1/replicas/1/data/default/tbl/all_0_0_0/data.bin'. -def extract_table_name_from_path(path): - path = strip_shards_replicas_from_path(path) - if not path: - return None - if path.startswith("metadata/"): - path = path[len("metadata/") :] - sep = path.find("/") - if sep == -1: - return None - database_name = path[:sep] - path = path[sep + 1 :] - sep = path.find(".sql") - if sep == -1: - return None - table_name = path[:sep] - return database_name + "." + table_name - if path.startswith("data/"): - path = path[len("data/") :] - sep = path.find("/") - if sep == -1: - return None - database_name = path[:sep] - path = path[sep + 1 :] - sep = path.find("/") - if sep == -1: - return None - table_name = path[:sep] - return database_name + "." 
+ table_name - return None - - -# Extracts a database name from a path inside a backup. -# For example, extracts 'default' from 'shards/1/replicas/1/data/default/tbl/all_0_0_0/data.bin'. -def extract_database_name_from_path(path): - path = strip_shards_replicas_from_path(path) - if not path: - return None - if path.startswith("metadata/"): - path = path[len("metadata/") :] - sep = path.find(".sql") - if sep == -1 or path.find("/") != -1: - return None - return path[:sep] - if path.startswith("data/"): - path = path[len("data/") :] - sep = path.find("/") - if sep == -1: - return None - return path[:sep] - return None - - -# Removes a prefix "shards//replicas//" from a path. -def strip_shards_replicas_from_path(path): - if path.startswith("shards"): - sep = path.find("/") - if sep == -1: - return None - sep = path.find("/", sep + 1) - if sep == -1: - return None - path = path[sep + 1 :] - if path.startswith("replicas"): - sep = path.find("/") - if sep == -1: - return None - sep = path.find("/", sep + 1) - if sep == -1: - return None - path = path[sep + 1 :] - return path - - -if __name__ == "__main__": - main() diff --git a/utils/backupview/clickhouse_backupview.py b/utils/backupview/clickhouse_backupview.py new file mode 100755 index 00000000000..c9eac87f0ae --- /dev/null +++ b/utils/backupview/clickhouse_backupview.py @@ -0,0 +1,1347 @@ +#!/usr/bin/env python3 + +import bisect +import os.path +import xml.etree.ElementTree as ET +from urllib.parse import urlparse +import shutil + +import zipfile # For reading backups from zip archives +import boto3 # For reading backups from S3 + + +## Examples: +## from backupview import open_backup +## +## Get information about the backup's contents: +## backup = open_backup("/path/to/backup/") +## print(backup.get_databases())) +## for database in backup.get_databases(): +## print(backup.get_create_query(database=database)) +## for table in backup.get_tables(database=database): +## print(backup.get_create_query(database=database, table=table)) +## print(backup.get_partitions(database=database, table=table)) +## print(backup.get_parts(database=database, table=table)) +## +## Extract everything from the backup to a folder: +## backup.extract_all(out="/where/to/extract/1/") +## +## Extract the data of a single table: +## backup.extract_table_data(database="mydb", table="mytable", out="/where/to/extract/2/") +## backup.extract_table_data(table="mydb.mytable", part="all_1_1", out="/where/to/extract/3/") +## backup.extract_table_data(database="mydb", table="mytable", partition="2022", out="/where/to/extract/4/") +## backup.extract_table_metadata(table=('mydb', 'mytable'), out="/where/to/extract/5.sql") +## +## Get a list of all files in the backup: +## print(backup.get_files()) +## +## Get information about files in the backup: +## print(backup.get_file_infos()) +## +## Extract files to a folder: +## backup.extract_dir("/shards/1/replicas/1/", out="/where/to/extract/6/") +## backup.extract_file("/shards/1/replicas/1/metadata/mydb/mytable.sql", out="/where/to/extract/7.sql") +## +## Reading from S3: +## backup = open_backup(S3("uri", "access_key_id", "secret_access_key")) +## backup.extract_table_data(table="mydb.mytable", partition="2022", out="/where/to/extract/8/") + + +# Opens a backup for viewing. +def open_backup(backup_name, base_backup=None): + return Backup(backup_name, base_backup=base_backup) + + +# Main class, an instance of Backup is returned by the open_backup() function. 
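Because the Backup class that follows defines __enter__ and __exit__, the viewer can also be driven as a context manager. A minimal sketch, assuming only an existing backup directory (the path here is hypothetical, and the import mirrors the module's own examples above):

from backupview import open_backup

# Hypothetical local backup path; an S3(...) location would work the same way.
with open_backup("/path/to/backup/") as backup:
    print(backup.get_version(), backup.get_timestamp())
    print(backup.get_base_backup_name())  # None unless the backup is incremental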
+class Backup: + def __init__(self, backup_name, base_backup=None): + self.__location = None + self.__close_base_backup = False + self.__base_backup = base_backup + self.__reader = None + + try: + self.__location = Location(backup_name) + if TypeChecks.is_location_like(base_backup): + self.__base_backup = Location(base_backup) + self.__reader = self.__location.create_reader() + self.__parse_backup_metadata() + except: + self.close() + raise + + def close(self): + if self.__reader is not None: + self.__reader.close() + self.__reader = None + if ( + (self.__base_backup is not None) + and (not TypeChecks.is_location_like(self.__base_backup)) + and self.__close_base_backup + ): + self.__base_backup.close() + self.__base_backup = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + # Get general information about the backup. + + # Returns the name of the backup, e.g. File('/path/to/backup/') + def get_name(self): + return str(self.get_location()) + + def get_location(self): + return self.__location + + def __repr__(self): + return "Backup(" + repr(self.get_location()) + ")" + + # Returns the base backup or None if there is no base backup. + def get_base_backup(self): + if TypeChecks.is_location_like(self.__base_backup): + self.__close_base_backup = True + self.__base_backup = open_backup(self.__base_backup) + return self.__base_backup + + def get_base_backup_location(self): + if self.__base_backup is None: + return None + if TypeChecks.is_location_like(self.__base_backup): + return self.__base_backup + return self.__base_backup.get_location() + + def get_base_backup_name(self): + if self.__base_backup is None: + return None + return str(self.get_base_backup_location()) + + # Returns the version of the backup. + def get_version(self): + return self.__version + + # Returns the timestamp of the backup. + def get_timestamp(self): + return self.__timestamp + + # Get high-level information about the contents of the backup. + + # Returns shards stored in the backup. + def get_shards(self): + if self.dir_exists("/shards/"): + return self.get_subdirs("/shards/") + return ["1"] + + # Returns replicas stored in the backup. + def get_replicas(self, shard="1"): + if self.dir_exists(f"/shards/{shard}/replicas/"): + return self.get_subdirs(f"/shards/{shard}/replicas/") + elif self.dir_exists("/replicas/"): + return self.get_subdirs("/replicas/") + else: + return ["1"] + + # Returns databases stored in the backup. + def get_databases(self, shard="1", replica="1"): + res = [] + for path in self.__get_paths_in_backup(shard=shard, replica=replica): + dir = path + "metadata/" + if self.dir_exists(dir): + files = self.get_files_in_dir(dir) + subdirs = self.get_subdirs(dir) + res += [Backup.__unescape_for_filename(name) for name in subdirs] + res += [ + Backup.__unescape_for_filename(os.path.splitext(name)[0]) + for name in files + if name.endswith(".sql") + ] + return sorted(set(res)) + + # Returns tables stored in the backup. + # b.get_tables(database='mydb') returns the names of tables in that database 'mydb'; + # b.get_tables() returns a list of tuples (db, table) for all tables in the backup. 
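Continuing the sketch above, the two documented return shapes of get_tables() look like this in use ('mydb' reuses the illustrative name from the comments):

print(backup.get_tables(database="mydb"))   # plain table names, e.g. ['mytable']
for db, table in backup.get_tables():       # (database, table) tuples across the whole backup
    print(f"{db}.{table}")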
+ def get_tables(self, database=None, shard="1", replica="1"): + if database is None: + databases = self.get_databases(shard=shard, replica=replica) + else: + databases = [database] + res = [] + paths = self.__get_paths_in_backup(shard=shard, replica=replica) + for path in paths: + if self.dir_exists(f"{path}metadata/"): + for db in databases: + dir = path + "metadata/" + Backup.__escape_for_filename(db) + "/" + if self.dir_exists(dir): + files = self.get_files_in_dir(dir) + tables = [ + Backup.__unescape_for_filename(os.path.splitext(name)[0]) + for name in files + if name.endswith(".sql") + ] + if database is None: + tables = [(db, table) for table in tables] + res += tables + return sorted(set(res)) + + # Returns the create query of a table or a database. + # The function can return None if there is no create query in the backup for such table or database. + # b.get_create_query(database='mydb') returns the create query of the database `mydb`; + # b.get_create_query(database='mydb', table='mytable') returns the create query of the table `mydb`.`mytable`; + # b.get_create_query(table='mydb.mytable') and b.get_create_query(table=('mydb', 'mytable')) also returns the create query of the table `mydb`.`mytable`. + def get_create_query(self, table=None, database=None, shard="1", replica="1"): + path = self.get_create_query_path( + table=table, database=database, shard=shard, replica=replica + ) + if path is None: + return None + return self.read_file(path).decode("utf-8") + + def get_table_metadata(self, table, database=None, shard="1", replica="1"): + return self.get_create_query( + table=table, database=database, shard=shard, replica=replica + ) + + def get_database_metadata(self, database, shard="1", replica="1"): + return self.get_create_query(database=database, shard=shard, replica=replica) + + # Like get_create_query(), but returns the path to the corresponding file containing the create query in the backup. + def get_create_query_path(self, table=None, database=None, shard="1", replica="1"): + if database is None: + database, table = Backup.__split_database_table(table) + if table is None: + suffix = "metadata/" + Backup.__escape_for_filename(database) + ".sql" + else: + suffix = ( + "metadata/" + + Backup.__escape_for_filename(database) + + "/" + + Backup.__escape_for_filename(table) + + ".sql" + ) + for path in self.__get_paths_in_backup(shard=shard, replica=replica): + metadata_path = path + suffix + if self.file_exists(metadata_path): + return metadata_path + return None + + def get_table_metadata_path(self, table, database=None, shard="1", replica="1"): + return self.get_create_query_path( + table=table, database=database, shard=shard, replica=replica + ) + + def get_database_metadata_path(self, database, shard="1", replica="1"): + return self.get_create_query_path( + database=database, shard=shard, replica=replica + ) + + # Returns the names of parts of a specified table. + # If the 'partition' parameter is specified, the function returns only parts related to that partition. + # The table can be specified either as b.get_parts(database='mydb', table='mytable') or + # b.get_parts(table='mydb.mytable') or b.get_parts(table=('mydb', 'mytable')). 
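The same sketch extends to parts and partitions; a hedged example that groups part names by partition for one table (the table name is again illustrative):

for partition in backup.get_partitions(table="mydb.mytable"):
    parts = backup.get_parts(table="mydb.mytable", partition=partition)
    print(partition, parts)   # e.g. '2022', ['2022_1_1_0', ...]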
+ def get_parts(self, table, database=None, partition=None, shard="1", replica="1"): + data_path = self.get_table_data_path( + table=table, database=database, shard=shard, replica=replica + ) + if data_path is None: + return [] + part_names = self.get_subdirs(data_path) + if "mutations" in part_names: + part_names.remove("mutations") + if partition is not None: + part_names = [ + part_name + for part_name in part_names + if Backup.__extract_partition_id_from_part_name(part_name) == partition + ] + return part_names + + # Returns the names of partitions of a specified table. + # The table can be specified either as b.get_partitions(database='mydb', table='mytable') or + # b.get_partitions(table='mydb.mytable') or b.get_partitions(table=('mydb', 'mytable')) + def get_partitions(self, table, database=None, shard="1", replica="1"): + parts = self.get_parts( + table=table, database=database, shard=shard, replica=replica + ) + partitions = [] + prev_partition = None + for part in parts: + partition = Backup.__extract_partition_id_from_part_name(part) + if partition != prev_partition: + partitions.append(partition) + prev_partition = partition + return partitions + + # Returns the path to the 'data' folder of a specified table in the backup. + # The function can return None if there is no such folder in the backup. + # The table can be specified either as b.get_table_data_path(database='mydb', table='mytable') + # b.get_table_data_path(table='mydb.mytable') or b.get_table_data_path(table=('mydb', 'mytable')) + def get_table_data_path(self, table, database=None, shard="1", replica="1"): + if database is None: + database, table = Backup.__split_database_table(table) + suffix = ( + "metadata/" + + Backup.__escape_for_filename(database) + + "/" + + Backup.__escape_for_filename(table) + + ".sql" + ) + for path in self.__get_paths_in_backup(shard=shard, replica=replica): + if self.file_exists(path + suffix): + data_path = ( + path + + "data/" + + Backup.__escape_for_filename(database) + + "/" + + Backup.__escape_for_filename(table) + + "/" + ) + return data_path if self.dir_exists(data_path) else None + return None + + # Returns the paths to files in the 'data' folder of a specified table in the backup. + # If any of the parameters 'part' and 'partition' is specified the function returns only the files related to that part or partition. + # The table can be specified either as b.get_table_data_files(database='mydb', table='mytable') + # b.get_table_data_files(table='mydb.mytable') or b.get_table_data_files(table=('mydb', 'mytable')) + def get_table_data_files( + self, table, database=None, part=None, partition=None, shard="1", replica="1" + ): + data_path = self.get_table_data_path( + table=table, database=database, shard=shard, replica=replica + ) + if data_path is None: + return [] + if (part is not None) and (partition is not None): + raise Exception( + "get_table_data_files: `only_part` and `only_partition` cannot be set together" + ) + files = [] + if part is not None: + files = self.get_files_in_dir(os.path.join(data_path, part), recursive=True) + elif partition is not None: + for part in self.get_parts( + table=table, + database=database, + partition=partition, + shard=shard, + replica=replica, + ): + files += self.get_files_in_dir( + os.path.join(data_path, part), recursive=True + ) + else: + files = self.get_files_in_dir(data_path, recursive=True) + return [data_path + file for file in files] + + # Extracts the create query of a table or a database to a specified destination. 
+ # The function returns a tuple (files_extracted, bytes_extracted). + # The function does nothing if there is no create query for such table or database in the backup. + def extract_create_query( + self, table=None, database=None, shard="1", replica="1", out=None, out_path="" + ): + file = self.get_create_query_path( + table=table, database=database, shard=shard, replica=replica + ) + if file is None: + return (0, 0) + return self.extract_file(path=file, out=out, out_path=out_path) + + def extract_table_metadata( + self, table, database=None, shard="1", replica="1", out=None, out_path="" + ): + return self.extract_create_query( + table=table, + database=database, + shard=shard, + replica=replica, + out=out, + out_path=out_path, + ) + + def extract_database_metadata( + self, database, shard="1", replica="1", out=None, out_path="" + ): + return self.extract_create_query( + database=database, shard=shard, replica=replica, out=out, out_path=out_path + ) + + # Extracts the data of a table or a database to a specified destination. + # The function returns a tuple (files_extracted, bytes_extracted). + # The function does nothing if there is no data for such table in the backup. + def extract_table_data( + self, + table, + database=None, + part=None, + partition=None, + shard="1", + replica="1", + out=None, + out_path="", + ): + files = self.get_table_data_files( + table=table, + database=database, + part=part, + partition=partition, + shard=shard, + replica=replica, + ) + data_path = self.get_table_data_path( + table=table, database=database, shard=shard, replica=replica + ) + return self.extract_files( + path=data_path, + files=Backup.__remove_prefix_path(files, data_path), + out=out, + out_path=out_path, + ) + + # Get low-level information about files in the backup. + + # Returns a list of all files in the backup. + def get_files(self): + return self.get_files_in_dir(path="/", recursive=True) + + # Returns True if a specified file exists in the backup. + def file_exists(self, path): + if not path.startswith("/"): + path = "/" + path + return path in self.__file_infos + + # Returns True if a specified folder exists in the backup. + def dir_exists(self, path): + if not path.startswith("/"): + path = "/" + path + if not path.endswith("/"): + path += "/" + if path == "/": + return True + pos = bisect.bisect_left(self.__file_paths, path) + return (pos < len(self.__file_paths)) and self.__file_paths[pos].startswith( + path + ) + + # Returns the size of a file in the backup. + # The function raises an exception of the file doesn't exist. + def get_file_size(self, path): + fi = self.get_file_info(path) + return fi.size + + # Returns the information about a file in the backup. + # The function raises an exception of the file doesn't exist. + def get_file_info(self, path): + if not path.startswith("/"): + path = "/" + path + fi = self.__file_infos.get(path) + if fi is None: + raise Exception(f"File {path} not found in backup {self}") + return fi + + # Returns the information about multiple or all files files in the backup. + def get_file_infos(self, paths=None): + if paths is None: + return self.__file_infos.values() + return [self.get_file_info(path) for path in paths] + + # Finds the information about a file in the backup by its checksum. + # The function raises an exception of the file doesn't exist. 
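+    # Example (illustrative; the checksum value is hypothetical):
+    #     fi = b.get_file_info_by_checksum('557036eda0fb0a277a7caf9b9c8d4dd6')
+    #     print(fi.name, fi.size, fi.data_file)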
+ def get_file_info_by_checksum(self, checksum): + fi = self.__file_infos_by_checksum.get(checksum) + if fi is None: + raise Exception(f"File with checksum={checksum} not found in backup {self}") + return fi + + # Returns all files in a directory inside the backup. + def get_files_in_dir(self, path, recursive=False): + if not path.startswith("/"): + path = "/" + path + if not path.endswith("/"): + path += "/" + if path == "/" and recursive: + return self.__file_paths + pos = bisect.bisect_left(self.__file_paths, path) + files = [] + while pos < len(self.__file_paths): + file = self.__file_paths[pos] + if not file.startswith(path): + break + file = file[len(path) :] + if recursive or (file.find("/") == -1): + files.append(file) + pos += 1 + return files + + # Returns all subdirectories in a directory inside the backup. + def get_subdirs(self, path): + if not path.startswith("/"): + path = "/" + path + if not path.endswith("/"): + path += "/" + pos = bisect.bisect_left(self.__file_paths, path) + subdirs = [] + prev_subdir = "" + while pos < len(self.__file_paths): + file = self.__file_paths[pos] + if not file.startswith(path): + break + file = file[len(path) :] + sep = file.find("/") + if sep != -1: + subdir = file[:sep] + if subdir != prev_subdir: + subdirs.append(subdir) + prev_subdir = subdir + pos += 1 + return subdirs + + # Opens a file for reading from the backup. + def open_file(self, path): + fi = self.get_file_info(path) + if fi.size == 0: + return EmptyFileObj() + elif fi.base_size == 0: + return self.__reader.open_file(fi.data_file) + elif fi.size == fi.base_size: + base_fi = self.get_base_backup().get_file_info_by_checksum(fi.base_checksum) + return self.get_base_backup().open_file(base_fi.name) + else: + base_fi = self.get_base_backup().get_file_info_by_checksum(fi.base_checksum) + base_stream = self.get_base_backup().open_file(base_fi.name) + stream = self.__reader.open_file(fi.data_file) + return ConcatFileObj(base_stream, stream) + + # Reads a file and returns its contents. + def read_file(self, path): + fi = self.get_file_info(path) + if fi.size == 0: + return b"" + elif fi.base_size == 0: + return self.__reader.read_file(fi.data_file) + elif fi.size == fi.base_size: + base_fi = self.get_base_backup().get_file_info_by_checksum(fi.base_checksum) + return self.get_base_backup().read_file(base_fi.name) + else: + base_fi = self.get_base_backup().get_file_info_by_checksum(fi.base_checksum) + return self.get_base_backup().read_file( + base_fi.name + ) + self.__reader.read_file(fi.data_file) + + # Extracts a file from the backup to a specified destination. 
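+    # Example (illustrative; the paths are hypothetical):
+    #     b.extract_file('/metadata/mydb.sql', out='/tmp/restored/', out_path='mydb.sql')
+    # 'out' can be a path, a File() destination, a writer, or a file object opened for writing.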
+ def extract_file(self, path, out=None, out_path="", make_dirs=True): + if (out is None) and (len(out_path) > 0): + return self.extract_file(path, out=out_path, make_dirs=make_dirs) + + if TypeChecks.is_file_opened_for_writing(out): + ostream = out + fi = self.get_file_info(path) + with self.open_file(path) as istream: + shutil.copyfileobj(istream, ostream) + return ExtractionInfo(num_files=1, num_bytes=fi.size) + + if TypeChecks.is_location_like(out): + with Location(out).create_writer() as writer: + return self.extract_file( + path, out=writer, out_path=out_path, make_dirs=make_dirs + ) + + TypeChecks.check_is_writer(out) + writer = out + + fi = self.get_file_info(path) + + if make_dirs: + sep = out_path.rfind("/") + if sep != -1: + subdir = out_path[: sep + 1] + writer.make_dirs(subdir) + + if fi.size == 0: + writer.create_empty_file(out_path) + elif fi.base_size == 0: + self.__reader.extract_file(fi.data_file, writer=writer, out_path=out_path) + elif fi.size == fi.base_size: + base_fi = self.get_base_backup().get_file_info_by_checksum(fi.base_checksum) + self.get_base_backup().extract_file( + path=base_fi.name, out=writer, out_path=out_path + ) + else: + with self.open_file(path) as istream: + with writer.open_file(out_path) as ostream: + shutil.copyfileobj(istream, ostream) + + return ExtractionInfo(num_files=1, num_bytes=fi.size) + + # Extracts multiple files from the backup to a specified destination. + def extract_files(self, path, files, out=None, out_path=""): + if (out is None) and (len(out_path) > 0): + return self.extract_files(path, files, out=out_path) + + if TypeChecks.is_location_like(out): + with Location(out).create_writer() as writer: + return self.extract_files(path, files, out=writer, out_path=out_path) + + TypeChecks.check_is_writer(out) + writer = out + + subdirs = set() + for file in files: + sep = file.rfind("/") + if sep != -1: + subdirs.add(file[: sep + 1]) + for subdir in subdirs: + writer.make_dirs(os.path.join(out_path, subdir)) + + extracted_files_info = ExtractionInfo() + for file in files: + extracted_files_info.add( + self.extract_file( + os.path.join(path, file), + out=writer, + out_path=os.path.join(out_path, file), + make_dirs=False, + ) + ) + + return extracted_files_info + + def extract_dir(self, path, out=None, out_path=""): + files = self.get_files_in_dir(path, recursive=True) + return self.extract_files(path=path, files=files, out=out, out_path=out_path) + + def extract_all(self, out=None, out_path=""): + return self.extract_dir("/", out=out, out_path=out_path) + + # Checks that all files in the backup exist and have the expected sizes. 
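+    # Example (illustrative):
+    #     b.check_files()  # raises an exception if the backup (or its base backup) is inconsistent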
+ def check_files(self): + data_files = {} + for fi in self.__file_infos.values(): + if fi.size > fi.base_size: + data_file = fi.data_file + if data_file in data_files: + prev_fi = data_files[data_file] + if ( + (fi.size != prev_fi.size) + or (fi.checksum != prev_fi.checksum) + or (fi.use_base != prev_fi.use_base) + or (fi.base_size != prev_fi.base_size) + or (fi.base_checksum != prev_fi.base_checksum) + or (fi.encrypted_by_disk != prev_fi.encrypted_by_disk) + ): + raise Exception( + f"Files {prev_fi.name} and {fi.name} uses the same data file but their file infos are different: {prev_fi} and {fi}, backup: {self}" + ) + else: + data_files[data_file] = fi + + if fi.base_size > 0: + fi_base = self.get_base_backup().get_file_info_by_checksum( + fi.base_checksum + ) + if fi.base_size != fi_base.size: + raise Exception( + f"Size of file {fi_base.name} in the base backup is different ({fi.base_size} != {fi_base.size}) " + f"from it's base size in this backup, backup={self}, base_backup={self.get_base_backup()}" + ) + if fi.size < fi_base.size: + raise Exception( + f"File {fi.name} has a smaller size ({fi.size} < {fi_base.size}) than the size of a corresponding file {fi_base.name} " + f"in the base backup, backup={self}, base_backup={self.get_base_backup()}" + ) + + for fi in data_files.values(): + if not self.__reader.file_exists(fi.data_file): + raise Exception( + f"File {fi.data_file} must exist but not found inside backup {self} " + ) + actual_size = self.__reader.get_file_size(fi.data_file) + expected_size = fi.size - fi.base_size + if actual_size != expected_size: + raise Exception( + f"File {fi.data_file} has unexpected size {actual_size} != {expected_size} inside backup {self}" + ) + + if self.get_base_backup() is not None: + self.get_base_backup().check_files() + + def __parse_backup_metadata(self): + metadata_str = self.__reader.read_file(".backup") + + xmlroot = ET.fromstring(metadata_str) + + version_node = xmlroot.find("version") + self.__version = int(version_node.text) if (version_node is not None) else None + + timestamp_node = xmlroot.find("timestamp") + self.__timestamp = timestamp_node.text if (timestamp_node is not None) else None + + if self.__base_backup is None: + base_backup_node = xmlroot.find("base_backup") + if base_backup_node is not None: + self.__base_backup = Location(base_backup_node.text) + + self.__file_infos = {} + self.__file_infos_by_checksum = {} + self.__file_paths = [] + + contents = xmlroot.find("contents") + for file in contents: + name = file.find("name").text + if not name.startswith("/"): + name = "/" + name + + fi = FileInfo(name) + fi.size = int(file.find("size").text) + + if fi.size != 0: + checksum_node = file.find("checksum") + fi.checksum = checksum_node.text + + encrypted_by_disk_node = file.find("encrypted_by_disk") + if encrypted_by_disk_node is not None: + fi.encrypted_by_disk = encrypted_by_disk_node.text == "true" + + base_size_node = file.find("base_size") + if base_size_node is not None: + fi.base_size = int(base_size_node.text) + else: + use_base_node = file.find("use_base") + if (use_base_node is not None) and (use_base_node.text == "true"): + fi.base_size = fi.size + + if fi.base_size > 0: + fi.use_base = True + + if fi.use_base: + if fi.base_size == fi.size: + fi.base_checksum = fi.checksum + else: + base_checksum_node = file.find("base_checksum") + fi.base_checksum = base_checksum_node.text + + if fi.size > fi.base_size: + data_file_node = file.find("data_file") + data_file = ( + data_file_node.text if (data_file_node is not None) 
else fi.name + ) + if not data_file.startswith("/"): + data_file = "/" + data_file + fi.data_file = data_file + + self.__file_infos[fi.name] = fi + if fi.size > 0: + self.__file_infos_by_checksum[fi.checksum] = fi + self.__file_paths.append(fi.name) + + metadata_fi = FileInfo("/.backup") + metadata_fi.size = len(metadata_str) + metadata_fi.data_file = metadata_fi.name + self.__file_infos[metadata_fi.name] = metadata_fi + self.__file_paths.append(metadata_fi.name) + + self.__file_paths.sort() + + def __get_paths_in_backup(self, shard, replica): + paths = [] + if self.dir_exists(f"/shards/{shard}/replicas/{replica}/metadata/"): + paths.append(f"/shards/{shard}/replicas/{replica}/") + if self.dir_exists(f"/shards/{shard}metadata/"): + paths.append(f"/shards/{shard}/") + if self.dir_exists(f"/replicas/{replica}/metadata/"): + paths.append(f"/replicas/{replica}/") + if self.dir_exists(f"/metadata/"): + paths.append(f"/") + return paths + + def __split_database_table(table): + if isinstance(table, tuple): + return table[0], table[1] + elif isinstance(table, str) and (table.find(".") != -1): + return table.split(".", maxsplit=1) + + def __remove_prefix_path(files, prefix_path): + for file in files: + if not file.startswith(prefix_path): + raise Exception( + f"remove_prefix_path: File '{file}' doesn't have the expected prefix '{prefix_path}'" + ) + return [file[len(prefix_path) :] for file in files] + + def __escape_for_filename(text): + res = "" + for c in text: + if (c.isascii() and c.isalnum()) or c == "_": + res += c + else: + for b in c.encode("utf-8"): + res += f"%{b:X}" + return res + + def __unescape_for_filename(text): + res = b"" + i = 0 + while i < len(text): + c = text[i] + if c == "%" and i + 2 < len(text): + res += bytes.fromhex(text[i + 1 : i + 3]) + i += 3 + else: + res += c.encode("ascii") + i += 1 + return res.decode("utf-8") + + def __extract_partition_id_from_part_name(part_name): + underscore = part_name.find("_") + if underscore <= 0: + return None + return part_name[:underscore] + + +# Information about a single file inside a backup. 
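+# Example (illustrative values):
+#     FileInfo(name='/metadata/mydb.sql', size=123,
+#              checksum='0123456789abcdef0123456789abcdef')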
+class FileInfo: + def __init__( + self, + name, + size=0, + checksum="00000000000000000000000000000000", + data_file="", + use_base=False, + base_size=0, + base_checksum="00000000000000000000000000000000", + encrypted_by_disk=False, + ): + self.name = name + self.size = size + self.checksum = checksum + self.data_file = data_file + self.use_base = use_base + self.base_size = base_size + self.base_checksum = base_checksum + self.encrypted_by_disk = encrypted_by_disk + + def __repr__(self): + res = "FileInfo(" + res += f"name='{self.name}'" + res += f", size={self.size}" + if self.checksum != "00000000000000000000000000000000": + res += f", checksum='{self.checksum}'" + if self.data_file: + res += f", data_file='{self.data_file}'" + if self.use_base: + res += f", use_base={self.use_base}" + res += f", base_size={self.base_size}" + res += f", base_checksum='{self.base_checksum}'" + if self.encrypted_by_disk: + res += f", encrypted_by_disk={self.encrypted_by_disk}" + res += ")" + return res + + def __eq__(self, other): + if not isinstance(other, FileInfo): + return False + return ( + (self.name == other.name) + and (self.size == other.size) + and (self.checksum == other.checksum) + and (self.data_file == other.data_file) + and (self.use_base == other.use_base) + and (self.base_size == other.base_size) + and (self.base_checksum == other.base_checksum) + and (self.encrypted_by_disk == other.encrypted_by_disk) + ) + + +# Information about extracted files. +class ExtractionInfo: + def __init__(self, num_files=0, num_bytes=0): + self.num_files = num_files + self.num_bytes = num_bytes + + def __repr__(self): + return f"ExtractionInfo(num_files={self.num_files}, num_bytes={self.num_bytes})" + + def __eq__(self, other): + if not isinstance(other, ExtractionInfo): + return False + return self.num_files == other.num_files and self.num_bytes == other.num_bytes + + def add(self, other): + self.num_files += other.num_files + self.num_bytes += other.num_bytes + + +# File('') can be used to specify the location of a backup or a destination for extracting data. +class File: + def __init__(self, path): + self.path = path + + def __repr__(self): + return f"File('{self.path}')" + + +# S3('', '', '') can be used to specify the location of a backup. +class S3: + def __init__(self, uri, access_key_id=None, secret_access_key=None): + self.uri = uri + self.access_key_id = access_key_id + self.secret_access_key = secret_access_key + + def __repr__(self): + str = f"S3('{self.uri}'" + if self.access_key_id: + str += f", '{self.access_key_id}'" + if self.secret_access_key: + str += f", '{self.secret_access_key}'" + str += ")" + return str + + +#################################################################################################### +# Implementation - helper classes and functions. + + +# Helps to check types. +class TypeChecks: + def is_location_like(obj): + return Location.can_init_from(obj) + + def is_file_opened_for_reading(obj): + return callable(getattr(obj, "read", None)) + + def is_file_opened_for_writing(obj): + return callable(getattr(obj, "write", None)) + + def is_reader(obj): + return ( + isinstance(obj, FileReader) + or isinstance(obj, S3Reader) + or isinstance(obj, ZipReader) + ) + + def is_writer(obj): + return isinstance(obj, FileWriter) + + def check_is_writer(obj): + if TypeChecks.is_writer(obj): + return + raise Exception(f"{obj} is not a writer") + + +# Helps to represents either File() or S3() location and to parse them from a string. 
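+# Example (illustrative paths/URIs):
+#     Location(File('/backups/backup_1/'))         # from a File() object
+#     Location("File('/backups/backup_1/')")       # parsed from a string
+#     Location("S3('https://s3.example.com/bucket/backup_1')")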
+class Location:
+    def __init__(self, obj):
+        self.__location = None
+        if isinstance(obj, Location):
+            self.__location = obj.__location
+        elif isinstance(obj, File) or isinstance(obj, S3):
+            self.__location = obj
+        elif isinstance(obj, str) and len(obj) > 0:
+            self.__location = Location.__parse_location(obj)
+        else:
+            raise Exception(f"Cannot parse a location from {obj}")
+
+    def can_init_from(obj):
+        if isinstance(obj, Location):
+            return True
+        elif isinstance(obj, File) or isinstance(obj, S3):
+            return True
+        elif isinstance(obj, str) and len(obj) > 0:
+            return True
+        else:
+            return False
+
+    def __repr__(self):
+        return repr(self.__location)
+
+    def create_reader(self):
+        if isinstance(self.__location, File):
+            path = self.__location.path
+            path, zip_filename = Location.__split_filename_if_archive(path)
+            reader = FileReader(path)
+            if zip_filename is not None:
+                reader = ZipReader(reader, zip_filename)
+            return reader
+
+        if isinstance(self.__location, S3):
+            uri = self.__location.uri
+            uri, zip_filename = Location.__split_filename_if_archive(uri)
+            reader = S3Reader(
+                uri,
+                self.__location.access_key_id,
+                self.__location.secret_access_key,
+            )
+            if zip_filename is not None:
+                reader = ZipReader(reader, zip_filename)
+            return reader
+
+        raise Exception(f"Couldn't create a reader from {self}")
+
+    def create_writer(self):
+        if isinstance(self.__location, File):
+            return FileWriter(self.__location.path)
+
+        raise Exception(f"Couldn't create a writer to {self}")
+
+    def __parse_location(desc):
+        startpos = len(desc) - len(desc.lstrip())
+
+        opening_parenthesis = desc.find("(", startpos)
+        if opening_parenthesis == -1:
+            endpos = len(desc.rstrip())
+            if startpos == endpos:
+                raise Exception(
+                    f"Couldn't parse a location from '{desc}': empty string"
+                )
+            return File(desc[startpos:endpos])
+
+        closing_parenthesis = desc.find(")", opening_parenthesis)
+        if closing_parenthesis == -1:
+            raise Exception(
+                f"Couldn't parse a location from '{desc}': No closing parenthesis"
+            )
+
+        name = desc[startpos:opening_parenthesis]
+        args = desc[opening_parenthesis + 1 : closing_parenthesis].split(",")
+        args = [Location.__unquote_argument(arg.strip()) for arg in args]
+        endpos = closing_parenthesis + 1
+
+        if name == "File":
+            if len(args) == 1:
+                return File(args[0])
+            else:
+                raise Exception(
+                    f"Couldn't parse a location from '{desc}': File() requires a single argument, got {len(args)} arguments"
+                )
+
+        if name == "S3":
+            if 1 <= len(args) and len(args) <= 3:
+                return S3(*args)
+            else:
+                raise Exception(
+                    f"Couldn't parse a location from '{desc}': S3(uri[, access_key_id[, secret_access_key]]) requires from 1 to 3 arguments, got {len(args)} arguments"
+                )
+
+        raise Exception(
+            f"Couldn't parse a location from '{desc}': Unknown type {name} (only File and S3 are supported)"
+        )
+
+    def __unquote_argument(arg):
+        if arg.startswith("'"):
+            return arg.strip("'")
+        elif arg.startswith('"'):
+            return arg.strip('"')
+        else:
+            return arg
+
+    def __split_filename_if_archive(path):
+        is_archive = path.endswith(".zip") or path.endswith(".zipx")
+        if not is_archive:
+            return path, None
+        sep = path.rfind("/")
+        if sep == -1:
+            return "", path
+        return path[: sep + 1], path[sep + 1 :]
+
+
+# Represents an empty file object.
+class EmptyFileObj:
+    def close(self):
+        pass
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        pass
+
+    def read(self, count=None):
+        return b""
+
+
+# Represents a file object that concatenates data from two file objects.
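+# Example (illustrative):
+#     import io
+#     f = ConcatFileObj(io.BytesIO(b'base '), io.BytesIO(b'delta'))
+#     f.read()  # -> b'base delta'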
+class ConcatFileObj: + def __init__(self, fileobj1, fileobj2): + self.__fileobj1 = fileobj1 + self.__fileobj2 = fileobj2 + self.__first_is_already_read = False + + def close(self): + if self.__fileobj1 is not None: + self.__fileobj1.close() + self.__fileobj1 = None + if self.__fileobj2 is not None: + self.__fileobj2.close() + self.__fileobj2 = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def read(self, count=None): + read_data = b"" + + if count != 0 and not self.__first_is_already_read: + read_data += self.__fileobj1.read(count) + if (count is None) or (count > len(read_data)): + self.__first_is_already_read = True + if count is not None: + count -= len(read_data) + + if count != 0: + read_data += self.__fileobj2.read(count) + + return read_data + + +# Helps to read a File() backup. +class FileReader: + def __init__(self, root_path): + self.__root_path = root_path + + def close(self): + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + pass + + def file_exists(self, path): + return os.path.isfile(self.get_abs_path(path)) + + def get_file_size(self, path): + return os.path.getsize(self.get_abs_path(path)) + + def read_file(self, path): + with self.open_file(path) as f: + return f.read() + + def open_file(self, path): + return open(self.get_abs_path(path), "rb") + + def extract_file(self, path, writer, out_path): + if isinstance(writer, FileWriter): + shutil.copyfile(self.get_abs_path(path), writer.get_abs_path(out_path)) + else: + with self.open_file(path) as istream: + with writer.open_file(out_path) as ostream: + shutil.copyfileobj(istream, ostream) + + def get_abs_path(self, path): + if path.startswith("/"): + path = path[1:] + return os.path.join(self.__root_path, path) + + +# Helps to extract files to a File() destination. +class FileWriter: + def __init__(self, root_path): + self.__root_path = root_path + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + pass + + def open_file(self, path): + return open(self.get_abs_path(path), "wb") + + def create_empty_file(self, path): + with self.open_file(path) as file: + pass + + def make_dirs(self, path): + abs_path = self.get_abs_path(path) + if not os.path.isdir(abs_path): + os.makedirs(abs_path) + + def get_abs_path(self, path): + if path.startswith("/"): + path = path[1:] + return os.path.join(self.__root_path, path) + + +# Helps to read a S3() backup. 
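+# Example (illustrative; the URI and credentials are hypothetical):
+#     b = open_backup(S3('https://s3.us-east-1.amazonaws.com/mybucket/backups/backup_1',
+#                        '<access_key_id>', '<secret_access_key>'))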
+class S3Reader:
+    def __init__(self, uri, access_key_id, secret_access_key):
+        s3_uri = S3URI(uri)
+        self.__bucket = s3_uri.bucket
+        self.__key = s3_uri.key
+        self.__client = None
+
+        try:
+            self.__client = boto3.client(
+                "s3",
+                endpoint_url=s3_uri.endpoint,
+                aws_access_key_id=access_key_id,
+                aws_secret_access_key=secret_access_key,
+            )
+        except:
+            self.close()
+            raise
+
+    def close(self):
+        if self.__client is not None:
+            self.__client.close()
+            self.__client = None
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.close()
+
+    def file_exists(self, path):
+        try:
+            self.__client.head_object(Bucket=self.__bucket, Key=self.get_key(path))
+            return True
+        except botocore.exceptions.ClientError as e:
+            if e.response["Error"]["Code"] == "404":
+                return False
+            else:
+                raise
+
+    def get_file_size(self, path):
+        response = self.__client.head_object(
+            Bucket=self.__bucket, Key=self.get_key(path)
+        )
+        return response["ContentLength"]
+
+    def read_file(self, path):
+        with self.open_file(path) as f:
+            return f.read()
+
+    def open_file(self, path):
+        response = self.__client.get_object(
+            Bucket=self.__bucket, Key=self.get_key(path)
+        )
+        return response["Body"]
+
+    def extract_file(self, path, writer, out_path):
+        if isinstance(writer, FileWriter):
+            self.__client.download_file(
+                Bucket=self.__bucket,
+                Key=self.get_key(path),
+                Filename=writer.get_abs_path(out_path),
+            )
+        else:
+            with writer.open_file(out_path) as ostream:
+                self.__client.download_fileobj(
+                    Bucket=self.__bucket, Key=self.get_key(path), Fileobj=ostream
+                )
+
+    def get_key(self, path):
+        if path.startswith("/"):
+            path = path[1:]
+        return self.__key + "/" + path
+
+
+# Parses an S3 URI, detecting the endpoint, bucket name, and key.
+class S3URI:
+    def __init__(self, uri):
+        parsed_url = urlparse(uri, allow_fragments=False)
+        if not self.__parse_virtual_hosted(parsed_url) and not self.__parse_path_style(
+            parsed_url
+        ):
+            raise Exception(f"S3URI: Could not parse {uri}")
+
+    # https://bucket-name.s3.Region.amazonaws.com/key
+    def __parse_virtual_hosted(self, parsed_url):
+        host = parsed_url.netloc
+        if host.find(".s3") == -1:
+            return False
+        # Split the bucket name off the host, e.g.
+        # 'bucket.s3.region.amazonaws.com' -> ('bucket', '.region.amazonaws.com').
+        self.bucket, new_host = host.split(".s3", maxsplit=1)
+        if len(self.bucket) < 3:
+            return False
+        new_host = "s3" + new_host
+        self.endpoint = parsed_url.scheme + "://" + new_host
+        path = parsed_url.path
+        if path.startswith("/"):
+            path = path[1:]
+        if path.endswith("/"):
+            path = path[:-1]
+        self.key = path
+        return True
+
+    # https://s3.Region.amazonaws.com/bucket-name/key
+    def __parse_path_style(self, parsed_url):
+        self.endpoint = parsed_url.scheme + "://" + parsed_url.netloc
+        path = parsed_url.path
+        if path.startswith("/"):
+            path = path[1:]
+        if path.endswith("/"):
+            path = path[:-1]
+        if path.find("/") == -1:
+            self.bucket = path
+            self.key = ""
+        else:
+            self.bucket, self.key = path.split("/", maxsplit=1)
+        if len(self.bucket) < 3:
+            return False
+        return True
+
+
+# Helps to read a backup from a zip-archive.
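+# Example (illustrative): a location ending with '.zip' or '.zipx' is opened through ZipReader, e.g.
+#     b = open_backup(File('/backups/backup_1.zip'))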
+class ZipReader: + def __init__(self, base_reader, archive_name): + self.__base_reader = None + self.__zipfileobj = None + self.__zipfile = None + + try: + self.__base_reader = base_reader + self.__zipfileobj = base_reader.open_file(archive_name) + self.__zipfile = zipfile.ZipFile(self.__zipfileobj) + except: + self.close() + raise + + def close(self): + if self.__zipfile is not None: + self.__zipfile.close() + self.__zipfile = None + if self.__zipfileobj is not None: + self.__zipfileobj.close() + self.__zipfileobj = None + if self.__base_reader is not None: + self.__base_reader.close() + self.__base_reader = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def file_exists(self, path): + return self.__get_zippath(path).is_file() + + def get_file_size(self, path): + return self.__get_zipinfo(path).file_size + + def read_file(self, path): + return self.__get_zippath(path).read_bytes() + + def open_file(self, path): + return self.__get_zippath(path).open(mode="rb") + + def extract_file(self, path, writer, out_path): + with self.open_file(path) as istream: + with writer.open_file(out_path) as ostream: + shutil.copyfileobj(istream, ostream) + + def __get_zippath(self, path): + if path.startswith("/"): + path = path[1:] + return zipfile.Path(self.__zipfile, path) + + def __get_zipinfo(self, path): + if path.startswith("/"): + path = path[1:] + return self.__zipfile.getinfo(path) diff --git a/utils/backupview/test/test_backup_1.zip b/utils/backupview/test/test_backup_1.zip new file mode 100644 index 00000000000..658c3585364 Binary files /dev/null and b/utils/backupview/test/test_backup_1.zip differ diff --git a/utils/backupview/test/test_backupview.py b/utils/backupview/test/test_backupview.py new file mode 100755 index 00000000000..5c0d546cbfa --- /dev/null +++ b/utils/backupview/test/test_backupview.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python3 + +# Tests for the clickhouse_backupview utility. +# Use pytest ./test_backupview.py to run. 
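+# For example (illustrative; assumes pytest is installed):
+#     python3 -m pytest test_backupview.py -k test_backupview_1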
+ +import pytest + +import os.path +import sys +import tempfile + +script_dir = os.path.dirname(os.path.realpath(__file__)) +backupview_dir = os.path.abspath(os.path.join(script_dir, "..")) +if backupview_dir not in sys.path: + sys.path.append(backupview_dir) +from clickhouse_backupview import open_backup, S3, FileInfo + + +def calculate_num_files(dir): + count = 0 + for _, _, files in os.walk(dir, topdown=False): + count += len([1 for name in files if name]) + return count + + +def calculate_total_size(dir): + total_size = 0 + for root, _, files in os.walk(dir, topdown=False): + total_size += sum( + [os.path.getsize(os.path.join(dir, root, name)) for name in files if name] + ) + return total_size + + +########################################################################################### +# Actual tests + + +def test_backupview_1(): + with open_backup(os.path.join(script_dir, "test_backup_1.zip")) as b: + assert b.get_subdirs("/") == ["shards"] + assert b.dir_exists("/shards") + assert not b.file_exists("/shards") + assert b.get_subdirs("/shards/") == ["1"] + assert b.get_subdirs("/shards/1/replicas/1") == ["data", "metadata"] + assert b.get_subdirs("/shards/1/replicas/1/metadata/") == ["mydb"] + assert b.get_files_in_dir("/shards/1/replicas/1/metadata/") == ["mydb.sql"] + assert b.file_exists("/shards/1/replicas/1/metadata/mydb.sql") + + assert b.get_subdirs("/shards/1/replicas/1/data/mydb/tbl1") == [ + "all_0_0_0", + "all_1_1_0", + "all_2_2_0", + "all_3_3_0", + ] + + assert b.get_files_in_dir("/shards/1/replicas/1/data/mydb/tbl1/all_0_0_0") == [ + "checksums.txt", + "columns.txt", + "count.txt", + "data.bin", + "data.cmrk3", + "default_compression_codec.txt", + "metadata_version.txt", + "serialization.json", + ] + + assert b.get_databases() == ["mydb"] + assert b.get_tables(database="mydb") == ["tbl1", "tbl2"] + assert b.get_tables() == [("mydb", "tbl1"), ("mydb", "tbl2")] + + assert b.get_create_query(database="mydb").startswith("CREATE DATABASE mydb") + + assert b.get_create_query(table="mydb.tbl1").startswith( + "CREATE TABLE mydb.tbl1" + ) + + assert b.get_create_query(table=("mydb", "tbl1")).startswith( + "CREATE TABLE mydb.tbl1" + ) + + assert b.get_create_query(database="mydb", table="tbl2").startswith( + "CREATE TABLE mydb.tbl2" + ) + + assert b.get_partitions(table="mydb.tbl2") == ["all"] + assert b.get_parts(table="mydb.tbl2") == [ + "all_0_0_0", + "all_1_1_0", + "all_2_2_0", + ] + + assert b.get_parts(table="mydb.tbl2", partition="all") == [ + "all_0_0_0", + "all_1_1_0", + "all_2_2_0", + ] + + assert ( + b.get_table_data_path(table="mydb.tbl1") + == "/shards/1/replicas/1/data/mydb/tbl1/" + ) + + assert b.get_table_data_files(table="mydb.tbl1", part="all_1_1_0") == [ + "/shards/1/replicas/1/data/mydb/tbl1/checksums.txt", + "/shards/1/replicas/1/data/mydb/tbl1/columns.txt", + "/shards/1/replicas/1/data/mydb/tbl1/count.txt", + "/shards/1/replicas/1/data/mydb/tbl1/data.bin", + "/shards/1/replicas/1/data/mydb/tbl1/data.cmrk3", + "/shards/1/replicas/1/data/mydb/tbl1/default_compression_codec.txt", + "/shards/1/replicas/1/data/mydb/tbl1/metadata_version.txt", + "/shards/1/replicas/1/data/mydb/tbl1/serialization.json", + ] + + assert ( + b.read_file( + "/shards/1/replicas/1/data/mydb/tbl1/all_1_1_0/default_compression_codec.txt" + ) + == b"CODEC(LZ4)" + ) + + with b.open_file( + "/shards/1/replicas/1/data/mydb/tbl1/all_1_1_0/default_compression_codec.txt" + ) as f: + assert f.read() == b"CODEC(LZ4)" + + assert b.get_file_info( + 
"/shards/1/replicas/1/data/mydb/tbl1/all_1_1_0/default_compression_codec.txt" + ) == FileInfo( + name="/shards/1/replicas/1/data/mydb/tbl1/all_1_1_0/default_compression_codec.txt", + size=10, + checksum="557036eda0fb0a277a7caf9b9c8d4dd6", + data_file="/shards/1/replicas/1/data/mydb/tbl1/all_0_0_0/default_compression_codec.txt", + ) + + with tempfile.TemporaryDirectory() as temp_dir: + res = b.extract_table_data(table="mydb.tbl1", out=temp_dir) + num_files = res.num_files + num_bytes = res.num_bytes + assert calculate_num_files(temp_dir) == num_files + assert calculate_total_size(temp_dir) == num_bytes + assert num_files == 32 + assert num_bytes == 1728 diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index aedb267b3fb..86f59e52482 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1,4 +1,4 @@ -personal_ws-1.1 en 2633 +personal_ws-1.1 en 2646 AArch ACLs ALTERs @@ -261,6 +261,7 @@ FOSDEM FQDN Failover FarmHash +FileLog FilesystemCacheBytes FilesystemCacheElements FilesystemCacheFiles @@ -278,7 +279,6 @@ FilesystemMainPathTotalBytes FilesystemMainPathTotalINodes FilesystemMainPathUsedBytes FilesystemMainPathUsedINodes -FileLog FixedString Flink ForEach @@ -441,6 +441,7 @@ Kolmogorov Kubernetes LDAP LGPL +LIMITs LLDB LLVM's LOCALTIME @@ -571,13 +572,13 @@ NetworkSendPackets NodeJs NuRaft NumHexagons +NumPy NumToString NumToStringClassC NumberOfDatabases NumberOfDetachedByUserParts NumberOfDetachedParts NumberOfTables -NumPy OFNS OLAP OLTP @@ -588,10 +589,10 @@ OSGuestNiceTimeNormalized OSGuestTime OSGuestTimeCPU OSGuestTimeNormalized +OSIOWaitMicroseconds OSIOWaitTime OSIOWaitTimeCPU OSIOWaitTimeNormalized -OSIOWaitMicroseconds OSIdleTime OSIdleTimeCPU OSIdleTimeNormalized @@ -1125,6 +1126,7 @@ azureBlobStorageCluster backend backoff backticks +backupview balancer basename bcrypt @@ -1469,12 +1471,12 @@ fastops fcoverage fibonacci fifo +filelog filesystem filesystemAvailable filesystemCapacity filesystemFree filesystems -filelog finalizeAggregation fips firstLine @@ -1542,6 +1544,7 @@ github glibc globalIn globalNotIn +globbing glushkovds golang googletest @@ -1552,6 +1555,7 @@ graphql greatCircleAngle greatCircleDistance greaterOrEquals +greaterorequals greenspace groupArray groupArrayInsertAt @@ -1737,6 +1741,7 @@ lemmatize lemmatized lengthUTF lessOrEquals +lessorequals levenshtein levenshteinDistance lexicographically @@ -1915,6 +1920,7 @@ notEquals notILike notIn notLike +notequals notretry nowInBlock ntile @@ -2216,6 +2222,7 @@ seektable sequenceCount sequenceMatch sequenceNextNode +seriesPeriodDetectFFT serverTimeZone serverTimezone serverUUID @@ -2345,6 +2352,7 @@ subtractSeconds subtractWeeks subtractYears subtree +subtrees subtype sudo sumCount diff --git a/utils/check-style/check-large-objects.sh b/utils/check-style/check-large-objects.sh new file mode 100755 index 00000000000..6b3fe86d310 --- /dev/null +++ b/utils/check-style/check-large-objects.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# Check that there are no new translation units compiled to an object file larger than a certain size. 
+ +TU_EXCLUDES=( + CastOverloadResolver + AggregateFunctionMax + AggregateFunctionMin + AggregateFunctionUniq + FunctionsConversion + + RangeHashedDictionary + + Aggregator +) + +if find $1 -name '*.o' | xargs wc -c | grep -v total | sort -rn | awk '{ if ($1 > 50000000) print }' \ + | grep -v -f <(printf "%s\n" "${TU_EXCLUDES[@]}") +then + echo "^ It's not allowed to have so large translation units." + exit 1 +fi diff --git a/utils/check-style/check-style b/utils/check-style/check-style index f87d2e292b5..39d371e25d5 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -422,3 +422,10 @@ find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep # Cyrillic characters hiding inside Latin. find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -P --line-number '[a-zA-Z][а-яА-ЯёЁ]|[а-яА-ЯёЁ][a-zA-Z]' && echo "^ Cyrillic characters found in unexpected place." + +# Orphaned header files. +join -v1 <(find $ROOT_PATH/{src,programs,utils} -name '*.h' -printf '%f\n' | sort | uniq) <(find $ROOT_PATH/{src,programs,utils} -name '*.cpp' -or -name '*.c' -or -name '*.h' -or -name '*.S' | xargs grep --no-filename -o -P '[\w-]+\.h' | sort | uniq) | + grep . && echo '^ Found orphan header files.' + +# Don't allow dynamic compiler check with CMake, because we are using hermetic, reproducible, cross-compiled, static (TLDR, good) builds. +ls -1d $ROOT_PATH/contrib/*-cmake | xargs -I@ find @ -name 'CMakeLists.txt' -or -name '*.cmake' | xargs grep --with-filename -i -P 'check_c_compiler_flag|check_cxx_compiler_flag|check_c_source_compiles|check_cxx_source_compiles|check_include_file|check_symbol_exists|cmake_push_check_state|cmake_pop_check_state|find_package|CMAKE_REQUIRED_FLAGS|CheckIncludeFile|CheckCCompilerFlag|CheckCXXCompilerFlag|CheckCSourceCompiles|CheckCXXSourceCompiles|CheckCSymbolExists|CheckCXXSymbolExists' | grep -v Rust && echo "^ It's not allowed to have dynamic compiler checks with CMake." 
diff --git a/utils/check-style/check-workflows b/utils/check-style/check-workflows index df2292d84ca..fb41d5af461 100755 --- a/utils/check-style/check-workflows +++ b/utils/check-style/check-workflows @@ -2,8 +2,14 @@ set -e +WORKING_DIR=$(dirname "$0") +cd "$WORKING_DIR" + GIT_ROOT=$(git rev-parse --show-cdup) -GIT_ROOT=${GIT_ROOT:-.} +GIT_ROOT=${GIT_ROOT:-../../} act --list --directory="$GIT_ROOT" 1>/dev/null 2>&1 || act --list --directory="$GIT_ROOT" 2>&1 actionlint -ignore 'reusable workflow call.+' || : + + +python3 check_reusable_workflows.py diff --git a/utils/check-style/check_reusable_workflows.py b/utils/check-style/check_reusable_workflows.py new file mode 100644 index 00000000000..6fe22786650 --- /dev/null +++ b/utils/check-style/check_reusable_workflows.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 + +from pathlib import Path +from typing import Dict, Iterable, List +import yaml + +git_root = Path(__file__).absolute().parents[2] + + +def check_workflows(paths: Iterable[Path]) -> List[str]: + outputs = [] # type: List[str] + for path in paths: + workflow_object = yaml.safe_load(path.read_bytes()) + workflow_object["file---name"] = path.name + outputs.extend(check_name_override(workflow_object)) + + return outputs + + +def check_name_override(workflow_object: dict) -> List[str]: + outputs = [] # type: List[str] + workflow_file = workflow_object.get("file---name", "") # type: str + jobs = workflow_object.get("jobs", {}) # type: Dict[str, dict] + for name, obj in jobs.items(): + header = f"Workflow '{workflow_file}': Job '{name}': " + name_overriden = obj.get("name", "") + env_name_overriden = obj.get("env", {}).get("GITHUB_JOB_OVERRIDDEN", "") + if name_overriden or env_name_overriden: + if not (name_overriden and env_name_overriden): + outputs.append( + f"{header}job has one of 'name' and 'env.GITHUB_JOB_OVERRIDDEN', " + "but not both" + ) + elif name_overriden != env_name_overriden: + outputs.append( + f"{header}value of 'name' and 'env.GITHUB_JOB_OVERRIDDEN' are not " + f"equal. 
name={name_overriden}; " + f"env.GITHUB_JOB_OVERRIDDEN={env_name_overriden}" + ) + return outputs + + +def main() -> None: + reusable_workflow_paths = git_root.glob(".github/workflows/reusable_*.y*ml") + outputs = check_workflows(reusable_workflow_paths) + if outputs: + print("Found next issues for workflows:") + for o in outputs: + print(o) + + +if __name__ == "__main__": + main() diff --git a/utils/config-processor/CMakeLists.txt b/utils/config-processor/CMakeLists.txt deleted file mode 100644 index 80c3535ef4e..00000000000 --- a/utils/config-processor/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -clickhouse_add_executable (config-processor config-processor.cpp) -target_link_libraries(config-processor PRIVATE dbms) diff --git a/utils/config-processor/config-processor.cpp b/utils/config-processor/config-processor.cpp deleted file mode 100644 index 242a6782b3b..00000000000 --- a/utils/config-processor/config-processor.cpp +++ /dev/null @@ -1,35 +0,0 @@ -#include -#include - -int main(int argc, char ** argv) -{ - try - { - if (argc != 2) - { - std::cerr << "usage: " << argv[0] << " path" << std::endl; - return 3; - } - - DB::ConfigProcessor processor(argv[1], false, true); - DB::XMLDocumentPtr document = processor.processConfig(); - Poco::XML::DOMWriter().writeNode(std::cout, document); - } - catch (Poco::Exception & e) - { - std::cerr << "Exception: " << e.displayText() << std::endl; - return 1; - } - catch (std::exception & e) - { - std::cerr << "std::exception: " << e.what() << std::endl; - return 3; - } - catch (...) - { - std::cerr << "Some exception" << std::endl; - return 2; - } - - return 0; -} diff --git a/utils/keeper-bench/Runner.cpp b/utils/keeper-bench/Runner.cpp index 13855c6d94e..611ca948c53 100644 --- a/utils/keeper-bench/Runner.cpp +++ b/utils/keeper-bench/Runner.cpp @@ -10,10 +10,12 @@ #include #include + namespace CurrentMetrics { extern const Metric LocalThread; extern const Metric LocalThreadActive; + extern const Metric LocalThreadScheduled; } namespace DB::ErrorCodes @@ -106,7 +108,7 @@ Runner::Runner( std::cerr << "---- Run options ----\n" << std::endl; - pool.emplace(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, concurrency); + pool.emplace(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, concurrency); queue.emplace(concurrency); } @@ -461,4 +463,3 @@ Runner::~Runner() pool->wait(); generator->cleanup(*connections[0]); } - diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 0f2684cd91d..ebe138d597a 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,10 +1,16 @@ +v23.10.5.20-stable 2023-11-25 +v23.10.4.25-stable 2023-11-17 v23.10.3.5-stable 2023-11-10 v23.10.2.13-stable 2023-11-08 v23.10.1.1976-stable 2023-11-02 +v23.9.6.20-stable 2023-11-25 +v23.9.5.29-stable 2023-11-17 v23.9.4.11-stable 2023-11-08 v23.9.3.12-stable 2023-10-31 v23.9.2.56-stable 2023-10-19 v23.9.1.1854-stable 2023-09-29 +v23.8.8.20-lts 2023-11-25 +v23.8.7.24-lts 2023-11-17 v23.8.6.16-lts 2023-11-08 v23.8.5.16-lts 2023-10-31 v23.8.4.69-lts 2023-10-19 @@ -31,6 +37,8 @@ v23.4.4.16-stable 2023-06-17 v23.4.3.48-stable 2023-06-12 v23.4.2.11-stable 2023-05-02 v23.4.1.1943-stable 2023-04-27 +v23.3.18.15-lts 2023-11-25 +v23.3.17.13-lts 2023-11-17 v23.3.16.7-lts 2023-11-08 v23.3.15.29-lts 2023-10-31 v23.3.14.78-lts 2023-10-18 diff --git a/utils/prepare-time-trace/prepare-time-trace.sh b/utils/prepare-time-trace/prepare-time-trace.sh index 
7e585db2000..812928e8bd8 100755 --- a/utils/prepare-time-trace/prepare-time-trace.sh +++ b/utils/prepare-time-trace/prepare-time-trace.sh @@ -8,7 +8,7 @@ # See also https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview -< str: with open(VERSIONS_FILE, "r", encoding="utf-8") as fd: versions = [line.split(maxsplit=1)[0][1:] for line in fd.readlines()] - # The versions in VERSIONS_FILE are ordered ascending, so the first one is - # the greatest one. We may have supported versions in the previous year - greatest_year = int(versions[0].split(".", maxsplit=1)[0]) - unsupported_year = greatest_year - 2 + supported_year = 0 # set automatically when all supported versions are filled # 3 regular versions regular = [] # type: List[str] max_regular = 3 @@ -82,14 +79,12 @@ def generate_supported_versions() -> str: lts.append(version) to_append = f"| {version} | ✔️ |" if to_append: - if len(regular) == max_regular and len(lts) == max_lts: - # if we reached the max number of supported versions, the rest - # are unsopported, so year.* will be used - unsupported_year = min(greatest_year - 1, year) + if len(regular) == max_regular or len(lts) == max_lts: + supported_year = year table.append(to_append) continue - if year <= unsupported_year: - # The whole year is unsopported + if year < supported_year: + # The whole year is unsupported version = f"{year}.*" if not version in unsupported: unsupported.append(version)