diff --git a/.github/workflows/cancel.yml b/.github/workflows/cancel.yml index 03e1007b841..9b96a1dbffa 100644 --- a/.github/workflows/cancel.yml +++ b/.github/workflows/cancel.yml @@ -1,7 +1,7 @@ name: Cancel on: # yamllint disable-line rule:truthy workflow_run: - workflows: ["CIGithubActions"] + workflows: ["CIGithubActions", "ReleaseCI"] types: - requested jobs: diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index fda43824fc8..daa05e19798 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -9,6 +9,9 @@ on: # yamllint disable-line rule:truthy - opened branches: - master +########################################################################################## +##################################### SMALL CHECKS ####################################### +########################################################################################## jobs: CheckLabels: runs-on: [self-hosted, style-checker] @@ -82,286 +85,6 @@ jobs: docker kill $(docker ps -q) ||: docker rm -f $(docker ps -a -q) ||: sudo rm -fr $TEMP_PATH - BuilderDebDebug: - needs: DockerHubPush - if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} - runs-on: [self-hosted, builder] - steps: - - name: Download changed images - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ runner.temp }}/images_path - - name: Check out repository code - uses: actions/checkout@v2 - with: - submodules: 'recursive' - fetch-depth: 0 # otherwise we will have no info about contributors - - name: Build - env: - TEMP_PATH: ${{runner.temp}}/build_check - IMAGES_PATH: ${{runner.temp}}/images_path - REPO_COPY: ${{runner.temp}}/build_check/ClickHouse - CACHES_PATH: ${{runner.temp}}/../ccaches - CHECK_NAME: 'ClickHouse build check (actions)' - BUILD_NUMBER: 7 - run: | - sudo rm -fr $TEMP_PATH - mkdir -p $TEMP_PATH - cp -r $GITHUB_WORKSPACE $TEMP_PATH - cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER - - name: Upload build URLs to artifacts - uses: actions/upload-artifact@v2 - with: - name: ${{ env.BUILD_NAME }} - path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json - - name: Cleanup - if: always() - run: | - docker kill $(docker ps -q) ||: - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr $TEMP_PATH - BuilderDebAsan: - needs: DockerHubPush - if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} - runs-on: [self-hosted, builder] - steps: - - name: Download changed images - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ runner.temp }}/images_path - - name: Check out repository code - uses: actions/checkout@v2 - with: - submodules: 'recursive' - fetch-depth: 0 # otherwise we will have no info about contributors - - name: Build - env: - TEMP_PATH: ${{runner.temp}}/build_check - IMAGES_PATH: ${{runner.temp}}/images_path - REPO_COPY: ${{runner.temp}}/build_check/ClickHouse - CACHES_PATH: ${{runner.temp}}/../ccaches - CHECK_NAME: 'ClickHouse build check (actions)' - BUILD_NUMBER: 3 - run: | - sudo rm -fr $TEMP_PATH - mkdir -p $TEMP_PATH - cp -r $GITHUB_WORKSPACE $TEMP_PATH - cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER - - name: Upload build URLs to artifacts - uses: actions/upload-artifact@v2 - with: - name: ${{ env.BUILD_NAME }} - path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json - - 
name: Cleanup - if: always() - run: | - docker kill $(docker ps -q) ||: - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr $TEMP_PATH - BuilderReport: - needs: [BuilderDebDebug, BuilderDebAsan] - runs-on: [self-hosted, style-checker] - steps: - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{runner.temp}}/reports_dir - - name: Check out repository code - uses: actions/checkout@v2 - - name: Report Builder - env: - TEMP_PATH: ${{runner.temp}}/report_check - REPORTS_PATH: ${{runner.temp}}/reports_dir - CHECK_NAME: 'ClickHouse build check (actions)' - run: | - sudo rm -fr $TEMP_PATH - mkdir -p $TEMP_PATH - cd $GITHUB_WORKSPACE/tests/ci - python3 build_report_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - docker kill $(docker ps -q) ||: - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr $TEMP_PATH - FunctionalStatelessTestDebug: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{runner.temp}}/reports_dir - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - env: - TEMP_PATH: ${{runner.temp}}/stateless_debug - REPORTS_PATH: ${{runner.temp}}/reports_dir - CHECK_NAME: 'Stateless tests (debug, actions)' - REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse - REQUIRED_BUILD_NUMBER: 7 - KILL_TIMEOUT: 10800 - run: | - sudo rm -fr $TEMP_PATH - mkdir -p $TEMP_PATH - cp -r $GITHUB_WORKSPACE $TEMP_PATH - cd $REPO_COPY/tests/ci - python3 functional_test_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER $KILL_TIMEOUT - - name: Cleanup - if: always() - run: | - docker kill $(docker ps -q) ||: - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr $TEMP_PATH - FunctionalStatefulTestDebug: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{runner.temp}}/reports_dir - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - env: - TEMP_PATH: ${{runner.temp}}/stateful_debug - REPORTS_PATH: ${{runner.temp}}/reports_dir - CHECK_NAME: 'Stateful tests (debug, actions)' - REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse - REQUIRED_BUILD_NUMBER: 7 - KILL_TIMEOUT: 3600 - run: | - sudo rm -fr $TEMP_PATH - mkdir -p $TEMP_PATH - cp -r $GITHUB_WORKSPACE $TEMP_PATH - cd $REPO_COPY/tests/ci - python3 functional_test_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER $KILL_TIMEOUT - - name: Cleanup - if: always() - run: | - docker kill $(docker ps -q) ||: - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr $TEMP_PATH - StressTestDebug: - needs: [BuilderDebDebug] - runs-on: [self-hosted, stress-tester] - steps: - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{runner.temp}}/reports_dir - - name: Check out repository code - uses: actions/checkout@v2 - - name: Stress test - env: - TEMP_PATH: ${{runner.temp}}/stress_debug - REPORTS_PATH: ${{runner.temp}}/reports_dir - CHECK_NAME: 'Stress tests (debug, actions)' - REPO_COPY: ${{runner.temp}}/stress_debug/ClickHouse - REQUIRED_BUILD_NUMBER: 7 - run: | - sudo rm -fr $TEMP_PATH - mkdir -p $TEMP_PATH - cp -r $GITHUB_WORKSPACE $TEMP_PATH - cd $REPO_COPY/tests/ci - python3 stress_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER - - name: Cleanup - if: always() - run: | - docker kill $(docker ps -q) ||: - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr $TEMP_PATH - ASTFuzzerTestDebug: - needs: 
[BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{runner.temp}}/reports_dir - - name: Check out repository code - uses: actions/checkout@v2 - - name: Fuzzer - env: - TEMP_PATH: ${{runner.temp}}/ast_fuzzer_debug - REPORTS_PATH: ${{runner.temp}}/reports_dir - CHECK_NAME: 'AST fuzzer (debug, actions)' - REPO_COPY: ${{runner.temp}}/ast_fuzzer_debug/ClickHouse - REQUIRED_BUILD_NUMBER: 7 - run: | - sudo rm -fr $TEMP_PATH - mkdir -p $TEMP_PATH - cp -r $GITHUB_WORKSPACE $TEMP_PATH - cd $REPO_COPY/tests/ci - python3 ast_fuzzer_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER - - name: Cleanup - if: always() - run: | - docker kill $(docker ps -q) ||: - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr $TEMP_PATH - IntegrationTestsAsan: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{runner.temp}}/reports_dir - - name: Check out repository code - uses: actions/checkout@v2 - - name: Integration test - env: - TEMP_PATH: ${{runner.temp}}/integration_tests_debug - REPORTS_PATH: ${{runner.temp}}/reports_dir - CHECK_NAME: 'Integration tests (asan, actions)' - REPO_COPY: ${{runner.temp}}/integration_tests_debug/ClickHouse - REQUIRED_BUILD_NUMBER: 3 - run: | - sudo rm -fr $TEMP_PATH - mkdir -p $TEMP_PATH - cp -r $GITHUB_WORKSPACE $TEMP_PATH - cd $REPO_COPY/tests/ci - python3 integration_test_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER - - name: Cleanup - if: always() - run: | - docker kill $(docker ps -q) ||: - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr $TEMP_PATH - UnitTestsAsan: - needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{runner.temp}}/reports_dir - - name: Check out repository code - uses: actions/checkout@v2 - - name: Unit test - env: - TEMP_PATH: ${{runner.temp}}/unit_tests_asan - REPORTS_PATH: ${{runner.temp}}/reports_dir - CHECK_NAME: 'Unit tests (asan, actions)' - REPO_COPY: ${{runner.temp}}/unit_tests_asan/ClickHouse - REQUIRED_BUILD_NUMBER: 3 - run: | - sudo rm -fr $TEMP_PATH - mkdir -p $TEMP_PATH - cp -r $GITHUB_WORKSPACE $TEMP_PATH - cd $REPO_COPY/tests/ci - python3 unit_tests_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER - - name: Cleanup - if: always() - run: | - docker kill $(docker ps -q) ||: - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr $TEMP_PATH FastTest: needs: DockerHubPush if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} @@ -409,8 +132,1355 @@ jobs: docker kill $(docker ps -q) ||: docker rm -f $(docker ps -a -q) ||: sudo rm -fr $TEMP_PATH + CompatibilityCheck: + needs: [BuilderDebRelease] + if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} + runs-on: [self-hosted, style-checker] + steps: + - name: Check out repository code + uses: actions/checkout@v2 + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: CompatibilityCheck + env: + TEMP_PATH: ${{runner.temp}}/compatibility_check + REPO_COPY: ${{runner.temp}}/compatibility_check/ClickHouse + REPORTS_PATH: ${{runner.temp}}/reports_dir + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd 
$REPO_COPY/tests/ci && python3 compatibility_check.py 0 + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + SplitBuildSmokeTest: + needs: [BuilderDebSplitted] + if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} + runs-on: [self-hosted, style-checker] + steps: + - name: Check out repository code + uses: actions/checkout@v2 + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Split build check + env: + TEMP_PATH: ${{runner.temp}}/split_build_check + REPO_COPY: ${{runner.temp}}/split_build_check/ClickHouse + REPORTS_PATH: ${{runner.temp}}/reports_dir + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 split_build_smoke_check.py + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +######################################################################################### +#################################### ORDINARY BUILDS #################################### +######################################################################################### + BuilderDebRelease: + needs: [DockerHubPush, FastTest] + if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 0 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderBinRelease: + needs: [DockerHubPush, FastTest] + if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 9 + run: | + sudo rm 
-fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebAsan: + needs: [DockerHubPush, FastTest] + if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 3 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebUBsan: + needs: [DockerHubPush, FastTest] + if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 4 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebTsan: + needs: [DockerHubPush, FastTest] + if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: 
actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 5 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebMsan: + needs: [DockerHubPush, FastTest] + if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 6 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebDebug: + needs: [DockerHubPush, FastTest] + if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 7 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH 
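+# NOTE (assumed semantics, inferred from the jobs in this workflow rather than documented):
+# BUILD_NUMBER is passed to tests/ci/build_check.py and appears to select one entry from
+# its list of build configurations. For 'ClickHouse build check (actions)' the jobs above
+# use 0 = deb release, 3 = asan, 4 = ubsan, 5 = tsan, 6 = msan, 7 = debug, 9 = binary
+# release; 'ClickHouse special build check (actions)' below numbers its own list
+# (1 = split build).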
+########################################################################################## +##################################### SPECIAL BUILDS ##################################### +########################################################################################## + BuilderDebSplitted: + needs: [DockerHubPush, FastTest] + if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse special build check (actions)' + BUILD_NUMBER: 1 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################ +##################################### BUILD REPORTER ####################################### +############################################################################################ + BuilderReport: + needs: + - BuilderDebRelease + - BuilderBinRelease + - BuilderDebAsan + - BuilderDebTsan + - BuilderDebUBsan + - BuilderDebMsan + - BuilderDebDebug + - BuilderDebSplitted + runs-on: [self-hosted, style-checker] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Report Builder + env: + TEMP_PATH: ${{runner.temp}}/report_check + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'ClickHouse build check (actions)' + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cd $GITHUB_WORKSPACE/tests/ci + python3 build_report_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################## +########################### FUNCTIONAL STATELESS TESTS ####################################### +############################################################################################## + FunctionalStatelessTestRelease: + needs: [BuilderDebRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_release + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (release, actions)' + REPO_COPY: 
${{runner.temp}}/stateless_release/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_asan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (address, actions)' + REPO_COPY: ${{runner.temp}}/stateless_asan/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_tsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (thread, actions)' + REPO_COPY: ${{runner.temp}}/stateless_tsan/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_ubsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (ubsan, actions)' + REPO_COPY: ${{runner.temp}}/stateless_ubsan/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestMsan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_memory + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (memory, actions)' + REPO_COPY: ${{runner.temp}}/stateless_memory/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r 
$GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestDebug: + needs: [BuilderDebDebug] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (debug, actions)' + REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestFlakyCheck: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_flaky_asan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests flaky check (address, actions)' + REPO_COPY: ${{runner.temp}}/stateless_flaky_asan/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################## +############################ FUNCTIONAL STATEFUL TESTS ####################################### +############################################################################################## + FunctionalStatefulTestRelease: + needs: [BuilderDebRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_release + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (release, actions)' + REPO_COPY: ${{runner.temp}}/stateful_release/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_asan + REPORTS_PATH: ${{runner.temp}}/reports_dir + 
CHECK_NAME: 'Stateful tests (address, actions)' + REPO_COPY: ${{runner.temp}}/stateful_asan/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_tsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (thread, actions)' + REPO_COPY: ${{runner.temp}}/stateful_tsan/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestMsan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_msan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (memory, actions)' + REPO_COPY: ${{runner.temp}}/stateful_msan/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_ubsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (ubsan, actions)' + REPO_COPY: ${{runner.temp}}/stateful_ubsan/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestDebug: + needs: [BuilderDebDebug] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (debug, actions)' + REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr 
$TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################## +######################################### STRESS TESTS ####################################### +############################################################################################## + StressTestAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_address + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (address, actions)' + REPO_COPY: ${{runner.temp}}/stress_address/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + StressTestTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_thread + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (thread, actions)' + REPO_COPY: ${{runner.temp}}/stress_thread/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + StressTestMsan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_memory + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (memory, actions)' + REPO_COPY: ${{runner.temp}}/stress_memory/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + StressTestUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_undefined + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (undefined, actions)' + REPO_COPY: ${{runner.temp}}/stress_undefined/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd 
$REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + StressTestDebug: + needs: [BuilderDebDebug] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (debug, actions)' + REPO_COPY: ${{runner.temp}}/stress_debug/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################## +##################################### AST FUZZERS ############################################ +############################################################################################## + ASTFuzzerTestAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + env: + TEMP_PATH: ${{runner.temp}}/ast_fuzzer_asan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'AST fuzzer (ASan, actions)' + REPO_COPY: ${{runner.temp}}/ast_fuzzer_asan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 ast_fuzzer_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + ASTFuzzerTestTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + env: + TEMP_PATH: ${{runner.temp}}/ast_fuzzer_tsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'AST fuzzer (TSan, actions)' + REPO_COPY: ${{runner.temp}}/ast_fuzzer_tsan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 ast_fuzzer_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + ASTFuzzerTestUBSan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + env: + TEMP_PATH: ${{runner.temp}}/ast_fuzzer_ubsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'AST fuzzer (UBSan, actions)' + REPO_COPY: ${{runner.temp}}/ast_fuzzer_ubsan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 ast_fuzzer_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker 
kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + ASTFuzzerTestMSan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + env: + TEMP_PATH: ${{runner.temp}}/ast_fuzzer_msan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'AST fuzzer (MSan, actions)' + REPO_COPY: ${{runner.temp}}/ast_fuzzer_msan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 ast_fuzzer_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + ASTFuzzerTestDebug: + needs: [BuilderDebDebug] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + env: + TEMP_PATH: ${{runner.temp}}/ast_fuzzer_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'AST fuzzer (debug, actions)' + REPO_COPY: ${{runner.temp}}/ast_fuzzer_debug/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 ast_fuzzer_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################# +############################# INTEGRATION TESTS ############################################# +############################################################################################# + IntegrationTestsAsan: + needs: [BuilderDebAsan, FunctionalStatelessTestAsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + env: + TEMP_PATH: ${{runner.temp}}/integration_tests_asan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Integration tests (asan, actions)' + REPO_COPY: ${{runner.temp}}/integration_tests_asan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + IntegrationTestsTsan: + needs: [BuilderDebTsan, FunctionalStatelessTestTsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + env: + TEMP_PATH: ${{runner.temp}}/integration_tests_tsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Integration tests (thread, actions)' + REPO_COPY: ${{runner.temp}}/integration_tests_tsan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + 
if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + IntegrationTestsRelease: + needs: [BuilderDebRelease, FunctionalStatelessTestRelease] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + env: + TEMP_PATH: ${{runner.temp}}/integration_tests_release + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Integration tests (release, actions)' + REPO_COPY: ${{runner.temp}}/integration_tests_release/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################# +#################################### UNIT TESTS ############################################# +############################################################################################# + UnitTestsAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + env: + TEMP_PATH: ${{runner.temp}}/unit_tests_asan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Unit tests (asan, actions)' + REPO_COPY: ${{runner.temp}}/unit_tests_asan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 unit_tests_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + UnitTestsReleaseClang: + needs: [BuilderBinRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + env: + TEMP_PATH: ${{runner.temp}}/unit_tests_release_clang + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Unit tests (release-clang, actions)' + REPO_COPY: ${{runner.temp}}/unit_tests_release_clang/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 unit_tests_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + UnitTestsTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + env: + TEMP_PATH: ${{runner.temp}}/unit_tests_tsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Unit tests (tsan, actions)' + REPO_COPY: ${{runner.temp}}/unit_tests_tsan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 unit_tests_check.py "$CHECK_NAME" + - name: Cleanup + if: 
always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + UnitTestsMsan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + env: + TEMP_PATH: ${{runner.temp}}/unit_tests_msan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Unit tests (msan, actions)' + REPO_COPY: ${{runner.temp}}/unit_tests_msan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 unit_tests_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + UnitTestsUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + env: + TEMP_PATH: ${{runner.temp}}/unit_tests_ubsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Unit tests (ubsan, actions)' + REPO_COPY: ${{runner.temp}}/unit_tests_ubsan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 unit_tests_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH FinishCheck: - needs: [StyleCheck, DockerHubPush, CheckLabels, BuilderReport, FastTest, FunctionalStatelessTestDebug, FunctionalStatefulTestDebug, DocsCheck, StressTestDebug, ASTFuzzerTestDebug, IntegrationTestsAsan, PVSCheck, UnitTestsAsan] + needs: + - StyleCheck + - DockerHubPush + - CheckLabels + - BuilderReport + - FastTest + - FunctionalStatelessTestDebug + - FunctionalStatelessTestRelease + - FunctionalStatelessTestAsan + - FunctionalStatelessTestTsan + - FunctionalStatelessTestMsan + - FunctionalStatelessTestUBsan + - FunctionalStatefulTestDebug + - FunctionalStatefulTestRelease + - FunctionalStatefulTestAsan + - FunctionalStatefulTestTsan + - FunctionalStatefulTestMsan + - FunctionalStatefulTestUBsan + - DocsCheck + - StressTestDebug + - StressTestAsan + - StressTestTsan + - StressTestMsan + - StressTestUBsan + - ASTFuzzerTestDebug + - ASTFuzzerTestAsan + - ASTFuzzerTestTsan + - ASTFuzzerTestMSan + - ASTFuzzerTestUBSan + - IntegrationTestsAsan + - IntegrationTestsRelease + - IntegrationTestsTsan + - PVSCheck + - UnitTestsAsan + - UnitTestsTsan + - UnitTestsMsan + - UnitTestsUBsan + - UnitTestsReleaseClang + - SplitBuildSmokeTest + - CompatibilityCheck runs-on: [self-hosted, style-checker] steps: - name: Check out repository code diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml new file mode 100644 index 00000000000..fc2e78a21e3 --- /dev/null +++ b/.github/workflows/master.yml @@ -0,0 +1,1384 @@ +name: MasterCI +on: # yamllint disable-line rule:truthy + push: + branches: + - 'master' +jobs: + DockerHubPush: + runs-on: [self-hosted, style-checker] + steps: + - name: Check out repository code + uses: actions/checkout@v2 + - name: Images check + run: | + cd $GITHUB_WORKSPACE/tests/ci + python3 docker_images_check.py + - name: Upload images files to artifacts + uses: actions/upload-artifact@v2 + with: 
+ name: changed_images + path: ${{ runner.temp }}/docker_images_check/changed_images.json + StyleCheck: + needs: DockerHubPush + runs-on: [self-hosted, style-checker] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/style_check + - name: Check out repository code + uses: actions/checkout@v2 + - name: Style Check + env: + TEMP_PATH: ${{ runner.temp }}/style_check + run: | + cd $GITHUB_WORKSPACE/tests/ci + python3 style_check.py + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + PVSCheck: + needs: DockerHubPush + if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} + runs-on: [self-hosted, func-tester] + steps: + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + - name: PVS Check + env: + TEMP_PATH: ${{runner.temp}}/pvs_check + REPO_COPY: ${{runner.temp}}/pvs_check/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 pvs_check.py + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + CompatibilityCheck: + needs: [BuilderDebRelease] + runs-on: [self-hosted, style-checker] + steps: + - name: Check out repository code + uses: actions/checkout@v2 + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: CompatibilityCheck + env: + TEMP_PATH: ${{runner.temp}}/compatibility_check + REPO_COPY: ${{runner.temp}}/compatibility_check/ClickHouse + REPORTS_PATH: ${{runner.temp}}/reports_dir + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 compatibility_check.py 0 + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + SplitBuildSmokeTest: + needs: [BuilderDebSplitted] + if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} + runs-on: [self-hosted, style-checker] + steps: + - name: Check out repository code + uses: actions/checkout@v2 + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Split build check + env: + TEMP_PATH: ${{runner.temp}}/split_build_check + REPO_COPY: ${{runner.temp}}/split_build_check/ClickHouse + REPORTS_PATH: ${{runner.temp}}/reports_dir + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 split_build_smoke_check.py + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +######################################################################################### +#################################### ORDINARY BUILDS #################################### +######################################################################################### + BuilderDebRelease: + needs: [DockerHubPush] + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp 
}}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 0 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderBinRelease: + needs: [DockerHubPush] + if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 9 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebAsan: + needs: [DockerHubPush] + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 3 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebUBsan: + needs: [DockerHubPush] + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: 
actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 4 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebTsan: + needs: [DockerHubPush] + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 5 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebMsan: + needs: [DockerHubPush] + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 6 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebDebug: + needs: [DockerHubPush] + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ 
runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 7 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +########################################################################################## +##################################### SPECIAL BUILDS ##################################### +########################################################################################## + BuilderDebSplitted: + needs: [DockerHubPush] + if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }} + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse special build check (actions)' + BUILD_NUMBER: 1 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################ +##################################### BUILD REPORTER ####################################### +############################################################################################ + BuilderReport: + needs: + - BuilderDebRelease + - BuilderBinRelease + - BuilderDebAsan + - BuilderDebTsan + - BuilderDebUBsan + - BuilderDebMsan + - BuilderDebDebug + - BuilderDebSplitted + runs-on: [self-hosted, style-checker] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Report Builder + env: + TEMP_PATH: ${{runner.temp}}/report_check + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'ClickHouse build check (actions)' + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cd $GITHUB_WORKSPACE/tests/ci + python3 build_report_check.py 
"$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################## +########################### FUNCTIONAl STATELESS TESTS ####################################### +############################################################################################## + FunctionalStatelessTestRelease: + needs: [BuilderDebRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (release, actions)' + REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (address, actions)' + REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_tsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (thread, actions)' + REPO_COPY: ${{runner.temp}}/stateless_tsan/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_ubsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (ubsan, actions)' + REPO_COPY: ${{runner.temp}}/stateless_ubsan/ClickHouse + KILL_TIMEOUT: 10800 + run: 
| + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestMsan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_memory + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (memory, actions)' + REPO_COPY: ${{runner.temp}}/stateless_memory/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestDebug: + needs: [BuilderDebDebug] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (debug, actions)' + REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################## +############################ FUNCTIONAL STATEFUL TESTS ####################################### +############################################################################################## + FunctionalStatefulTestRelease: + needs: [BuilderDebRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (release, actions)' + REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_debug + REPORTS_PATH: 
${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (address, actions)' + REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_tsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (thread, actions)' + REPO_COPY: ${{runner.temp}}/stateful_tsan/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestMsan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_msan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (memory, actions)' + REPO_COPY: ${{runner.temp}}/stateful_msan/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_ubsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (ubsan, actions)' + REPO_COPY: ${{runner.temp}}/stateful_ubsan/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestDebug: + needs: [BuilderDebDebug] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (debug, actions)' + REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse + KILL_TIMEOUT: 3600 
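+ # KILL_TIMEOUT is in seconds and is forwarded to functional_test_check.py below;
+ # presumably the harness uses it to abort a hung run. Stateful suites get
+ # 3600 (1 h) here, while the stateless suites above get 10800 (3 h).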
+ run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################## +######################################### STRESS TESTS ####################################### +############################################################################################## + StressTestAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_thread + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (address, actions)' + REPO_COPY: ${{runner.temp}}/stress_thread/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + StressTestTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_thread + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (thread, actions)' + REPO_COPY: ${{runner.temp}}/stress_thread/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + StressTestMsan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_memory + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (memory, actions)' + REPO_COPY: ${{runner.temp}}/stress_memory/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + StressTestUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_undefined + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (undefined, actions)' + REPO_COPY: ${{runner.temp}}/stress_undefined/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE 
$TEMP_PATH + cd $REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + StressTestDebug: + needs: [BuilderDebDebug] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (debug, actions)' + REPO_COPY: ${{runner.temp}}/stress_debug/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################# +############################# INTEGRATION TESTS ############################################# +############################################################################################# + IntegrationTestsAsan: + needs: [BuilderDebAsan, FunctionalStatelessTestAsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + env: + TEMP_PATH: ${{runner.temp}}/integration_tests_asan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Integration tests (asan, actions)' + REPO_COPY: ${{runner.temp}}/integration_tests_asan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + IntegrationTestsTsan: + needs: [BuilderDebTsan, FunctionalStatelessTestTsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + env: + TEMP_PATH: ${{runner.temp}}/integration_tests_tsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Integration tests (thread, actions)' + REPO_COPY: ${{runner.temp}}/integration_tests_tsan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + IntegrationTestsRelease: + needs: [BuilderDebRelease, FunctionalStatelessTestRelease] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + env: + TEMP_PATH: ${{runner.temp}}/integration_tests_release + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Integration tests (release, actions)' + REPO_COPY: 
${{runner.temp}}/integration_tests_release/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################## +##################################### AST FUZZERS ############################################ +############################################################################################## + ASTFuzzerTestAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + env: + TEMP_PATH: ${{runner.temp}}/ast_fuzzer_asan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'AST fuzzer (ASan, actions)' + REPO_COPY: ${{runner.temp}}/ast_fuzzer_asan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 ast_fuzzer_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + ASTFuzzerTestTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + env: + TEMP_PATH: ${{runner.temp}}/ast_fuzzer_tsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'AST fuzzer (TSan, actions)' + REPO_COPY: ${{runner.temp}}/ast_fuzzer_tsan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 ast_fuzzer_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + ASTFuzzerTestUBSan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + env: + TEMP_PATH: ${{runner.temp}}/ast_fuzzer_ubsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'AST fuzzer (UBSan, actions)' + REPO_COPY: ${{runner.temp}}/ast_fuzzer_ubsan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 ast_fuzzer_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + ASTFuzzerTestMSan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + env: + TEMP_PATH: ${{runner.temp}}/ast_fuzzer_msan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'AST fuzzer (MSan, actions)' + REPO_COPY: ${{runner.temp}}/ast_fuzzer_msan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p 
$TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 ast_fuzzer_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + ASTFuzzerTestDebug: + needs: [BuilderDebDebug] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + env: + TEMP_PATH: ${{runner.temp}}/ast_fuzzer_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'AST fuzzer (debug, actions)' + REPO_COPY: ${{runner.temp}}/ast_fuzzer_debug/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 ast_fuzzer_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################# +#################################### UNIT TESTS ############################################# +############################################################################################# + UnitTestsAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + env: + TEMP_PATH: ${{runner.temp}}/unit_tests_asan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Unit tests (asan, actions)' + REPO_COPY: ${{runner.temp}}/unit_tests_asan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 unit_tests_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + UnitTestsReleaseClang: + needs: [BuilderBinRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + env: + TEMP_PATH: ${{runner.temp}}/unit_tests_asan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Unit tests (release-clang, actions)' + REPO_COPY: ${{runner.temp}}/unit_tests_asan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 unit_tests_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + UnitTestsTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + env: + TEMP_PATH: ${{runner.temp}}/unit_tests_tsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Unit tests (tsan, actions)' + REPO_COPY: ${{runner.temp}}/unit_tests_tsan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 
unit_tests_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + UnitTestsMsan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + env: + TEMP_PATH: ${{runner.temp}}/unit_tests_msan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Unit tests (msan, actions)' + REPO_COPY: ${{runner.temp}}/unit_tests_msan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 unit_tests_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + UnitTestsUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + env: + TEMP_PATH: ${{runner.temp}}/unit_tests_ubsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Unit tests (ubsan, actions)' + REPO_COPY: ${{runner.temp}}/unit_tests_ubsan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 unit_tests_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FinishCheck: + needs: + - DockerHubPush + - BuilderReport + - FunctionalStatelessTestDebug + - FunctionalStatelessTestRelease + - FunctionalStatelessTestAsan + - FunctionalStatelessTestTsan + - FunctionalStatelessTestMsan + - FunctionalStatelessTestUBsan + - FunctionalStatefulTestDebug + - FunctionalStatefulTestRelease + - FunctionalStatefulTestAsan + - FunctionalStatefulTestTsan + - FunctionalStatefulTestMsan + - FunctionalStatefulTestUBsan + - StressTestDebug + - StressTestAsan + - StressTestTsan + - StressTestMsan + - StressTestUBsan + - IntegrationTestsAsan + - IntegrationTestsRelease + - IntegrationTestsTsan + - CompatibilityCheck + - ASTFuzzerTestDebug + - ASTFuzzerTestAsan + - ASTFuzzerTestTsan + - ASTFuzzerTestMSan + - ASTFuzzerTestUBSan + - UnitTestsAsan + - UnitTestsTsan + - UnitTestsMsan + - UnitTestsUBsan + - UnitTestsReleaseClang + - SplitBuildSmokeTest + - PVSCheck + runs-on: [self-hosted, style-checker] + steps: + - name: Check out repository code + uses: actions/checkout@v2 + - name: Finish label + run: | + cd $GITHUB_WORKSPACE/tests/ci + python3 finish_check.py diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml new file mode 100644 index 00000000000..e279ae91588 --- /dev/null +++ b/.github/workflows/release_branches.yml @@ -0,0 +1,933 @@ +name: ReleaseCI +on: # yamllint disable-line rule:truthy + push: + branches: + - '21.**' + - '22.**' + - '23.**' + - '24.**' + - 'backport/**' +jobs: + DockerHubPush: + runs-on: [self-hosted, style-checker] + steps: + - name: Check out repository code + uses: actions/checkout@v2 + - name: Images check + run: | + cd $GITHUB_WORKSPACE/tests/ci + python3 docker_images_check.py + - name: Upload images files to artifacts + uses: actions/upload-artifact@v2 + with: + name: 
changed_images + path: ${{ runner.temp }}/docker_images_check/changed_images.json + CompatibilityCheck: + needs: [BuilderDebRelease] + runs-on: [self-hosted, style-checker] + steps: + - name: Check out repository code + uses: actions/checkout@v2 + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: CompatibilityCheck + env: + TEMP_PATH: ${{runner.temp}}/compatibility_check + REPO_COPY: ${{runner.temp}}/compatibility_check/ClickHouse + REPORTS_PATH: ${{runner.temp}}/reports_dir + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 compatibility_check.py 0 + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +######################################################################################### +#################################### ORDINARY BUILDS #################################### +######################################################################################### + BuilderDebRelease: + needs: [DockerHubPush] + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 0 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebAsan: + needs: [DockerHubPush] + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 3 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebUBsan: + needs: [DockerHubPush] + runs-on: [self-hosted, builder] + steps: + - name: Download changed 
images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 4 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebTsan: + needs: [DockerHubPush] + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 5 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebMsan: + needs: [DockerHubPush] + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 6 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + BuilderDebDebug: + needs: [DockerHubPush] + runs-on: [self-hosted, builder] + steps: + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: 
${{ runner.temp }}/images_path + - name: Check out repository code + uses: actions/checkout@v2 + with: + submodules: 'recursive' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + env: + TEMP_PATH: ${{runner.temp}}/build_check + IMAGES_PATH: ${{runner.temp}}/images_path + REPO_COPY: ${{runner.temp}}/build_check/ClickHouse + CACHES_PATH: ${{runner.temp}}/../ccaches + CHECK_NAME: 'ClickHouse build check (actions)' + BUILD_NUMBER: 7 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER + - name: Upload build URLs to artifacts + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_NAME }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################ +##################################### BUILD REPORTER ####################################### +############################################################################################ + BuilderReport: + needs: + - BuilderDebRelease + - BuilderDebAsan + - BuilderDebTsan + - BuilderDebUBsan + - BuilderDebMsan + - BuilderDebDebug + runs-on: [self-hosted, style-checker] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Report Builder + env: + TEMP_PATH: ${{runner.temp}}/report_check + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'ClickHouse build check (actions)' + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cd $GITHUB_WORKSPACE/tests/ci + python3 build_report_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################## +########################### FUNCTIONAL STATELESS TESTS ####################################### +############################################################################################## + FunctionalStatelessTestRelease: + needs: [BuilderDebRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (release, actions)' + REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: 
${{runner.temp}}/stateless_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (address, actions)' + REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_tsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (thread, actions)' + REPO_COPY: ${{runner.temp}}/stateless_tsan/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_ubsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (ubsan, actions)' + REPO_COPY: ${{runner.temp}}/stateless_ubsan/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestMsan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_memory + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (memory, actions)' + REPO_COPY: ${{runner.temp}}/stateless_memory/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatelessTestDebug: + needs: [BuilderDebDebug] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateless_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateless tests (debug, actions)' + 
REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse + KILL_TIMEOUT: 10800 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################## +############################ FUNCTIONAL STATEFUL TESTS ####################################### +############################################################################################## + FunctionalStatefulTestRelease: + needs: [BuilderDebRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (release, actions)' + REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (address, actions)' + REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_tsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (thread, actions)' + REPO_COPY: ${{runner.temp}}/stateful_tsan/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestMsan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: 
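+ # Shared contract across all check jobs: the tests/ci scripts appear to read
+ # TEMP_PATH, REPORTS_PATH and REPO_COPY from the environment, while CHECK_NAME
+ # and KILL_TIMEOUT are additionally passed as positional arguments below.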
+ TEMP_PATH: ${{runner.temp}}/stateful_msan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (memory, actions)' + REPO_COPY: ${{runner.temp}}/stateful_msan/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_ubsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (ubsan, actions)' + REPO_COPY: ${{runner.temp}}/stateful_ubsan/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FunctionalStatefulTestDebug: + needs: [BuilderDebDebug] + runs-on: [self-hosted, func-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + env: + TEMP_PATH: ${{runner.temp}}/stateful_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stateful tests (debug, actions)' + REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse + KILL_TIMEOUT: 3600 + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################## +######################################### STRESS TESTS ####################################### +############################################################################################## + StressTestAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_thread + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (address, actions)' + REPO_COPY: ${{runner.temp}}/stress_thread/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + StressTestTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code 
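+ # Test jobs use a plain checkout; only the builder jobs fetch submodules and
+ # full history (fetch-depth: 0), presumably because the test runners only
+ # execute scripts under tests/ci.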
+ uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_thread + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (thread, actions)' + REPO_COPY: ${{runner.temp}}/stress_thread/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + StressTestMsan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_memory + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (memory, actions)' + REPO_COPY: ${{runner.temp}}/stress_memory/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + StressTestUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_undefined + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (undefined, actions)' + REPO_COPY: ${{runner.temp}}/stress_undefined/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + StressTestDebug: + needs: [BuilderDebDebug] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + env: + TEMP_PATH: ${{runner.temp}}/stress_debug + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Stress test (debug, actions)' + REPO_COPY: ${{runner.temp}}/stress_debug/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH +############################################################################################# +############################# INTEGRATION TESTS ############################################# +############################################################################################# + IntegrationTestsAsan: + needs: [BuilderDebAsan, FunctionalStatelessTestAsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + env: + TEMP_PATH: 
${{runner.temp}}/integration_tests_asan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Integration tests (asan, actions)' + REPO_COPY: ${{runner.temp}}/integration_tests_asan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + IntegrationTestsTsan: + needs: [BuilderDebTsan, FunctionalStatelessTestTsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + env: + TEMP_PATH: ${{runner.temp}}/integration_tests_tsan + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Integration tests (thread, actions)' + REPO_COPY: ${{runner.temp}}/integration_tests_tsan/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + IntegrationTestsRelease: + needs: [BuilderDebRelease, FunctionalStatelessTestRelease] + runs-on: [self-hosted, stress-tester] + steps: + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{runner.temp}}/reports_dir + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + env: + TEMP_PATH: ${{runner.temp}}/integration_tests_release + REPORTS_PATH: ${{runner.temp}}/reports_dir + CHECK_NAME: 'Integration tests (release, actions)' + REPO_COPY: ${{runner.temp}}/integration_tests_release/ClickHouse + run: | + sudo rm -fr $TEMP_PATH + mkdir -p $TEMP_PATH + cp -r $GITHUB_WORKSPACE $TEMP_PATH + cd $REPO_COPY/tests/ci + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker kill $(docker ps -q) ||: + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr $TEMP_PATH + FinishCheck: + needs: + - DockerHubPush + - BuilderReport + - FunctionalStatelessTestDebug + - FunctionalStatelessTestRelease + - FunctionalStatelessTestAsan + - FunctionalStatelessTestTsan + - FunctionalStatelessTestMsan + - FunctionalStatelessTestUBsan + - FunctionalStatefulTestDebug + - FunctionalStatefulTestRelease + - FunctionalStatefulTestAsan + - FunctionalStatefulTestTsan + - FunctionalStatefulTestMsan + - FunctionalStatefulTestUBsan + - StressTestDebug + - StressTestAsan + - StressTestTsan + - StressTestMsan + - StressTestUBsan + - IntegrationTestsAsan + - IntegrationTestsRelease + - IntegrationTestsTsan + - CompatibilityCheck + runs-on: [self-hosted, style-checker] + steps: + - name: Check out repository code + uses: actions/checkout@v2 + - name: Finish label + run: | + cd $GITHUB_WORKSPACE/tests/ci + python3 finish_check.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 686f0072005..f34725448f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,7 +17,7 @@ * Support `EXISTS (subquery)`. Closes [#6852](https://github.com/ClickHouse/ClickHouse/issues/6852). [#29731](https://github.com/ClickHouse/ClickHouse/pull/29731) ([Kseniia Sumarokova](https://github.com/kssenii)). * Session logging for audit. 
Logging all successful and failed login and logout events to a new `system.session_log` table. [#22415](https://github.com/ClickHouse/ClickHouse/pull/22415) ([Vasily Nemkov](https://github.com/Enmk)) ([Vitaly Baranov](https://github.com/vitlibar)).
* Support multidimensional cosine distance and euclidean distance functions; L1, L2, Lp, Linf distances and norms. Scalar product on tuples and various arithmetic operators on tuples. This fully closes [#4509](https://github.com/ClickHouse/ClickHouse/issues/4509) and even more. [#27933](https://github.com/ClickHouse/ClickHouse/pull/27933) ([Alexey Boykov](https://github.com/mathalex)).
-* Add support for compression and decompression for `INTO OUTPUT` and `FROM INFILE` (with autodetect or with additional optional parameter). [#27135](https://github.com/ClickHouse/ClickHouse/pull/27135) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
+* Add support for compression and decompression for `INTO OUTFILE` and `FROM INFILE` (with autodetect or with additional optional parameter). [#27135](https://github.com/ClickHouse/ClickHouse/pull/27135) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Add CORS (Cross Origin Resource Sharing) support with HTTP `OPTIONS` request. It means that now Grafana will work with serverless requests without kludges. Closes [#18693](https://github.com/ClickHouse/ClickHouse/issues/18693). [#29155](https://github.com/ClickHouse/ClickHouse/pull/29155) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Queries with JOIN ON now support disjunctions (OR). [#21320](https://github.com/ClickHouse/ClickHouse/pull/21320) ([Ilya Golshtein](https://github.com/ilejn)).
* Added function `tokens`. It allows splitting a string into tokens using non-alphanumeric ASCII characters as separators. [#29981](https://github.com/ClickHouse/ClickHouse/pull/29981) ([Maksim Kita](https://github.com/kitaisreal)). Added function `ngrams` to extract ngrams from text. Closes [#29699](https://github.com/ClickHouse/ClickHouse/issues/29699). [#29738](https://github.com/ClickHouse/ClickHouse/pull/29738) ([Maksim Kita](https://github.com/kitaisreal)).
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ede70b09d94..0879aeac009 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -624,7 +624,7 @@ macro (add_executable target)
     # invoke built-in add_executable
     # explicitly acquire and interpose malloc symbols by clickhouse_malloc
     # if GLIBC_COMPATIBILITY is ON and ENABLE_THINLTO is on then provide memcpy symbol explicitly to neutralize thinlto's libcall generation.
-    if (GLIBC_COMPATIBILITY AND ENABLE_THINLTO)
+    if (ARCH_AMD64 AND GLIBC_COMPATIBILITY AND ENABLE_THINLTO)
        _add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc> $<TARGET_OBJECTS:clickhouse_memcpy>)
    else ()
        _add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc>)
diff --git a/base/base/ReplxxLineReader.cpp b/base/base/ReplxxLineReader.cpp
index 38867faf5d5..3c2ac1f8891 100644
--- a/base/base/ReplxxLineReader.cpp
+++ b/base/base/ReplxxLineReader.cpp
@@ -25,6 +25,16 @@ void trim(String & s)
     s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
 }
 
+std::string getEditor()
+{
+    const char * editor = std::getenv("EDITOR");
+
+    if (!editor || !*editor)
+        editor = "vim";
+
+    return editor;
+}
+
 /// Copied from replxx::src/util.cxx::now_ms_str() under the terms of 3-clause BSD license of Replxx.
/// Copyright (c) 2017-2018, Marcin Konarski (amok at codestation.org)
/// Copyright (c) 2010, Salvatore Sanfilippo (antirez at gmail dot com)
@@ -123,6 +133,7 @@ ReplxxLineReader::ReplxxLineReader(
     Patterns delimiters_,
     replxx::Replxx::highlighter_callback_t highlighter_)
     : LineReader(history_file_path_, multiline_, std::move(extenders_), std::move(delimiters_)), highlighter(std::move(highlighter_))
+    , editor(getEditor())
 {
     using namespace std::placeholders;
     using Replxx = replxx::Replxx;
@@ -236,14 +247,13 @@ void ReplxxLineReader::addToHistory(const String & line)
         rx.print("Unlock of history file failed: %s\n", errnoToString(errno).c_str());
 }
 
-int ReplxxLineReader::execute(const std::string & command)
+/// See comments in ShellCommand::executeImpl()
+/// (for the vfork via dlsym())
+int ReplxxLineReader::executeEditor(const std::string & path)
 {
-    std::vector<char> argv0("sh", &("sh"[3]));
-    std::vector<char> argv1("-c", &("-c"[3]));
-    std::vector<char> argv2(command.data(), command.data() + command.size() + 1);
-
-    const char * filename = "/bin/sh";
-    char * const argv[] = {argv0.data(), argv1.data(), argv2.data(), nullptr};
+    std::vector<char> argv0(editor.data(), editor.data() + editor.size() + 1);
+    std::vector<char> argv1(path.data(), path.data() + path.size() + 1);
+    char * const argv[] = {argv0.data(), argv1.data(), nullptr};
 
     static void * real_vfork = dlsym(RTLD_DEFAULT, "vfork");
     if (!real_vfork)
@@ -260,6 +270,7 @@ int ReplxxLineReader::execute(const std::string & command)
         return -1;
     }
 
+    /// Child
     if (0 == pid)
     {
         sigset_t mask;
@@ -267,16 +278,26 @@ int ReplxxLineReader::execute(const std::string & command)
         sigprocmask(0, nullptr, &mask);
         sigprocmask(SIG_UNBLOCK, &mask, nullptr);
 
-        execv(filename, argv);
+        execvp(editor.c_str(), argv);
+        rx.print("Cannot execute %s: %s\n", editor.c_str(), errnoToString(errno).c_str());
         _exit(-1);
     }
 
     int status = 0;
-    if (-1 == waitpid(pid, &status, 0))
+    do
     {
-        rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str());
-        return -1;
-    }
+        int exited_pid = waitpid(pid, &status, 0);
+        if (exited_pid == -1)
+        {
+            if (errno == EINTR)
+                continue;
+
+            rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str());
+            return -1;
+        }
+        else
+            break;
+    } while (true);
 
     return status;
 }
@@ -290,10 +311,6 @@ void ReplxxLineReader::openEditor()
         return;
     }
 
-    const char * editor = std::getenv("EDITOR");
-    if (!editor || !*editor)
-        editor = "vim";
-
     replxx::Replxx::State state(rx.get_state());
 
     size_t bytes_written = 0;
@@ -316,7 +333,7 @@ void ReplxxLineReader::openEditor()
         return;
     }
 
-    if (0 == execute(fmt::format("{} {}", editor, filename)))
+    if (0 == executeEditor(filename))
     {
         try
         {
diff --git a/base/base/ReplxxLineReader.h b/base/base/ReplxxLineReader.h
index 9aa32a1e26d..d4cc7de1e7a 100644
--- a/base/base/ReplxxLineReader.h
+++ b/base/base/ReplxxLineReader.h
@@ -22,7 +22,7 @@ public:
 private:
     InputStatus readOneLine(const String & prompt) override;
     void addToHistory(const String & line) override;
-    int execute(const std::string & command);
+    int executeEditor(const std::string & path);
     void openEditor();
 
     replxx::Replxx rx;
@@ -31,4 +31,6 @@ private:
     // used to call flock() to synchronize multiple clients using same history file
     int history_file_fd = -1;
     bool bracketed_paste_enabled = false;
+
+    std::string editor;
 };
diff --git a/base/base/defines.h b/base/base/defines.h
index 21a3c09f532..bd98e99f5b9 100644
--- a/base/base/defines.h
+++ b/base/base/defines.h
@@ -28,8 +28,8 @@
 #define NO_INLINE __attribute__((__noinline__))
 #define MAY_ALIAS __attribute__((__may_alias__))
 
-#if !defined(__x86_64__) && !defined(__aarch64__) && !defined(__PPC__)
-#    error "The only supported platforms are x86_64 and AArch64, PowerPC (work in progress)"
+#if !defined(__x86_64__) && !defined(__aarch64__) && !defined(__PPC__) && !(defined(__riscv) && (__riscv_xlen == 64))
+#    error "The only supported platforms are x86_64 and AArch64, PowerPC (work in progress) and RISC-V 64 (experimental)"
 #endif
 
 /// Check for presence of address sanitizer
diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp
index 70915a520b3..2b703f7fa3a 100644
--- a/base/daemon/BaseDaemon.cpp
+++ b/base/daemon/BaseDaemon.cpp
@@ -63,6 +63,9 @@
 #include
 #include
+#include
+#include
+
 #include
 
 #if defined(OS_DARWIN)
@@ -675,6 +678,34 @@ void BaseDaemon::initialize(Application & self)
     if ((!log_path.empty() && is_daemon) || config().has("logger.stderr"))
     {
         std::string stderr_path = config().getString("logger.stderr", log_path + "/stderr.log");
+
+        /// Check that stderr is writable before freopen(),
+        /// since freopen() will make stderr invalid on error,
+        /// and logging to stderr will be broken,
+        /// so the following code (that is used in every program) will not write anything:
+        ///
+        /// int main(int argc, char ** argv)
+        /// {
+        ///     try
+        ///     {
+        ///         DB::SomeApp app;
+        ///         return app.run(argc, argv);
+        ///     }
+        ///     catch (...)
+        ///     {
+        ///         std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
+        ///         return 1;
+        ///     }
+        /// }
+        if (access(stderr_path.c_str(), W_OK))
+        {
+            int fd;
+            if ((fd = creat(stderr_path.c_str(), 0600)) == -1 && errno != EEXIST)
+                throw Poco::OpenFileException("File " + stderr_path + " (logger.stderr) is not writable");
+            if (fd != -1)
+                ::close(fd);
+        }
+
         if (!freopen(stderr_path.c_str(), "a+", stderr))
             throw Poco::OpenFileException("Cannot attach stderr to " + stderr_path);
 
@@ -973,6 +1004,14 @@ void BaseDaemon::setupWatchdog()
         memcpy(argv0, new_process_name, std::min(strlen(new_process_name), original_process_name.size()));
     }
 
+    /// If streaming compression of logs is used then we write watchdog logs to cerr
+    if (config().getRawString("logger.stream_compress", "false") == "true")
+    {
+        Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter;
+        Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel(std::cerr));
+        logger().setChannel(log);
+    }
+
     logger().information(fmt::format("Will watch for the process with pid {}", pid));
 
     /// Forward signals to the child process.
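The watchdog change above keys off the new `logger.stream_compress` option, which can be smoke-tested end to end once a server is built. A minimal sketch follows; the drop-in file name, the `<clickhouse>` root tag, and the exact log file name are assumptions, with the `.lz4` suffix taken from the Loggers change right below:

``` bash
# Hypothetical config.d drop-in that turns on stream compression of logs.
cat > /etc/clickhouse-server/config.d/stream-compress.xml <<'EOF'
<clickhouse>
    <logger>
        <stream_compress>true</stream_compress>
    </logger>
</clickhouse>
EOF

# After a restart the startup message reports an .lz4 log path; the lz4 CLI
# (the same tool added to the integration image later in this diff) reads it back.
sudo systemctl restart clickhouse-server
lz4cat /var/log/clickhouse-server/clickhouse-server.log.lz4 | tail -n 20
```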
diff --git a/base/loggers/Loggers.cpp b/base/loggers/Loggers.cpp index 0f41296819e..5eb9ef95176 100644 --- a/base/loggers/Loggers.cpp +++ b/base/loggers/Loggers.cpp @@ -62,7 +62,13 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log if (!log_path.empty()) { createDirectory(log_path); - std::cerr << "Logging " << log_level_string << " to " << log_path << std::endl; + + std::string ext; + if (config.getRawString("logger.stream_compress", "false") == "true") + ext = ".lz4"; + + std::cerr << "Logging " << log_level_string << " to " << log_path << ext << std::endl; + auto log_level = Poco::Logger::parseLevel(log_level_string); if (log_level > max_log_level) { @@ -75,6 +81,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log log_file->setProperty(Poco::FileChannel::PROP_ROTATION, config.getRawString("logger.size", "100M")); log_file->setProperty(Poco::FileChannel::PROP_ARCHIVE, "number"); log_file->setProperty(Poco::FileChannel::PROP_COMPRESS, config.getRawString("logger.compress", "true")); + log_file->setProperty(Poco::FileChannel::PROP_STREAMCOMPRESS, config.getRawString("logger.stream_compress", "false")); log_file->setProperty(Poco::FileChannel::PROP_PURGECOUNT, config.getRawString("logger.count", "1")); log_file->setProperty(Poco::FileChannel::PROP_FLUSH, config.getRawString("logger.flush", "true")); log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false")); @@ -100,13 +107,18 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log max_log_level = errorlog_level; } - std::cerr << "Logging errors to " << errorlog_path << std::endl; + std::string ext; + if (config.getRawString("logger.stream_compress", "false") == "true") + ext = ".lz4"; + + std::cerr << "Logging errors to " << errorlog_path << ext << std::endl; error_log_file = new Poco::FileChannel; error_log_file->setProperty(Poco::FileChannel::PROP_PATH, fs::weakly_canonical(errorlog_path)); error_log_file->setProperty(Poco::FileChannel::PROP_ROTATION, config.getRawString("logger.size", "100M")); error_log_file->setProperty(Poco::FileChannel::PROP_ARCHIVE, "number"); error_log_file->setProperty(Poco::FileChannel::PROP_COMPRESS, config.getRawString("logger.compress", "true")); + error_log_file->setProperty(Poco::FileChannel::PROP_STREAMCOMPRESS, config.getRawString("logger.stream_compress", "false")); error_log_file->setProperty(Poco::FileChannel::PROP_PURGECOUNT, config.getRawString("logger.count", "1")); error_log_file->setProperty(Poco::FileChannel::PROP_FLUSH, config.getRawString("logger.flush", "true")); error_log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false")); diff --git a/cmake/arch.cmake b/cmake/arch.cmake index 00cc16fbd10..82c0d40994c 100644 --- a/cmake/arch.cmake +++ b/cmake/arch.cmake @@ -16,3 +16,7 @@ endif () if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)") set (ARCH_PPC64LE 1) endif () +if (CMAKE_SYSTEM_PROCESSOR MATCHES "riscv64") + set (ARCH_RISCV64 1) +endif () + diff --git a/cmake/linux/toolchain-riscv64.cmake b/cmake/linux/toolchain-riscv64.cmake index 1ccbd3ee0da..6ae279f4716 100644 --- a/cmake/linux/toolchain-riscv64.cmake +++ b/cmake/linux/toolchain-riscv64.cmake @@ -20,10 +20,10 @@ set (CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}") set (CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS} 
--gcc-toolchain=${TOOLCHAIN_PATH}") -set (LINKER_NAME "ld.lld" CACHE STRING "" FORCE) +set (LINKER_NAME "riscv64-linux-gnu-ld.bfd" CACHE STRING "" FORCE) -set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld") -set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld") +set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=bfd") +set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=bfd") set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE) set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE) diff --git a/contrib/boost b/contrib/boost index 79358a3106a..fcb058e1459 160000 --- a/contrib/boost +++ b/contrib/boost @@ -1 +1 @@ -Subproject commit 79358a3106aab6af464430ed67c7efafebf5cd6f +Subproject commit fcb058e1459ac273ecfe7cdf72791cb1479115af diff --git a/contrib/boost-cmake/CMakeLists.txt b/contrib/boost-cmake/CMakeLists.txt index 27072910135..4ad68ce4d39 100644 --- a/contrib/boost-cmake/CMakeLists.txt +++ b/contrib/boost-cmake/CMakeLists.txt @@ -196,6 +196,12 @@ if (NOT EXTERNAL_BOOST_FOUND) "${LIBRARY_DIR}/libs/context/src/asm/make_ppc64_sysv_elf_gas.S" "${LIBRARY_DIR}/libs/context/src/asm/ontop_ppc64_sysv_elf_gas.S" ) + elseif (ARCH_RISCV64) + set (SRCS_CONTEXT ${SRCS_CONTEXT} + "${LIBRARY_DIR}/libs/context/src/asm/jump_riscv64_sysv_elf_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/make_riscv64_sysv_elf_gas.S" + "${LIBRARY_DIR}/libs/context/src/asm/ontop_riscv64_sysv_elf_gas.S" + ) elseif(OS_DARWIN) set (SRCS_CONTEXT ${SRCS_CONTEXT} "${LIBRARY_DIR}/libs/context/src/asm/jump_x86_64_sysv_macho_gas.S" diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt index 30dd3baa55b..fd52ce4a4f3 100644 --- a/contrib/jemalloc-cmake/CMakeLists.txt +++ b/contrib/jemalloc-cmake/CMakeLists.txt @@ -1,5 +1,5 @@ if (SANITIZE OR NOT ( - ((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE)) OR + ((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE OR ARCH_RISCV64)) OR (OS_DARWIN AND (CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo" OR CMAKE_BUILD_TYPE STREQUAL "Debug")) )) if (ENABLE_JEMALLOC) @@ -112,6 +112,8 @@ elseif (ARCH_ARM) set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_aarch64") elseif (ARCH_PPC64LE) set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_ppc64le") +elseif (ARCH_RISCV64) + set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_riscv64") else () message (FATAL_ERROR "internal jemalloc: This arch is not supported") endif () diff --git a/contrib/jemalloc-cmake/include_linux_riscv64/README b/contrib/jemalloc-cmake/include_linux_riscv64/README new file mode 100644 index 00000000000..01b65655c55 --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_riscv64/README @@ -0,0 +1,8 @@ +Here are pre-generated files from jemalloc on Linux risc-v. +You can obtain these files by running ./autogen.sh inside jemalloc source directory. + +Added #define GNU_SOURCE +Added JEMALLOC_OVERRIDE___POSIX_MEMALIGN because why not. +Removed JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF because it's non standard. +Removed JEMALLOC_PURGE_MADVISE_FREE because it's available only from Linux 4.5. 
+Added JEMALLOC_CONFIG_MALLOC_CONF substitution diff --git a/contrib/jemalloc-cmake/include_linux_riscv64/jemalloc/internal/jemalloc_internal_defs.h.in b/contrib/jemalloc-cmake/include_linux_riscv64/jemalloc/internal/jemalloc_internal_defs.h.in new file mode 100644 index 00000000000..5e0135cc0d0 --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_riscv64/jemalloc/internal/jemalloc_internal_defs.h.in @@ -0,0 +1,367 @@ +/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */ +#ifndef JEMALLOC_INTERNAL_DEFS_H_ +#define JEMALLOC_INTERNAL_DEFS_H_ +/* + * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all + * public APIs to be prefixed. This makes it possible, with some care, to use + * multiple allocators simultaneously. + */ +/* #undef JEMALLOC_PREFIX */ +/* #undef JEMALLOC_CPREFIX */ + +/* + * Define overrides for non-standard allocator-related functions if they are + * present on the system. + */ +#define JEMALLOC_OVERRIDE___LIBC_CALLOC +#define JEMALLOC_OVERRIDE___LIBC_FREE +#define JEMALLOC_OVERRIDE___LIBC_MALLOC +#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN +#define JEMALLOC_OVERRIDE___LIBC_REALLOC +#define JEMALLOC_OVERRIDE___LIBC_VALLOC +/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */ + +/* + * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. + * For shared libraries, symbol visibility mechanisms prevent these symbols + * from being exported, but for static libraries, naming collisions are a real + * possibility. + */ +#define JEMALLOC_PRIVATE_NAMESPACE je_ + +/* + * Hyper-threaded CPUs may need a special instruction inside spin loops in + * order to yield to another virtual CPU. + */ +#define CPU_SPINWAIT +/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */ +#define HAVE_CPU_SPINWAIT 0 + +/* + * Number of significant bits in virtual addresses. This may be less than the + * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 + * bits are the same as bit 47. + */ +#define LG_VADDR 48 + +/* Defined if C11 atomics are available. */ +#define JEMALLOC_C11_ATOMICS 1 + +/* Defined if GCC __atomic atomics are available. */ +#define JEMALLOC_GCC_ATOMIC_ATOMICS 1 +/* and the 8-bit variant support. */ +#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1 + +/* Defined if GCC __sync atomics are available. */ +#define JEMALLOC_GCC_SYNC_ATOMICS 1 +/* and the 8-bit variant support. */ +#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1 + +/* + * Defined if __builtin_clz() and __builtin_clzl() are available. + */ +#define JEMALLOC_HAVE_BUILTIN_CLZ + +/* + * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. + */ +/* #undef JEMALLOC_OS_UNFAIR_LOCK */ + +/* Defined if syscall(2) is usable. */ +#define JEMALLOC_USE_SYSCALL + +/* + * Defined if secure_getenv(3) is available. + */ +// #define JEMALLOC_HAVE_SECURE_GETENV + +/* + * Defined if issetugid(2) is available. + */ +/* #undef JEMALLOC_HAVE_ISSETUGID */ + +/* Defined if pthread_atfork(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_ATFORK + +/* Defined if pthread_setname_np(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. + */ +#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1 + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. + */ +#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1 + +/* + * Defined if mach_absolute_time() is available. 
+ */ +/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */ + +/* + * Defined if _malloc_thread_cleanup() exists. At least in the case of + * FreeBSD, pthread_key_create() allocates, which if used during malloc + * bootstrapping will cause recursion into the pthreads library. Therefore, if + * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in + * malloc_tsd. + */ +/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ + +/* + * Defined if threaded initialization is known to be safe on this platform. + * Among other things, it must be possible to initialize a mutex without + * triggering allocation in order for threaded allocation to be safe. + */ +#define JEMALLOC_THREADED_INIT + +/* + * Defined if the pthreads implementation defines + * _pthread_mutex_init_calloc_cb(), in which case the function is used in order + * to avoid recursive allocation during mutex initialization. + */ +/* #undef JEMALLOC_MUTEX_INIT_CB */ + +/* Non-empty if the tls_model attribute is supported. */ +#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) + +/* + * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables + * inline functions. + */ +/* #undef JEMALLOC_DEBUG */ + +/* JEMALLOC_STATS enables statistics calculation. */ +#define JEMALLOC_STATS + +/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */ +/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */ + +/* JEMALLOC_PROF enables allocation profiling. */ +/* #undef JEMALLOC_PROF */ + +/* Use libunwind for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBUNWIND */ + +/* Use libgcc for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBGCC */ + +/* Use gcc intrinsics for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_GCC */ + +/* + * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage + * segment (DSS). + */ +#define JEMALLOC_DSS + +/* Support memory filling (junk/zero). */ +#define JEMALLOC_FILL + +/* Support utrace(2)-based tracing. */ +/* #undef JEMALLOC_UTRACE */ + +/* Support optional abort() on OOM. */ +/* #undef JEMALLOC_XMALLOC */ + +/* Support lazy locking (avoid locking unless a second thread is launched). */ +/* #undef JEMALLOC_LAZY_LOCK */ + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +/* #undef LG_QUANTUM */ + +/* One page is 2^LG_PAGE bytes. */ +#define LG_PAGE 16 + +/* + * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the + * system does not explicitly support huge pages; system calls that require + * explicit huge page support are separately configured. + */ +#define LG_HUGEPAGE 29 + +/* + * If defined, adjacent virtual memory mappings with identical attributes + * automatically coalesce, and they fragment when changes are made to subranges. + * This is the normal order of things for mmap()/munmap(), but on Windows + * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. + * mappings do *not* coalesce/fragment. + */ +#define JEMALLOC_MAPS_COALESCE + +/* + * If defined, retain memory for later reuse by default rather than using e.g. + * munmap() to unmap freed extents. This is enabled on 64-bit Linux because + * common sequences of mmap()/munmap() calls will cause virtual memory map + * holes. + */ +#define JEMALLOC_RETAIN + +/* TLS is used to map arenas and magazine caches to threads. */ +#define JEMALLOC_TLS + +/* + * Used to mark unreachable code to quiet "end of non-void" compiler warnings. 
+ * Don't use this directly; instead use unreachable() from util.h + */ +#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable + +/* + * ffs*() functions to use for bitmapping. Don't use these directly; instead, + * use ffs_*() from util.h. + */ +#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll +#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl +#define JEMALLOC_INTERNAL_FFS __builtin_ffs + +/* + * popcount*() functions to use for bitmapping. + */ +#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl +#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount + +/* + * If defined, explicitly attempt to more uniformly distribute large allocation + * pointer alignments across all cache indices. + */ +#define JEMALLOC_CACHE_OBLIVIOUS + +/* + * If defined, enable logging facilities. We make this a configure option to + * avoid taking extra branches everywhere. + */ +/* #undef JEMALLOC_LOG */ + +/* + * If defined, use readlinkat() (instead of readlink()) to follow + * /etc/malloc_conf. + */ +/* #undef JEMALLOC_READLINKAT */ + +/* + * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. + */ +/* #undef JEMALLOC_ZONE */ + +/* + * Methods for determining whether the OS overcommits. + * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's + * /proc/sys/vm.overcommit_memory file. + * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. + */ +/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */ +#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY + +/* Defined if madvise(2) is available. */ +#define JEMALLOC_HAVE_MADVISE + +/* + * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE + * arguments to madvise(2). + */ +#define JEMALLOC_HAVE_MADVISE_HUGE + +/* + * Methods for purging unused pages differ between operating systems. + * + * madvise(..., MADV_FREE) : This marks pages as being unused, such that they + * will be discarded rather than swapped out. + * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is + * defined, this immediately discards pages, + * such that new pages will be demand-zeroed if + * the address region is later touched; + * otherwise this behaves similarly to + * MADV_FREE, though typically with higher + * system overhead. + */ +#define JEMALLOC_PURGE_MADVISE_FREE +#define JEMALLOC_PURGE_MADVISE_DONTNEED +#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS + +/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */ +/* #undef JEMALLOC_DEFINE_MADVISE_FREE */ + +/* + * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise. + */ +#define JEMALLOC_MADVISE_DONTDUMP + +/* + * Defined if transparent huge pages (THPs) are supported via the + * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled. + */ +/* #undef JEMALLOC_THP */ + +/* Define if operating system has alloca.h header. */ +#define JEMALLOC_HAS_ALLOCA_H 1 + +/* C99 restrict keyword supported. */ +#define JEMALLOC_HAS_RESTRICT 1 + +/* For use by hash code. */ +/* #undef JEMALLOC_BIG_ENDIAN */ + +/* sizeof(int) == 2^LG_SIZEOF_INT. */ +#define LG_SIZEOF_INT 2 + +/* sizeof(long) == 2^LG_SIZEOF_LONG. */ +#define LG_SIZEOF_LONG 3 + +/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ +#define LG_SIZEOF_LONG_LONG 3 + +/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ +#define LG_SIZEOF_INTMAX_T 3 + +/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ +#define JEMALLOC_GLIBC_MALLOC_HOOK + +/* glibc memalign hook. 
*/ +#define JEMALLOC_GLIBC_MEMALIGN_HOOK + +/* pthread support */ +#define JEMALLOC_HAVE_PTHREAD + +/* dlsym() support */ +#define JEMALLOC_HAVE_DLSYM + +/* Adaptive mutex support in pthreads. */ +#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP + +/* GNU specific sched_getcpu support */ +#define JEMALLOC_HAVE_SCHED_GETCPU + +/* GNU specific sched_setaffinity support */ +#define JEMALLOC_HAVE_SCHED_SETAFFINITY + +/* + * If defined, all the features necessary for background threads are present. + */ +#define JEMALLOC_BACKGROUND_THREAD 1 + +/* + * If defined, jemalloc symbols are not exported (doesn't work when + * JEMALLOC_PREFIX is not defined). + */ +/* #undef JEMALLOC_EXPORT */ + +/* config.malloc_conf options string. */ +#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@" + +/* If defined, jemalloc takes the malloc/free/etc. symbol names. */ +#define JEMALLOC_IS_MALLOC 1 + +/* + * Defined if strerror_r returns char * if _GNU_SOURCE is defined. + */ +#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE + +/* Performs additional safety checks when defined. */ +/* #undef JEMALLOC_OPT_SAFETY_CHECKS */ + +#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/contrib/librdkafka-cmake/config.h.in b/contrib/librdkafka-cmake/config.h.in index 9fecb45e42d..212ffd1c165 100644 --- a/contrib/librdkafka-cmake/config.h.in +++ b/contrib/librdkafka-cmake/config.h.in @@ -66,7 +66,7 @@ #cmakedefine WITH_SASL_OAUTHBEARER 1 #cmakedefine WITH_SASL_CYRUS 1 // crc32chw -#if !defined(__PPC__) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32)) && !(defined(__aarch64__) && defined(__APPLE__)) +#if !defined(__PPC__) && !defined(__riscv) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32)) && !(defined(__aarch64__) && defined(__APPLE__)) #define WITH_CRC32C_HW 1 #endif // regex diff --git a/contrib/poco b/contrib/poco index 39fd359765a..258b9ba6cd2 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit 39fd359765a3a77b46d94ec3c5def3c7802a920f +Subproject commit 258b9ba6cd245ff88e9346f75c43464c403f329d diff --git a/contrib/poco-cmake/Foundation/CMakeLists.txt b/contrib/poco-cmake/Foundation/CMakeLists.txt index a9a4933873c..0c13d109344 100644 --- a/contrib/poco-cmake/Foundation/CMakeLists.txt +++ b/contrib/poco-cmake/Foundation/CMakeLists.txt @@ -51,6 +51,7 @@ if (USE_INTERNAL_POCO_LIBRARY) "${LIBRARY_DIR}/Foundation/src/Channel.cpp" "${LIBRARY_DIR}/Foundation/src/Checksum.cpp" "${LIBRARY_DIR}/Foundation/src/Clock.cpp" + "${LIBRARY_DIR}/Foundation/src/CompressedLogFile.cpp" "${LIBRARY_DIR}/Foundation/src/Condition.cpp" "${LIBRARY_DIR}/Foundation/src/Configurable.cpp" "${LIBRARY_DIR}/Foundation/src/ConsoleChannel.cpp" @@ -222,7 +223,7 @@ if (USE_INTERNAL_POCO_LIBRARY) POCO_OS_FAMILY_UNIX ) target_include_directories (_poco_foundation SYSTEM PUBLIC "${LIBRARY_DIR}/Foundation/include") - target_link_libraries (_poco_foundation PRIVATE Poco::Foundation::PCRE ${ZLIB_LIBRARIES}) + target_link_libraries (_poco_foundation PRIVATE Poco::Foundation::PCRE ${ZLIB_LIBRARIES} lz4) else () add_library (Poco::Foundation UNKNOWN IMPORTED GLOBAL) diff --git a/contrib/rocksdb b/contrib/rocksdb index 296c1b8b95f..e7c2b2f7bcf 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit 296c1b8b95fd448b8097a1b2cc9f704ff4a73a2c +Subproject commit e7c2b2f7bcf3b4b33892a1a6d25c32a93edfbdb9 diff --git a/contrib/sentry-native b/contrib/sentry-native index 94644e92f0a..f431047ac8d 160000 --- a/contrib/sentry-native +++ b/contrib/sentry-native @@ -1 +1 @@ -Subproject commit 
94644e92f0a3ff14bd35ed902a8622a2d15f7be4 +Subproject commit f431047ac8da13179c488018dddf1c0d0771a997 diff --git a/contrib/sysroot b/contrib/sysroot index 6172893931e..1a64956aa7c 160000 --- a/contrib/sysroot +++ b/contrib/sysroot @@ -1 +1 @@ -Subproject commit 6172893931e19b028f9cabb7095a44361be863df +Subproject commit 1a64956aa7c280448be6526251bb2b8e6d380ab1 diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 0c11e0a615d..f099e391a90 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -256,6 +256,12 @@ continue task_exit_code=0 echo "success" > status.txt echo "OK" > description.txt + elif [ "$fuzzer_exit_code" == "137" ] + then + # Killed. + task_exit_code=$fuzzer_exit_code + echo "failure" > status.txt + echo "Killed" > description.txt else # The server was alive, but the fuzzer returned some error. This might # be some client-side error detected by fuzzing, or a problem in the diff --git a/docker/test/integration/base/Dockerfile b/docker/test/integration/base/Dockerfile index 519c64297e5..add4dad0132 100644 --- a/docker/test/integration/base/Dockerfile +++ b/docker/test/integration/base/Dockerfile @@ -19,6 +19,7 @@ RUN apt-get update \ sqlite3 \ curl \ tar \ + lz4 \ krb5-user \ iproute2 \ lsof \ diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile index 06e1f64ced2..58c77d285d9 100644 --- a/docker/test/integration/runner/Dockerfile +++ b/docker/test/integration/runner/Dockerfile @@ -60,7 +60,7 @@ RUN dockerd --version; docker --version RUN python3 -m pip install \ PyMySQL \ aerospike==4.0.0 \ - avro \ + avro==1.10.2 \ cassandra-driver \ confluent-kafka==1.5.0 \ dict2xml \ diff --git a/docker/test/performance-comparison/config/config.d/zzz-perf-comparison-tweaks-config.xml b/docker/test/performance-comparison/config/config.d/zzz-perf-comparison-tweaks-config.xml index 292665c4f68..39c29bb61ca 100644 --- a/docker/test/performance-comparison/config/config.d/zzz-perf-comparison-tweaks-config.xml +++ b/docker/test/performance-comparison/config/config.d/zzz-perf-comparison-tweaks-config.xml @@ -19,6 +19,31 @@ + + + ENGINE = Memory + + + + ENGINE = Memory + + + + ENGINE = Memory + + + + ENGINE = Memory + + + + ENGINE = Memory + + + 1000000000 10 diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 04845f2a4d1..ed215bd4273 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -37,6 +37,12 @@ function configure() # install test configs /usr/share/clickhouse-test/config/install.sh + # avoid too slow startup + sudo cat /etc/clickhouse-server/config.d/keeper_port.xml | sed "s|100000|10000|" > /etc/clickhouse-server/config.d/keeper_port.xml.tmp + sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml + sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml + sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml + # for clickhouse-server (via service) echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment # for clickhouse-client diff --git a/docs/en/development/browse-code.md b/docs/en/development/browse-code.md index 35555bbd79c..fa57d2289b3 100644 --- a/docs/en/development/browse-code.md +++ b/docs/en/development/browse-code.md @@ -1,5 +1,5 @@ --- -toc_priority: 71 +toc_priority: 72 toc_title: Source Code Browser --- diff --git a/docs/en/development/build-cross-osx.md 
b/docs/en/development/build-cross-osx.md
index 6f3b3a717d0..c7e40013113 100644
--- a/docs/en/development/build-cross-osx.md
+++ b/docs/en/development/build-cross-osx.md
@@ -9,14 +9,14 @@ This is for the case when you have a Linux machine and want to use it to build `cl
 
 The cross-build for Mac OS X is based on the [Build instructions](../development/build.md), follow them first.
 
-## Install Clang-8 {#install-clang-8}
+## Install Clang-13
 
 Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup. For example, the commands for Bionic are:
 
 ``` bash
-sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main" >> /etc/apt/sources.list
-sudo apt-get install clang-8
+sudo echo "deb [trusted=yes] http://apt.llvm.org/bionic/ llvm-toolchain-bionic-13 main" >> /etc/apt/sources.list
+sudo apt-get install clang-13
 ```
 
 ## Install Cross-Compilation Toolset {#install-cross-compilation-toolset}
 
@@ -25,6 +25,7 @@ Let’s remember the path where we install `cctools` as ${CCTOOLS}
 
 ``` bash
 mkdir ${CCTOOLS}
+cd ${CCTOOLS}
 
 git clone https://github.com/tpoechtrager/apple-libtapi.git
 cd apple-libtapi
@@ -34,7 +35,7 @@ cd ..
 
 git clone https://github.com/tpoechtrager/cctools-port.git
 cd cctools-port/cctools
-./configure --prefix=${CCTOOLS} --with-libtapi=${CCTOOLS} --target=x86_64-apple-darwin
+./configure --prefix=$(readlink -f ${CCTOOLS}) --with-libtapi=$(readlink -f ${CCTOOLS}) --target=x86_64-apple-darwin
 make install
 ```
 
@@ -51,12 +52,10 @@ tar xJf MacOSX10.15.sdk.tar.xz -C build-darwin/cmake/toolchain/darwin-x86_64 --s
 
 ``` bash
 cd ClickHouse
-mkdir build-osx
-CC=clang-8 CXX=clang++-8 cmake . -Bbuild-osx -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake \
-    -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ar \
-    -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/x86_64-apple-darwin-ranlib \
-    -DLINKER_NAME=${CCTOOLS}/bin/x86_64-apple-darwin-ld
-ninja -C build-osx
+mkdir build-darwin
+cd build-darwin
+CC=clang-13 CXX=clang++-13 cmake -DCMAKE_AR:FILEPATH=${CCTOOLS}/bin/aarch64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=${CCTOOLS}/bin/aarch64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=${CCTOOLS}/bin/aarch64-apple-darwin-ranlib -DLINKER_NAME=${CCTOOLS}/bin/aarch64-apple-darwin-ld -DCMAKE_TOOLCHAIN_FILE=cmake/darwin/toolchain-x86_64.cmake ..
+ninja
 ```
 
 The resulting binary will have a Mach-O executable format and can’t be run on Linux.
diff --git a/docs/en/development/build-cross-riscv.md b/docs/en/development/build-cross-riscv.md
new file mode 100644
index 00000000000..977387af207
--- /dev/null
+++ b/docs/en/development/build-cross-riscv.md
@@ -0,0 +1,30 @@
+---
+toc_priority: 68
+toc_title: Build on Linux for RISC-V 64
+---
+
+# How to Build ClickHouse on Linux for RISC-V 64 Architecture {#how-to-build-clickhouse-on-linux-for-risc-v-64-architecture}
+
+As of writing (11.11.2021), building for RISC-V is considered highly experimental. Not all features can be enabled.
+
+This is for the case when you have a Linux machine and want to use it to build a `clickhouse` binary that will run on another Linux machine with the RISC-V 64 CPU architecture. This is intended for continuous integration checks that run on Linux servers.
+
+The cross-build for RISC-V 64 is based on the [Build instructions](../development/build.md), follow them first.
+ +## Install Clang-13 + +Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup or do +``` +sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" +``` + +## Build ClickHouse {#build-clickhouse} + +``` bash +cd ClickHouse +mkdir build-riscv64 +CC=clang-13 CXX=clang++-13 cmake . -Bbuild-riscv64 -G Ninja -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-riscv64.cmake -DGLIBC_COMPATIBILITY=OFF -DENABLE_LDAP=OFF -DOPENSSL_NO_ASM=ON -DENABLE_JEMALLOC=ON -DENABLE_PARQUET=OFF -DUSE_INTERNAL_PARQUET_LIBRARY=OFF -DENABLE_ORC=OFF -DUSE_INTERNAL_ORC_LIBRARY=OFF -DUSE_UNWIND=OFF -DUSE_INTERNAL_PROTOBUF_LIBRARY=ON -DENABLE_GRPC=OFF -DUSE_INTERNAL_GRPC_LIBRARY=OFF -DENABLE_HDFS=OFF -DUSE_INTERNAL_HDFS3_LIBRARY=OFF -DENABLE_MYSQL=OFF -DUSE_INTERNAL_MYSQL_LIBRARY=OFF +ninja -C build-riscv64 +``` + +The resulting binary will run only on Linux with the RISC-V 64 CPU architecture. diff --git a/docs/en/development/contrib.md b/docs/en/development/contrib.md index a9b9a5d1e44..07969f8ef6a 100644 --- a/docs/en/development/contrib.md +++ b/docs/en/development/contrib.md @@ -1,5 +1,5 @@ --- -toc_priority: 70 +toc_priority: 71 toc_title: Third-Party Libraries Used --- diff --git a/docs/en/development/developer-instruction.md b/docs/en/development/developer-instruction.md index 024ce27d60d..52fa307333c 100644 --- a/docs/en/development/developer-instruction.md +++ b/docs/en/development/developer-instruction.md @@ -37,7 +37,7 @@ Next, you need to download the source files onto your working machine. This is c In the command line terminal run: - git clone git@github.com:your_github_username/ClickHouse.git + git clone --recursive git@github.com:your_github_username/ClickHouse.git cd ClickHouse Note: please, substitute *your_github_username* with what is appropriate! @@ -65,7 +65,7 @@ It generally means that the SSH keys for connecting to GitHub are missing. These You can also clone the repository via https protocol: - git clone https://github.com/ClickHouse/ClickHouse.git + git clone --recursive https://github.com/ClickHouse/ClickHouse.git This, however, will not let you send your changes to the server. You can still use it temporarily and add the SSH keys later replacing the remote address of the repository with `git remote` command. 
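A concrete form of that later switch to SSH with `git remote`, as a sketch (substitute *your_github_username* exactly as in the clone commands above):

``` bash
# Point the existing clone at the SSH address once the keys are registered.
git remote set-url origin git@github.com:your_github_username/ClickHouse.git

# Verify that origin now uses SSH.
git remote -v
```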
@@ -241,7 +241,7 @@ Adding third-party libraries: https://clickhouse.com/docs/en/development/contrib Writing tests: https://clickhouse.com/docs/en/development/tests/ -List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3A%22easy+task%22 +List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest ## Test Data {#test-data} diff --git a/docs/en/development/style.md b/docs/en/development/style.md index bc38f0711cf..49b2f68b9f3 100644 --- a/docs/en/development/style.md +++ b/docs/en/development/style.md @@ -1,5 +1,5 @@ --- -toc_priority: 68 +toc_priority: 69 toc_title: C++ Guide --- diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index 0e2aa348483..ea32f608124 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -1,5 +1,5 @@ --- -toc_priority: 69 +toc_priority: 70 toc_title: Testing --- diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md index e494e9aec6a..691666cffef 100644 --- a/docs/en/engines/table-engines/integrations/s3.md +++ b/docs/en/engines/table-engines/integrations/s3.md @@ -11,7 +11,8 @@ This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ec ``` sql CREATE TABLE s3_engine_table (name String, value UInt32) -ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression]) + ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression]) + [SETTINGS ...] ``` **Engine parameters** @@ -23,21 +24,13 @@ ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compressi **Example** -1. Set up the `s3_engine_table` table: - ``` sql -CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip'); -``` +CREATE TABLE s3_engine_table (name String, value UInt32) + ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip') + SETTINGS input_format_with_names_use_header = 0; -2. Fill file: - -``` sql INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3); -``` -3. Query the data: - -``` sql SELECT * FROM s3_engine_table LIMIT 2; ``` @@ -73,57 +66,54 @@ For more information about virtual columns see [here](../../../engines/table-eng Constructions with `{}` are similar to the [remote](../../../sql-reference/table-functions/remote.md) table function. -**Example** +!!! warning "Warning" + If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`. -1. 
Suppose we have several files in CSV format with the following URIs on S3: - -- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv' - -There are several ways to make a table consisting of all six files: - -The first way: - -``` sql -CREATE TABLE table_with_range (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV'); -``` - -Another way: - -``` sql -CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV'); -``` - -Table consists of all the files in both directories (all files should satisfy format and schema described in query): - -``` sql -CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV'); -``` - -If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`. - -**Example** +**Example with wildcards 1** Create table with files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`: ``` sql -CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV'); +CREATE TABLE big_table (name String, value UInt32) + ENGINE = S3('https://storage.yandexcloud.net/my-bucket/my_folder/file-{000..999}.csv', 'CSV'); ``` -## Virtual Columns {#virtual-columns} +**Example with wildcards 2** -- `_path` — Path to the file. -- `_file` — Name of the file. +Suppose we have several files in CSV format with the following URIs on S3: -**See Also** +- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_1.csv' +- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_2.csv' +- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_3.csv' +- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_1.csv' +- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_2.csv' +- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_3.csv' -- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns) -## S3-related settings {#settings} +There are several ways to make a table consisting of all six files: + +1. Specify the range of file postfixes: + +``` sql +CREATE TABLE table_with_range (name String, value UInt32) + ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_{1..3}', 'CSV'); +``` + +2. Take all files with `some_file_` prefix (there should be no extra files with such prefix in both folders): + +``` sql +CREATE TABLE table_with_question_mark (name String, value UInt32) + ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_?', 'CSV'); +``` + +3. 
Take all the files in both folders (all files should satisfy format and schema described in query): + +``` sql +CREATE TABLE table_with_asterisk (name String, value UInt32) + ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/*', 'CSV'); +``` + +## S3-related Settings {#settings} The following settings can be set before query execution or placed into configuration file. @@ -165,49 +155,6 @@ The following settings can be specified in configuration file for given endpoint ``` -## Usage {#usage-examples} - -Suppose we have several files in CSV format with the following URIs on S3: - -- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv' - - -1. There are several ways to make a table consisting of all six files: - -``` sql -CREATE TABLE table_with_range (name String, value UInt32) -ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV'); -``` - -2. Another way: - -``` sql -CREATE TABLE table_with_question_mark (name String, value UInt32) -ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV'); -``` - -3. Table consists of all the files in both directories (all files should satisfy format and schema described in query): - -``` sql -CREATE TABLE table_with_asterisk (name String, value UInt32) -ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV'); -``` - -!!! warning "Warning" - If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`. - -4. 
Create table with files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`: - -``` sql -CREATE TABLE big_table (name String, value UInt32) -ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV'); -``` - ## See also - [s3 table function](../../../sql-reference/table-functions/s3.md) diff --git a/docs/en/getting-started/example-datasets/nyc-taxi.md b/docs/en/getting-started/example-datasets/nyc-taxi.md index ff29fef7fe0..64810d3fa37 100644 --- a/docs/en/getting-started/example-datasets/nyc-taxi.md +++ b/docs/en/getting-started/example-datasets/nyc-taxi.md @@ -381,8 +381,11 @@ We ran queries using a client located in a Yandex datacenter in Finland on a clu | servers | Q1 | Q2 | Q3 | Q4 | |---------|-------|-------|-------|-------| -| 1 | 0.490 | 1.224 | 2.104 | 3.593 | -| 3 | 0.212 | 0.438 | 0.733 | 1.241 | -| 140 | 0.028 | 0.043 | 0.051 | 0.072 | +| 1, E5-2650v2 | 0.490 | 1.224 | 2.104 | 3.593 | +| 3, E5-2650v2 | 0.212 | 0.438 | 0.733 | 1.241 | +| 1, AWS c5n.4xlarge | 0.249 | 1.279 | 1.738 | 3.527 | +| 1, AWS c5n.9xlarge | 0.130 | 0.584 | 0.777 | 1.811 | +| 3, AWS c5n.9xlarge | 0.057 | 0.231 | 0.285 | 0.641 | +| 140, E5-2650v2 | 0.028 | 0.043 | 0.051 | 0.072 | [Original article](https://clickhouse.com/docs/en/getting_started/example_datasets/nyc_taxi/) diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 34224d705c4..c16db5c3db2 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -177,6 +177,9 @@ This format is also available under the name `TSVRaw`. ## TabSeparatedWithNames {#tabseparatedwithnames} Differs from the `TabSeparated` format in that the column names are written in the first row. + +During parsing, the first row is expected to contain the column names. You can use column names to determine their position and to check their correctness. + If setting [input_format_with_names_use_header](../operations/settings/settings.md#settings-input_format_with_names_use_header) is set to 1, the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](../operations/settings/settings.md#settings-input_format_skip_unknown_fields) is set to 1. Otherwise, the first row will be skipped. 
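To illustrate how those two settings combine for `TabSeparatedWithNames`, here is a sketch; the target table `t (name String, value UInt32)` and a local `clickhouse-client` are assumptions:

``` bash
# The header row reorders the columns and carries an unknown `extra` column;
# with both settings enabled, columns are matched by name and `extra` is skipped.
printf 'value\tname\textra\n1\thello\tignored\n' | clickhouse-client \
    --input_format_with_names_use_header=1 \
    --input_format_skip_unknown_fields=1 \
    --query='INSERT INTO t FORMAT TabSeparatedWithNames'
```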
diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md index 6454262122f..a2f0944de47 100644 --- a/docs/en/interfaces/http.md +++ b/docs/en/interfaces/http.md @@ -432,7 +432,7 @@ Example: [^/]+)(/(?P[^/]+))?]]> - GET + GET TEST_HEADER_VALUE [^/]+)(/(?P[^/]+))?]]> @@ -639,4 +639,4 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler' < Relative Path File * Connection #0 to host localhost left intact -``` \ No newline at end of file +``` diff --git a/docs/en/interfaces/third-party/client-libraries.md b/docs/en/interfaces/third-party/client-libraries.md index cb8679e4bdd..342b1c9a496 100644 --- a/docs/en/interfaces/third-party/client-libraries.md +++ b/docs/en/interfaces/third-party/client-libraries.md @@ -35,6 +35,8 @@ toc_title: Client Libraries - NodeJs - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse) - [node-clickhouse](https://github.com/apla/node-clickhouse) + - [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse) + - [clickhouse-client](https://github.com/depyronick/clickhouse-client) - Perl - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse) - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse) diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index 0a2f8c57690..fd0f3a12b81 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -78,7 +78,8 @@ toc_title: Adopters | Ippon Technologies | Technology Consulting | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=205) | | Ivi | Online Cinema | Analytics, Monitoring | — | — | [Article in Russian, Jan 2018](https://habr.com/en/company/ivi/blog/347408/) | | Jinshuju 金数据 | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | -| kakaocorp | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) | +| Jitsu | Cloud Software | Data Pipeline | — | — | [Documentation](https://jitsu.com/docs/destinations-configuration/clickhouse-destination), [Hacker News](https://news.ycombinator.com/item?id=29106082) | +| kakaocorp | Internet company | — | — | — | [if(kakao)2020](https://tv.kakao.com/channel/3693125/cliplink/414129353), [if(kakao)2021](https://if.kakao.com/session/24) | | Kodiak Data | Clouds | Main product | — | — | [Slides in Engish, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) | | Kontur | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) | | Kuaishou | Video | — | — | — | [ClickHouse Meetup, October 2018](https://clickhouse.com/blog/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/) | @@ -101,11 +102,13 @@ toc_title: Adopters | Nuna Inc. 
| Health Data Analytics | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=170) | | Ok.ru | Social Network | — | 72 servers | 810 TB compressed, 50bn rows/day, 1.5 TB/day | [SmartData conference, Oct 2021](https://assets.ctfassets.net/oxjq45e8ilak/4JPHkbJenLgZhBGGyyonFP/57472ec6987003ec4078d0941740703b/____________________ClickHouse_______________________.pdf) | | Omnicomm | Transportation Monitoring | — | — | — | [Facebook post, Oct 2021](https://www.facebook.com/OmnicommTeam/posts/2824479777774500) | -| OneAPM | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) | +| OneAPM | Monitoring and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) | +| Open Targets | Genome Research | Genome Search | — | — | [Tweet, Oct 2021](https://twitter.com/OpenTargets/status/1452570865342758913?s=20), [Blog](https://blog.opentargets.org/graphql/) | | OZON | E-commerce | — | — | — | [Official website](https://job.ozon.ru/vacancy/razrabotchik-clickhouse-ekspluatatsiya-40991870/) | | Panelbear | Analytics | Monitoring and Analytics | — | — | [Tech Stack, November 2020](https://panelbear.com/blog/tech-stack/) | | Percent 百分点 | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) | | Percona | Performance analysis | Percona Monitoring and Management | — | — | [Official website, Mar 2020](https://www.percona.com/blog/2020/03/30/advanced-query-analysis-in-percona-monitoring-and-management-with-direct-clickhouse-access/) | +| Piwik PRO | Web Analytics | Main Product | — | — | [Official website, Dec 2018](https://piwik.pro/blog/piwik-pro-clickhouse-faster-efficient-reports/) | | Plausible | Analytics | Main Product | — | — | [Blog post, June 2020](https://twitter.com/PlausibleHQ/status/1273889629087969280) | | PostHog | Product Analytics | Main Product | — | — | [Release Notes, Oct 2020](https://posthog.com/blog/the-posthog-array-1-15-0) | | Postmates | Delivery | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=188) | @@ -173,5 +176,6 @@ toc_title: Adopters | ДомКлик | Real Estate | — | — | — | [Article in Russian, October 2021](https://habr.com/ru/company/domclick/blog/585936/) | | Deepl | Machine Learning | — | — | — | [Video, October 2021](https://www.youtube.com/watch?v=WIYJiPwxXdM&t=1182s) | | Vercel | Traffic and Performance Analytics | — | — | — | Direct reference, October 2021 | +| YourAnalytics | Web Analytics | — | — | — | [Tweet, Nov 2021](https://twitter.com/mikenikles/status/1460860140249235461) | [Original article](https://clickhouse.com/docs/en/introduction/adopters/) diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 803d10312f3..e00bc0a9ae4 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -107,7 +107,7 @@ Loading key from the environment variable: ```xml - + ``` @@ -120,7 +120,7 @@ Each of these methods can be applied for multiple keys: 00112233445566778899aabbccddeeff - + 1 @@ -763,6 +763,30 @@ Default value: 10000. 
12000 ``` +## max_thread_pool_free_size {#max-thread-pool-free-size} + +The number of threads that are always held in the Global Thread pool. + +Default value: 1000. + +**Example** + +``` xml +1200 +``` + +## thread_pool_queue_size {#thread-pool-queue-size} + +The maximum number of jobs that can be scheduled on the Global Thread pool. Increasing the queue size leads to larger memory usage. It is recommended to keep this value equal to `max_thread_pool_size`. + +Default value: 10000. + +**Example** + +``` xml +12000 +``` + ## merge_tree {#server_configuration_parameters-merge_tree} Fine tuning for tables in the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). @@ -1436,4 +1460,53 @@ To add an LDAP server as a remote user directory of users that are not defined l ``` -[Original article](https://clickhouse.com/docs/en/operations/server_configuration_parameters/settings/) +## total_memory_profiler_step {#total-memory-profiler-step} + +Sets the memory size (in bytes) for a stack trace at every peak allocation step. The data is stored in the [system.trace_log](../../operations/system-tables/trace_log.md) system table with `query_id` equal to an empty string. + +Possible values: + +- Positive integer. + +Default value: `4194304`. + +## total_memory_tracker_sample_probability {#total-memory-tracker-sample-probability} + +Allows collecting random allocations and deallocations and writing them to the [system.trace_log](../../operations/system-tables/trace_log.md) system table with `trace_type` equal to `MemorySample`, with the specified probability. The probability applies to every allocation or deallocation, regardless of the size of the allocation. Note that sampling happens only when the amount of untracked memory exceeds the untracked memory limit (default value is `4` MiB). It can be lowered if [total_memory_profiler_step](#total-memory-profiler-step) is lowered. You can set `total_memory_profiler_step` equal to `1` for extra fine-grained sampling. + +Possible values: + +- Positive integer. +- 0 — Writing of random allocations and deallocations to the `system.trace_log` system table is disabled. + +Default value: `0`. + +## mmap_cache_size {#mmap-cache-size} + +Sets the cache size for mapped files. The setting value is the number of mapped regions (usually equal to the number of mapped files). This setting allows avoiding frequent open/[mmap/munmap](https://en.wikipedia.org/wiki/Mmap)/close calls (which are very expensive due to subsequent page faults) and reusing mappings from several threads and queries. The amount of data in mapped files can be monitored in the [system.metrics](../../operations/system-tables/metrics.md) and [system.metric_log](../../operations/system-tables/metric_log.md) system tables by the `MMappedFiles` and `MMappedFileBytes` metrics, in [system.asynchronous_metrics](../../operations/system-tables/asynchronous_metrics.md) and [system.asynchronous_metrics_log](../../operations/system-tables/asynchronous_metric_log.md) by the `MMapCacheCells` metric, and also in [system.events](../../operations/system-tables/events.md), [system.processes](../../operations/system-tables/processes.md), [system.query_log](../../operations/system-tables/query_log.md), [system.query_thread_log](../../operations/system-tables/query_thread_log.md), and [system.query_views_log](../../operations/system-tables/query_views_log.md) by the `CreatedReadBufferMMap`, `CreatedReadBufferMMapFailed`, `MMappedFileCacheHits`, and `MMappedFileCacheMisses` events.
Note that the amount of data in mapped files does not consume memory directly and is not accounted for in query or server memory usage, because this memory can be discarded similarly to the OS page cache. The cache is dropped (the files are closed) automatically on the removal of old parts in tables of the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family; it can also be dropped manually with the `SYSTEM DROP MMAP CACHE` query. + +Possible values: + +- Positive integer. + +Default value: `1000`. + +## compiled_expression_cache_size {#compiled-expression-cache-size} + +Sets the cache size (in bytes) for [compiled expressions](../../operations/caches.md). + +Possible values: + +- Positive integer. + +Default value: `134217728`. + +## compiled_expression_cache_elements_size {#compiled_expression_cache_elements_size} + +Sets the cache size (in elements) for [compiled expressions](../../operations/caches.md). + +Possible values: + +- Positive integer. + +Default value: `10000`. diff --git a/docs/en/operations/settings/merge-tree-settings.md index 16ec55f026a..0fd1e54955c 100644 --- a/docs/en/operations/settings/merge-tree-settings.md +++ b/docs/en/operations/settings/merge-tree-settings.md @@ -343,3 +343,16 @@ Default value: `0`. **Usage** The value of the `min_bytes_to_rebalance_partition_over_jbod` setting should be less than the value of the [max_bytes_to_merge_at_max_space_in_pool](../../operations/settings/merge-tree-settings.md#max-bytes-to-merge-at-max-space-in-pool) setting. Otherwise, ClickHouse throws an exception. + +## detach_not_byte_identical_parts {#detach_not_byte_identical_parts} + +Enables or disables detaching a data part on a replica after a merge or a mutation, if it is not byte-identical to data parts on other replicas. If disabled, the data part is removed. Activate this setting if you want to analyze such parts later. + +The setting is applicable to `MergeTree` tables with enabled [data replication](../../engines/table-engines/mergetree-family/replication.md). + +Possible values: + +- 0 — Parts are removed. +- 1 — Parts are detached. + +Default value: `0`. diff --git a/docs/en/operations/settings/settings.md index 64b445c8b72..74a0f3a8c0d 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -4060,3 +4060,14 @@ Possible values: - `'by_name_case_insensitive'` — Names in enums should be the same case-insensitive, values can be different. Default value: `'by_values'`. + +## min_bytes_to_use_mmap_io {#min-bytes-to-use-mmap-io} + +This is an experimental setting. Sets the minimum amount of memory for reading large files without copying data from the kernel to userspace. The recommended threshold is about 64 MB, because [mmap/munmap](https://en.wikipedia.org/wiki/Mmap) is slow. It makes sense only for large files and helps only if the data resides in the page cache. + +Possible values: + +- Positive integer. +- 0 — Big files are read only by copying data from the kernel to userspace. + +Default value: `0`. diff --git a/docs/en/operations/system-tables/dictionaries.md index bf8dcb9b5e4..5fd326297c8 100644 --- a/docs/en/operations/system-tables/dictionaries.md +++ b/docs/en/operations/system-tables/dictionaries.md @@ -6,6 +6,7 @@ Columns: - `database` ([String](../../sql-reference/data-types/string.md)) — Name of the database containing the dictionary created by DDL query.
Empty string for other dictionaries. - `name` ([String](../../sql-reference/data-types/string.md)) — [Dictionary name](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md). +- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Dictionary UUID. - `status` ([Enum8](../../sql-reference/data-types/enum.md)) — Dictionary status. Possible values: - `NOT_LOADED` — Dictionary was not loaded because it was not used. - `LOADED` — Dictionary loaded successfully. @@ -15,9 +16,10 @@ Columns: - `FAILED_AND_RELOADING` — Could not load the dictionary as a result of an error and is loading now. - `origin` ([String](../../sql-reference/data-types/string.md)) — Path to the configuration file that describes the dictionary. - `type` ([String](../../sql-reference/data-types/string.md)) — Type of a dictionary allocation. [Storing Dictionaries in Memory](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md). -- `key` — [Key type](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key): Numeric Key ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) or Сomposite key ([String](../../sql-reference/data-types/string.md)) — form “(type 1, type 2, …, type n)”. +- `key.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [key names](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key) provided by the dictionary. +- `key.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [key types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key) provided by the dictionary. - `attribute.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of [attribute names](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary. -- `attribute.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [attribute types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) that are provided by the dictionary. +- `attribute.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Corresponding array of [attribute types](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes) provided by the dictionary. - `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary. - `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot. - `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache. @@ -31,34 +33,56 @@ Columns: - `last_successful_update_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with external sources and investigate causes. 
- `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — Duration of a dictionary loading. - `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn’t be created. +- `comment` ([String](../../sql-reference/data-types/string.md)) — Text of the comment to dictionary. **Example** -Configure the dictionary. +Configure the dictionary: ``` sql -CREATE DICTIONARY dictdb.dict +CREATE DICTIONARY dictionary_with_comment ( - `key` Int64 DEFAULT -1, - `value_default` String DEFAULT 'world', - `value_expression` String DEFAULT 'xxx' EXPRESSION 'toString(127 * 172)' + id UInt64, + value String ) -PRIMARY KEY key -SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dicttbl' DB 'dictdb')) -LIFETIME(MIN 0 MAX 1) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table')) LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000) +COMMENT 'The temporary dictionary'; ``` Make sure that the dictionary is loaded. ``` sql -SELECT * FROM system.dictionaries +SELECT * FROM system.dictionaries LIMIT 1 FORMAT Vertical; ``` ``` text -┌─database─┬─name─┬─status─┬─origin──────┬─type─┬─key────┬─attribute.names──────────────────────┬─attribute.types─────┬─bytes_allocated─┬─query_count─┬─hit_rate─┬─element_count─┬───────────load_factor─┬─source─────────────────────┬─lifetime_min─┬─lifetime_max─┬──loading_start_time─┌──last_successful_update_time─┬──────loading_duration─┬─last_exception─┐ -│ dictdb │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │ 74032 │ 0 │ 1 │ 1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │ 0 │ 1 │ 2020-03-04 04:17:34 │ 2020-03-04 04:30:34 │ 0.002 │ │ -└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘ +Row 1: +────── +database: default +name: dictionary_with_comment +uuid: 4654d460-0d03-433a-8654-d4600d03d33a +status: NOT_LOADED +origin: 4654d460-0d03-433a-8654-d4600d03d33a +type: +key.names: ['id'] +key.types: ['UInt64'] +attribute.names: ['value'] +attribute.types: ['String'] +bytes_allocated: 0 +query_count: 0 +hit_rate: 0 +found_rate: 0 +element_count: 0 +load_factor: 0 +source: +lifetime_min: 0 +lifetime_max: 0 +loading_start_time: 1970-01-01 00:00:00 +last_successful_update_time: 1970-01-01 00:00:00 +loading_duration: 0 +last_exception: +comment: The temporary dictionary ``` - -[Original article](https://clickhouse.com/docs/en/operations/system-tables/dictionaries) diff --git a/docs/en/operations/system-tables/storage_policies.md b/docs/en/operations/system-tables/storage_policies.md index 3d5be4b952b..c9d2659c289 100644 --- a/docs/en/operations/system-tables/storage_policies.md +++ b/docs/en/operations/system-tables/storage_policies.md @@ -6,7 +6,7 @@ Columns: - `policy_name` ([String](../../sql-reference/data-types/string.md)) — Name of the storage policy. - `volume_name` ([String](../../sql-reference/data-types/string.md)) — Volume name defined in the storage policy. -- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Volume order number in the configuration. 
+- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Volume order number in the configuration; the data fills the volumes according to this priority, i.e. data during inserts and merges is written to volumes with a lower priority value (taking into account other rules: TTL, `max_data_part_size`, `move_factor`). - `disks` ([Array(String)](../../sql-reference/data-types/array.md)) — Disk names, defined in the storage policy. - `max_data_part_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit). - `move_factor` ([Float64](../../sql-reference/data-types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of configuration parameter, ClickHouse start to move data to the next volume in order. diff --git a/docs/en/operations/tips.md index ce91d865735..6866c4db491 100644 --- a/docs/en/operations/tips.md +++ b/docs/en/operations/tips.md @@ -69,6 +69,8 @@ Regardless of RAID use, always use replication for data security. Enable NCQ with a long queue. For HDD, choose the CFQ scheduler, and for SSD, choose noop. Don’t reduce the ‘readahead’ setting. For HDD, enable the write cache. +Make sure that [fstrim](https://en.wikipedia.org/wiki/Trim_(computing)) is enabled for NVMe and SSD disks in your OS (it is usually implemented as a cron job or a systemd service). + ## File System {#file-system} Ext4 is the most reliable option. Set the mount options `noatime`. @@ -265,4 +267,8 @@ script end script ``` +## Antivirus software {#antivirus-software} + +If you use antivirus software, configure it to skip folders with ClickHouse data files (`/var/lib/clickhouse`); otherwise performance may be reduced and you may experience unexpected errors during data ingestion and background merges. + {## [Original article](https://clickhouse.com/docs/en/operations/tips/) ##} diff --git a/docs/en/sql-reference/data-types/string.md index 5b0059b330e..3d0f01e147f 100644 --- a/docs/en/sql-reference/data-types/string.md +++ b/docs/en/sql-reference/data-types/string.md @@ -10,6 +10,10 @@ The String type replaces the types VARCHAR, BLOB, CLOB, and others from other DB When creating tables, numeric parameters for string fields can be set (e.g. `VARCHAR(255)`), but ClickHouse ignores them. +Aliases: + +- `String` — `LONGTEXT`, `MEDIUMTEXT`, `TINYTEXT`, `TEXT`, `LONGBLOB`, `MEDIUMBLOB`, `TINYBLOB`, `BLOB`, `VARCHAR`, `CHAR` (see the sketch below). + ## Encodings {#encodings} ClickHouse does not have the concept of encodings. Strings can contain an arbitrary set of bytes, which are stored and output as-is.
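A short illustration of the aliases listed above (a minimal sketch; the `alias_demo` table name is hypothetical). Columns declared with MySQL-style types resolve to plain `String`:

``` sql
CREATE TABLE alias_demo
(
    `t` TEXT,      -- resolves to String
    `b` LONGBLOB,  -- resolves to String
    `v` VARCHAR    -- resolves to String
)
ENGINE = Memory;

-- The SHOW CREATE TABLE output displays all three columns with the String type.
SHOW CREATE TABLE alias_demo;
```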
diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index e9746e59d1d..095f059513c 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -66,7 +66,6 @@ Types of sources (`source_type`): - DBMS - [ODBC](#dicts-external_dicts_dict_sources-odbc) - [MySQL](#dicts-external_dicts_dict_sources-mysql) - - [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql) - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse) - [MongoDB](#dicts-external_dicts_dict_sources-mongodb) - [Redis](#dicts-external_dicts_dict_sources-redis) @@ -210,45 +209,6 @@ Setting fields: When creating a dictionary using the DDL command (`CREATE DICTIONARY ...`) remote hosts for HTTP dictionaries are checked against the contents of `remote_url_allow_hosts` section from config to prevent database users to access arbitrary HTTP server. -## ODBC {#dicts-external_dicts_dict_sources-odbc} - -You can use this method to connect any database that has an ODBC driver. - -Example of settings: - -``` xml - - - DatabaseName - ShemaName.TableName
- DSN=some_parameters - SQL_QUERY -
- -``` - -or - -``` sql -SOURCE(ODBC( - db 'DatabaseName' - table 'SchemaName.TableName' - connection_string 'DSN=some_parameters' - invalidate_query 'SQL_QUERY' -)) -``` - -Setting fields: - -- `db` – Name of the database. Omit it if the database name is set in the `` parameters. - `table` – Name of the table and schema if exists. - `connection_string` – Connection string. - `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). - -ClickHouse receives quoting symbols from ODBC-driver and quote all settings in queries to driver, so it’s necessary to set table name accordingly to table name case in database. - -If you have a problems with encodings when using Oracle, see the corresponding [F.A.Q.](../../../faq/integration/oracle-odbc.md) item. - ### Known Vulnerability of the ODBC Dictionary Functionality {#known-vulnerability-of-the-odbc-dictionary-functionality} !!! attention "Attention" @@ -464,6 +424,51 @@ LIFETIME(MIN 300 MAX 360) ## DBMS {#dbms} +### ODBC {#dicts-external_dicts_dict_sources-odbc} + +You can use this method to connect to any database that has an ODBC driver. + +Example of settings: + +``` xml + + + DatabaseName + SchemaName.TableName
+ DSN=some_parameters + SQL_QUERY + SELECT id, value_1, value_2 FROM SchemaName.TableName +
+ +``` + +or + +``` sql +SOURCE(ODBC( + db 'DatabaseName' + table 'SchemaName.TableName' + connection_string 'DSN=some_parameters' + invalidate_query 'SQL_QUERY' + query 'SELECT id, value_1, value_2 FROM db_name.table_name' +)) +``` + +Setting fields: + +- `db` – Name of the database. Omit it if the database name is set in the `` parameters. +- `table` – Name of the table and schema if it exists. +- `connection_string` – Connection string. +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). +- `query` – The custom query. Optional parameter. + +!!! info "Note" + The `table` and `query` fields cannot be used together, and one of them must be declared. + +ClickHouse receives quoting symbols from the ODBC driver and quotes all settings in queries to the driver, so it is necessary to set the table name according to the table name case in the database. + +If you have problems with encodings when using Oracle, see the corresponding [F.A.Q.](../../../faq/integration/oracle-odbc.md) item. + ### Mysql {#dicts-external_dicts_dict_sources-mysql} Example of settings: @@ -487,6 +492,7 @@ Example of settings: id=10 SQL_QUERY true + SELECT id, value_1, value_2 FROM db_name.table_name ``` @@ -505,6 +511,7 @@ SOURCE(MYSQL( where 'id=10' invalidate_query 'SQL_QUERY' fail_on_connection_loss 'true' + query 'SELECT id, value_1, value_2 FROM db_name.table_name' )) ``` @@ -531,6 +538,11 @@ Setting fields: - `fail_on_connection_loss` – The configuration parameter that controls behavior of the server on connection loss. If `true`, an exception is thrown immediately if the connection between client and server was lost. If `false`, the ClickHouse server retries to execute the query three times before throwing an exception. Note that retrying leads to increased response times. Default value: `false`. +- `query` – The custom query. Optional parameter. + +!!! info "Note" + The `table` or `where` fields cannot be used together with the `query` field, and either `table` or `query` must be declared. + MySQL can be connected on a local host via sockets. To do this, set `host` and `socket`. Example of settings: @@ -547,6 +559,7 @@ Example of settings: id=10 SQL_QUERY true + SELECT id, value_1, value_2 FROM db_name.table_name ``` @@ -564,6 +577,7 @@ SOURCE(MYSQL( where 'id=10' invalidate_query 'SQL_QUERY' fail_on_connection_loss 'true' + query 'SELECT id, value_1, value_2 FROM db_name.table_name' )) ``` @@ -582,6 +596,7 @@ Example of settings: ids
id=10 1 + SELECT id, value_1, value_2 FROM default.ids ``` @@ -598,6 +613,7 @@ SOURCE(CLICKHOUSE( table 'ids' where 'id=10' secure 1 + query 'SELECT id, value_1, value_2 FROM default.ids' )); ``` @@ -612,6 +628,10 @@ Setting fields: - `where` – The selection criteria. May be omitted. - `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). - `secure` - Use ssl for connection. +- `query` – The custom query. Optional parameter. + +!!! info "Note" + The `table` or `where` fields cannot be used together with the `query` field, and either `table` or `query` must be declared. ### Mongodb {#dicts-external_dicts_dict_sources-mongodb} @@ -703,27 +723,30 @@ Example of settings: One "SomeColumn" = 42 8 + SELECT id, value_1, value_2 FROM database_name.table_name ``` Setting fields: -- `host` – The Cassandra host or comma-separated list of hosts. -- `port` – The port on the Cassandra servers. If not specified, default port 9042 is used. -- `user` – Name of the Cassandra user. -- `password` – Password of the Cassandra user. -- `keyspace` – Name of the keyspace (database). -- `column_family` – Name of the column family (table). -- `allow_filering` – Flag to allow or not potentially expensive conditions on clustering key columns. Default value is 1. -- `partition_key_prefix` – Number of partition key columns in primary key of the Cassandra table. -Required for compose key dictionaries. Order of key columns in the dictionary definition must be the same as in Cassandra. -Default value is 1 (the first key column is a partition key and other key columns are clustering key). -- `consistency` – Consistency level. Possible values: `One`, `Two`, `Three`, -`All`, `EachQuorum`, `Quorum`, `LocalQuorum`, `LocalOne`, `Serial`, `LocalSerial`. Default is `One`. -- `where` – Optional selection criteria. -- `max_threads` – The maximum number of threads to use for loading data from multiple partitions in compose key dictionaries. -### PosgreSQL {#dicts-external_dicts_dict_sources-postgresql} +- `host` – The Cassandra host or comma-separated list of hosts. +- `port` – The port on the Cassandra servers. If not specified, default port 9042 is used. +- `user` – Name of the Cassandra user. +- `password` – Password of the Cassandra user. +- `keyspace` – Name of the keyspace (database). +- `column_family` – Name of the column family (table). +- `allow_filering` – Flag to allow or disallow potentially expensive conditions on clustering key columns. Default value is 1. +- `partition_key_prefix` – Number of partition key columns in the primary key of the Cassandra table. Required for composite key dictionaries. The order of key columns in the dictionary definition must be the same as in Cassandra. Default value is 1 (the first key column is a partition key and the other key columns are clustering keys). +- `consistency` – Consistency level. Possible values: `One`, `Two`, `Three`, `All`, `EachQuorum`, `Quorum`, `LocalQuorum`, `LocalOne`, `Serial`, `LocalSerial`. Default value is `One`. +- `where` – Optional selection criteria. +- `max_threads` – The maximum number of threads to use for loading data from multiple partitions in composite key dictionaries. +- `query` – The custom query. Optional parameter. + +!!! info "Note" + The `column_family` or `where` fields cannot be used together with the `query` field,
and either `column_family` or `query` must be declared. + +### PostgreSQL {#dicts-external_dicts_dict_sources-postgresql} Example of settings: @@ -737,6 +760,7 @@ Example of settings: table_name
id=10 SQL_QUERY + SELECT id, value_1, value_2 FROM db_name.table_name ``` @@ -755,6 +779,7 @@ SOURCE(POSTGRESQL( replica(host 'example01-2' port 5432 priority 2) where 'id=10' invalidate_query 'SQL_QUERY' + query 'SELECT id, value_1, value_2 FROM db_name.table_name' )) ``` @@ -764,11 +789,15 @@ Setting fields: - `port` – The port on the PostgreSQL server. You can specify it for all replicas, or for each one individually (inside ``). - `user` – Name of the PostgreSQL user. You can specify it for all replicas, or for each one individually (inside ``). - `password` – Password of the PostgreSQL user. You can specify it for all replicas, or for each one individually (inside ``). -- `replica` – Section of replica configurations. There can be multiple sections. - - `replica/host` – The PostgreSQL host. - - `replica/port` – The PostgreSQL port. - - `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority. +- `replica` – Section of replica configurations. There can be multiple sections: + - `replica/host` – The PostgreSQL host. + - `replica/port` – The PostgreSQL port. + - `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority. - `db` – Name of the database. - `table` – Name of the table. -- `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` clause in PostgreSQL, for example, `id > 10 AND id < 20`. Optional parameter. +- `where` – The selection criteria. The syntax for conditions is the same as for the `WHERE` clause in PostgreSQL. For example, `id > 10 AND id < 20`. Optional parameter. - `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). +- `query` – The custom query. Optional parameter. + +!!! info "Note" + The `table` or `where` fields cannot be used together with the `query` field, and either `table` or `query` must be declared. diff --git a/docs/en/sql-reference/functions/bit-functions.md index 5af04f75e66..899b6385a3e 100644 --- a/docs/en/sql-reference/functions/bit-functions.md +++ b/docs/en/sql-reference/functions/bit-functions.md @@ -5,7 +5,7 @@ toc_title: Bit # Bit Functions {#bit-functions} -Bit functions work for any pair of types from UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, or Float64. +Bit functions work for any pair of types from `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, `Int64`, `Float32`, or `Float64`. Some functions support `String` and `FixedString` types. The result type is an integer with bits equal to the maximum bits of its arguments. If at least one of the arguments is signed, the result is a signed number. If an argument is a floating-point number, it is cast to Int64. @@ -19,8 +19,100 @@ The result type is an integer with bits equal to the maximum bits of its argumen ## bitShiftLeft(a, b) {#bitshiftlefta-b} +Shifts the binary representation of a value to the left by a specified number of bit positions. + +A `FixedString` or a `String` is treated as a single multibyte value. + +Bits of a `FixedString` value are lost as they are shifted out.
In contrast, a `String` value is extended with additional bytes, so no bits are lost. + +**Syntax** + +``` sql +bitShiftLeft(a, b) +``` + +**Arguments** + +- `a` — A value to shift. [Integer types](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md). +- `b` — The number of shift positions. [Unsigned integer types](../../sql-reference/data-types/int-uint.md); 64-bit types or smaller are allowed. + +**Returned value** + +- Shifted value. + +The type of the returned value is the same as the type of the input value. + +**Example** + +In the following queries, the [bin](encoding-functions.md#bin) and [hex](encoding-functions.md#hex) functions are used to show the bits of the shifted values. + +``` sql +SELECT 99 AS a, bin(a), bitShiftLeft(a, 2) AS a_shifted, bin(a_shifted); +SELECT 'abc' AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted); +SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted); +``` + +Result: + +``` text +┌──a─┬─bin(99)──┬─a_shifted─┬─bin(bitShiftLeft(99, 2))─┐ +│ 99 │ 01100011 │ 140 │ 10001100 │ +└────┴──────────┴───────────┴──────────────────────────┘ +┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftLeft('abc', 4))─┐ +│ abc │ 616263 │ &0 │ 06162630 │ +└─────┴────────────┴───────────┴─────────────────────────────┘ +┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftLeft(toFixedString('abc', 3), 4))─┐ +│ abc │ 616263 │ &0 │ 162630 │ +└─────┴──────────────────────────────┴───────────┴───────────────────────────────────────────────┘ +``` + ## bitShiftRight(a, b) {#bitshiftrighta-b} +Shifts the binary representation of a value to the right by a specified number of bit positions. + +A `FixedString` or a `String` is treated as a single multibyte value. Note that the length of a `String` value is reduced as bits are shifted out. + +**Syntax** + +``` sql +bitShiftRight(a, b) +``` + +**Arguments** + +- `a` — A value to shift. [Integer types](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md). +- `b` — The number of shift positions. [Unsigned integer types](../../sql-reference/data-types/int-uint.md); 64-bit types or smaller are allowed. + +**Returned value** + +- Shifted value. + +The type of the returned value is the same as the type of the input value.
+ +**Example** + +Query: + +``` sql +SELECT 101 AS a, bin(a), bitShiftRight(a, 2) AS a_shifted, bin(a_shifted); +SELECT 'abc' AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted); +SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted); +``` + +Result: + +``` text +┌───a─┬─bin(101)─┬─a_shifted─┬─bin(bitShiftRight(101, 2))─┐ +│ 101 │ 01100101 │ 25 │ 00011001 │ +└─────┴──────────┴───────────┴────────────────────────────┘ +┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftRight('abc', 12))─┐ +│ abc │ 616263 │ │ 0616 │ +└─────┴────────────┴───────────┴───────────────────────────────┘ +┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftRight(toFixedString('abc', 3), 12))─┐ +│ abc │ 616263 │ │ 000616 │ +└─────┴──────────────────────────────┴───────────┴─────────────────────────────────────────────────┘ +``` + ## bitRotateLeft(a, b) {#bitrotatelefta-b} ## bitRotateRight(a, b) {#bitrotaterighta-b} diff --git a/docs/en/sql-reference/functions/json-functions.md index f3617082522..4e2c83eaf23 100644 --- a/docs/en/sql-reference/functions/json-functions.md +++ b/docs/en/sql-reference/functions/json-functions.md @@ -216,6 +216,44 @@ Example: SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)]; +## JSONExtractKeys {#jsonextractkeysjson-indices-or-keys} + +Parses a JSON string and extracts the keys. + +**Syntax** + +``` sql +JSONExtractKeys(json[, a, b, c...]) +``` + +**Arguments** + +- `json` — [String](../../sql-reference/data-types/string.md) with valid JSON. +- `a, b, c...` — Comma-separated indices or keys that specify the path to the inner field in a nested JSON object. Each argument can be either a [String](../../sql-reference/data-types/string.md) to get the field by the key or an [Integer](../../sql-reference/data-types/int-uint.md) to get the N-th field (indexed from 1, negative integers count from the end). If not set, the whole JSON is parsed as the top-level object. Optional parameter. + +**Returned value** + +Array with the keys of the JSON. + +Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). + +**Example** + +Query: + +```sql +SELECT JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}'); +``` + +Result: + +``` text +┌─JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}')─┐ +│ ['a','b'] │ +└────────────────────────────────────────────────────────────┘ +``` + ## JSONExtractRaw(json\[, indices_or_keys\]…) {#jsonextractrawjson-indices-or-keys} Returns a part of JSON as unparsed string. diff --git a/docs/en/sql-reference/functions/tuple-map-functions.md index 47e33806930..8ead8c58c7a 100644 --- a/docs/en/sql-reference/functions/tuple-map-functions.md +++ b/docs/en/sql-reference/functions/tuple-map-functions.md @@ -320,7 +320,7 @@ Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operat **Syntax** ```sql -mapKeys(map) +mapValues(map) ``` **Parameters** diff --git a/docs/en/sql-reference/operators/exists.md b/docs/en/sql-reference/operators/exists.md new file mode 100644 index 00000000000..ee0c7317637 --- /dev/null +++ b/docs/en/sql-reference/operators/exists.md @@ -0,0 +1,44 @@ +# EXISTS {#exists-operator} + +The `EXISTS` operator checks whether there are any records in the result of a subquery. If the result is empty, the operator returns `0`; otherwise, it returns `1`.
`EXISTS` can be used in a [WHERE](../../sql-reference/statements/select/where.md) clause. + +!!! warning "Warning" + References to main query tables and columns are not supported in a subquery. + +**Syntax** + +```sql +WHERE EXISTS(subquery) +``` + +**Example** + +Query with a subquery returning several rows: + +``` sql +SELECT count() FROM numbers(10) WHERE EXISTS(SELECT number FROM numbers(10) WHERE number > 8); +``` + +Result: + +``` text +┌─count()─┐ +│ 10 │ +└─────────┘ +``` + +Query with a subquery that returns an empty result: + +``` sql +SELECT count() FROM numbers(10) WHERE EXISTS(SELECT number FROM numbers(10) WHERE number > 11); +``` + +Result: + +``` text +┌─count()─┐ +│ 0 │ +└─────────┘ +``` diff --git a/docs/en/sql-reference/operators/index.md index efbc600fdd4..1c59b25fc63 100644 --- a/docs/en/sql-reference/operators/index.md +++ b/docs/en/sql-reference/operators/index.md @@ -71,7 +71,7 @@ For tuple subtraction: [tupleMinus](../../sql-reference/functions/tuple-function ## Operators for Working with Data Sets {#operators-for-working-with-data-sets} -*See [IN operators](../../sql-reference/operators/in.md).* +See [IN operators](../../sql-reference/operators/in.md) and the [EXISTS](../../sql-reference/operators/exists.md) operator. `a IN ...` – The `in(a, b)` function. diff --git a/docs/en/sql-reference/statements/alter/comment.md b/docs/en/sql-reference/statements/alter/comment.md new file mode 100644 index 00000000000..67a17fc8974 --- /dev/null +++ b/docs/en/sql-reference/statements/alter/comment.md @@ -0,0 +1,58 @@ +--- +toc_priority: 51 +toc_title: COMMENT +--- + +# ALTER TABLE … MODIFY COMMENT {#alter-modify-comment} + +Adds, modifies, or removes a comment on the table, regardless of whether it was set before. The comment change is reflected in both the [system.tables](../../../operations/system-tables/tables.md) table and the `SHOW CREATE TABLE` query. + +**Syntax** + +``` sql +ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY COMMENT 'Comment' +``` + +**Examples** + +Creating a table with a comment (for more information, see the [COMMENT](../../../sql-reference/statements/create/table.md#comment-table) clause): + +``` sql +CREATE TABLE table_with_comment +( + `k` UInt64, + `s` String +) +ENGINE = Memory() +COMMENT 'The temporary table'; +``` + +Modifying the table comment: + +``` sql +ALTER TABLE table_with_comment MODIFY COMMENT 'new comment on a table'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND name = 'table_with_comment'; +``` + +Output of a new comment: + +```text +┌─comment────────────────┐ +│ new comment on a table │ +└────────────────────────┘ +``` + +Removing the table comment: + +``` sql +ALTER TABLE table_with_comment MODIFY COMMENT ''; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND name = 'table_with_comment'; +``` + +Output of a removed comment: + +```text +┌─comment─┐ +│ │ +└─────────┘ +``` diff --git a/docs/en/sql-reference/statements/alter/index.md index dbf695edf3b..0d5909518ed 100644 --- a/docs/en/sql-reference/statements/alter/index.md +++ b/docs/en/sql-reference/statements/alter/index.md @@ -32,6 +32,8 @@ These `ALTER` statements modify entities related to role-based access control: - [ROW POLICY](../../../sql-reference/statements/alter/row-policy.md) - [SETTINGS PROFILE](../../../sql-reference/statements/alter/settings-profile.md) +[ALTER TABLE ... 
MODIFY COMMENT](../../../sql-reference/statements/alter/comment.md) statement adds, modifies, or removes a comment on the table, regardless of whether it was set before. + ## Mutations {#mutations} `ALTER` queries that are intended to manipulate table data are implemented with a mechanism called “mutations”, most notably [ALTER TABLE … DELETE](../../../sql-reference/statements/alter/delete.md) and [ALTER TABLE … UPDATE](../../../sql-reference/statements/alter/update.md). They are asynchronous background processes similar to merges in [MergeTree](../../../engines/table-engines/mergetree-family/index.md) tables that produce new “mutated” versions of parts. diff --git a/docs/en/sql-reference/statements/create/dictionary.md index 889669da5c8..61428cce126 100644 --- a/docs/en/sql-reference/statements/create/dictionary.md +++ b/docs/en/sql-reference/statements/create/dictionary.md @@ -7,13 +7,13 @@ toc_title: DICTIONARY Creates a new [external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) with given [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) and [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). -Syntax: +**Syntax** ``` sql -CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster] +CREATE [OR REPLACE] DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster] ( key1 type1 [DEFAULT|EXPRESSION expr1] [IS_OBJECT_ID], - key2 type2 [DEFAULT|EXPRESSION expr2] , + key2 type2 [DEFAULT|EXPRESSION expr2], attr1 type2 [DEFAULT|EXPRESSION expr3] [HIERARCHICAL|INJECTIVE], attr2 type2 [DEFAULT|EXPRESSION expr4] [HIERARCHICAL|INJECTIVE] ) @@ -21,6 +21,8 @@ PRIMARY KEY key1, key2 SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN])) LAYOUT(LAYOUT_NAME([param_name param_value])) LIFETIME({MIN min_val MAX max_val | max_val}) +SETTINGS(setting_name = setting_value, setting_name = setting_value, ...) +COMMENT 'Comment' ``` External dictionary structure consists of attributes. Dictionary attributes are specified similarly to table columns. The only required attribute property is its type, all other properties may have default values. @@ -30,3 +32,68 @@ External dictionary structure consists of attributes. Dictionary attributes are Depending on dictionary [layout](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) one or more attributes can be specified as dictionary keys. For more information, see [External Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) section. + +You can add a comment to the dictionary when creating it by using the `COMMENT` clause. 
+ +**Example** + +Input table `source_table`: + +``` text +┌─id─┬─value──┐ +│ 1 │ First │ +│ 2 │ Second │ +└────┴────────┘ +``` + +Creating the dictionary: + +``` sql +CREATE DICTIONARY dictionary_with_comment +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table')) +LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000) +COMMENT 'The temporary dictionary'; +``` + +Output the dictionary: + +``` sql +SHOW CREATE DICTIONARY dictionary_with_comment; +``` + +```text +┌─statement───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ CREATE DICTIONARY default.dictionary_with_comment +( + `id` UInt64, + `value` String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(FLAT()) +COMMENT 'The temporary dictionary' │ +└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +Output the comment to dictionary: + +``` sql +SELECT comment FROM system.dictionaries WHERE name == 'dictionary_with_comment' AND database == currentDatabase(); +``` + +```text +┌─comment──────────────────┐ +│ The temporary dictionary │ +└──────────────────────────┘ +``` + +**See Also** + +- [system.dictionaries](../../../operations/system-tables/dictionaries.md) — This table contains information about [external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index 5334532a54f..d64642704f5 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -252,21 +252,47 @@ CREATE TABLE codec_example ENGINE = MergeTree() ``` - + +**Example** + +```sql +CREATE TABLE mytable +( + x String Codec(AES_128_GCM_SIV) +) +ENGINE = MergeTree ORDER BY x; +``` + +!!!note "Note" + If compression needs to be applied, it must be explicitly specified. Otherwise, only encryption will be applied to data. + +**Example** + +```sql +CREATE TABLE mytable +( + x String Codec(Delta, LZ4, AES_128_GCM_SIV) +) +ENGINE = MergeTree ORDER BY x; +``` + ## Temporary Tables {#temporary-tables} ClickHouse supports temporary tables which have the following characteristics: diff --git a/docs/en/sql-reference/statements/select/where.md b/docs/en/sql-reference/statements/select/where.md index 69505a51db4..348b869e2db 100644 --- a/docs/en/sql-reference/statements/select/where.md +++ b/docs/en/sql-reference/statements/select/where.md @@ -6,9 +6,51 @@ toc_title: WHERE `WHERE` clause allows to filter the data that is coming from [FROM](../../../sql-reference/statements/select/from.md) clause of `SELECT`. -If there is a `WHERE` clause, it must contain an expression with the `UInt8` type. This is usually an expression with comparison and logical operators. Rows where this expression evaluates to 0 are excluded from further transformations or result. +If there is a `WHERE` clause, it must contain an expression with the `UInt8` type. This is usually an expression with comparison and logical operators. 
Rows where this expression evaluates to `0` are excluded from further transformations or result. `WHERE` expression is evaluated on the ability to use indexes and partition pruning, if the underlying table engine supports that. !!! note "Note" - There’s a filtering optimization called [prewhere](../../../sql-reference/statements/select/prewhere.md). + There is a filtering optimization called [PREWHERE](../../../sql-reference/statements/select/prewhere.md). + +If you need to test a value for [NULL](../../../sql-reference/syntax.md#null-literal), use the [IS NULL](../../operators/index.md#operator-is-null) and [IS NOT NULL](../../operators/index.md#is-not-null) operators or the [isNull](../../../sql-reference/functions/functions-for-nulls.md#isnull) and [isNotNull](../../../sql-reference/functions/functions-for-nulls.md#isnotnull) functions. +Otherwise, an expression with `NULL` never passes. + +**Example** + +To find numbers that are multiples of 3 and are greater than 10, execute the following query on the [numbers table](../../../sql-reference/table-functions/numbers.md): + +``` sql +SELECT number FROM numbers(20) WHERE (number > 10) AND (number % 3 == 0); +``` + +Result: + +``` text +┌─number─┐ +│ 12 │ +│ 15 │ +│ 18 │ +└────────┘ +``` + +Queries with `NULL` values: + +``` sql +CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE=MergeTree() ORDER BY x; +INSERT INTO t_null VALUES (1, NULL), (2, 3); + +SELECT * FROM t_null WHERE y IS NULL; +SELECT * FROM t_null WHERE y != 0; +``` + +Result: + +``` text +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +└───┴──────┘ +┌─x─┬─y─┐ +│ 2 │ 3 │ +└───┴───┘ +``` diff --git a/docs/en/sql-reference/statements/show.md index e00d58dfed4..96cbee0b04d 100644 --- a/docs/en/sql-reference/statements/show.md +++ b/docs/en/sql-reference/statements/show.md @@ -13,6 +13,8 @@ SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY|VIEW] [db.]table|view [INTO OUTFILE fi Returns a single `String`-type ‘statement’ column, which contains a single value – the `CREATE` query used for creating the specified object. +Note that if you use this statement to get the `CREATE` query of a system table, you will get a *fake* query, which only declares the table structure but cannot be used to create the table. + ## SHOW DATABASES {#show-databases} Prints a list of all databases. 
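To make the note above concrete, here is a sketch of what such a *fake* statement can look like (the exact engine name and output depend on the ClickHouse version, so treat the commented result as an assumption):

``` sql
SHOW CREATE TABLE system.numbers;

-- Returns something like:
--   CREATE TABLE system.numbers (`number` UInt64) ENGINE = SystemNumbers()
-- The statement only documents the table structure; executing it will not
-- re-create the system table.
```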
diff --git a/docs/ru/development/browse-code.md b/docs/ru/development/browse-code.md index 196054c3307..26b3f491599 100644 --- a/docs/ru/development/browse-code.md +++ b/docs/ru/development/browse-code.md @@ -1,5 +1,5 @@ --- -toc_priority: 71 +toc_priority: 72 toc_title: "Навигация по коду ClickHouse" --- diff --git a/docs/ru/development/build-cross-riscv.md b/docs/ru/development/build-cross-riscv.md new file mode 120000 index 00000000000..7d1e8c46ed8 --- /dev/null +++ b/docs/ru/development/build-cross-riscv.md @@ -0,0 +1 @@ +../../en/development/build-cross-riscv.md \ No newline at end of file diff --git a/docs/ru/development/contrib.md b/docs/ru/development/contrib.md index 3d6e5e4cb1b..318c658ab9b 100644 --- a/docs/ru/development/contrib.md +++ b/docs/ru/development/contrib.md @@ -1,5 +1,5 @@ --- -toc_priority: 70 +toc_priority: 71 toc_title: "Используемые сторонние библиотеки" --- diff --git a/docs/ru/development/style.md b/docs/ru/development/style.md index c73eb138c9c..a4e3f1f82df 100644 --- a/docs/ru/development/style.md +++ b/docs/ru/development/style.md @@ -1,5 +1,5 @@ --- -toc_priority: 68 +toc_priority: 69 toc_title: "Как писать код на C++" --- diff --git a/docs/ru/engines/database-engines/replicated.md b/docs/ru/engines/database-engines/replicated.md index b4850a8dafd..6777529f3e0 100644 --- a/docs/ru/engines/database-engines/replicated.md +++ b/docs/ru/engines/database-engines/replicated.md @@ -34,7 +34,9 @@ DDL-запросы с базой данных `Replicated` работают по В системной таблице [system.clusters](../../operations/system-tables/clusters.md) есть кластер с именем, как у реплицируемой базы, который состоит из всех реплик базы. Этот кластер обновляется автоматически при создании/удалении реплик, и его можно использовать для [Distributed](../../engines/table-engines/special/distributed.md#distributed) таблиц. - При создании новой реплики базы, эта реплика сама создаёт таблицы. Если реплика долго была недоступна и отстала от лога репликации — она сверяет свои локальные метаданные с актуальными метаданными в ZooKeeper, перекладывает лишние таблицы с данными в отдельную нереплицируемую базу (чтобы случайно не удалить что-нибудь лишнее), создаёт недостающие таблицы, обновляет имена таблиц, если были переименования. Данные реплицируются на уровне `ReplicatedMergeTree`, т.е. если таблица не реплицируемая, то данные реплицироваться не будут (база отвечает только за метаданные). +При создании новой реплики базы, эта реплика сама создаёт таблицы. Если реплика долго была недоступна и отстала от лога репликации — она сверяет свои локальные метаданные с актуальными метаданными в ZooKeeper, перекладывает лишние таблицы с данными в отдельную нереплицируемую базу (чтобы случайно не удалить что-нибудь лишнее), создаёт недостающие таблицы, обновляет имена таблиц, если были переименования. Данные реплицируются на уровне `ReplicatedMergeTree`, т.е. если таблица не реплицируемая, то данные реплицироваться не будут (база отвечает только за метаданные). + +Запросы [`ALTER TABLE ATTACH|FETCH|DROP|DROP DETACHED|DETACH PARTITION|PART`](../../sql-reference/statements/alter/partition.md) допустимы, но не реплицируются. Движок базы данных может только добавить/извлечь/удалить партицию или кусок нынешней реплики. Однако если сама таблица использует движок реплицируемой таблицы, тогда данные будут реплицированы после применения `ATTACH`. 
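A minimal sketch of the behavior described in the paragraph above (the `db_replicated` database and `events` table names are hypothetical; `db_replicated` is assumed to use the `Replicated` database engine and `events` a `ReplicatedMergeTree` engine):

``` sql
-- The ALTER itself runs only on the replica where it is issued; it is not replicated:
ALTER TABLE db_replicated.events DETACH PARTITION 202111;

-- Because the table engine is ReplicatedMergeTree, the data attached here
-- is replicated to the other replicas afterwards:
ALTER TABLE db_replicated.events ATTACH PARTITION 202111;
```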
## Примеры использования {#usage-example} diff --git a/docs/ru/engines/table-engines/integrations/s3.md b/docs/ru/engines/table-engines/integrations/s3.md index c90b7293e1c..ae64c222f69 100644 --- a/docs/ru/engines/table-engines/integrations/s3.md +++ b/docs/ru/engines/table-engines/integrations/s3.md @@ -11,7 +11,8 @@ toc_title: S3 ``` sql CREATE TABLE s3_engine_table (name String, value UInt32) -ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression]) + ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression]) + [SETTINGS ...] ``` **Параметры движка** @@ -24,9 +25,12 @@ ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compressi **Пример** ``` sql -CREATE TABLE s3_engine_table (name String, value UInt32) -ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip'); +CREATE TABLE s3_engine_table (name String, value UInt32) + ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip') + SETTINGS input_format_with_names_use_header = 0; + INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3); + SELECT * FROM s3_engine_table LIMIT 2; ``` @@ -54,7 +58,7 @@ SELECT * FROM s3_engine_table LIMIT 2; ## Символы подстановки {#wildcards-in-path} -Аргумент `path` может указывать на несколько файлов, используя подстановочные знаки. Для обработки файл должен существовать и соответствовать всему шаблону пути. Список файлов определяется во время выполнения запроса `SELECT` (не в момент выполнения запроса `CREATE`). +Аргумент `path` может указывать на несколько файлов, используя символы подстановки. Для обработки файл должен существовать и соответствовать всему шаблону пути. Список файлов определяется во время выполнения запроса `SELECT` (не в момент выполнения запроса `CREATE`). - `*` — заменяет любое количество любых символов, кроме `/`, включая пустую строку. - `?` — заменяет любые одиночные символы. @@ -63,6 +67,52 @@ SELECT * FROM s3_engine_table LIMIT 2; Конструкции с `{}` аналогичны функции [remote](../../../sql-reference/table-functions/remote.md). +!!! warning "Примечание" + Если список файлов содержит диапазоны чисел с ведущими нулями, используйте конструкцию с фигурными скобками для каждой цифры отдельно или используйте `?`. + +**Пример подстановки 1** + +Таблица содержит данные из файлов с именами `file-000.csv`, `file-001.csv`, … , `file-999.csv`: + +``` sql +CREATE TABLE big_table (name String, value UInt32) + ENGINE = S3('https://storage.yandexcloud.net/my-bucket/my_folder/file-{000..999}.csv', 'CSV'); +``` + +**Пример подстановки 2** + +Предположим, есть несколько файлов в формате CSV со следующими URL-адресами в S3: + +- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_1.csv' +- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_2.csv' +- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_3.csv' +- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_1.csv' +- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_2.csv' +- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_3.csv' + +Существует несколько способов создать таблицу, включающую в себя все шесть файлов: + +1. Задайте диапазон для суффиксов в названии файла: + +``` sql +CREATE TABLE table_with_range (name String, value UInt32) + ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_{1..3}', 'CSV'); +``` + +2. 
Таблица содержит все файлы с префиксом `some_file_` (в каталогах не должно быть других файлов с таким префиксом): + +``` sql +CREATE TABLE table_with_question_mark (name String, value UInt32) + ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_?', 'CSV'); +``` + +3. Таблица содержит все файлы в обоих каталогах (в каталогах не должно быть других файлов, соответствующих формату и схеме, описанным в запросе): + +``` sql +CREATE TABLE table_with_asterisk (name String, value UInt32) + ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/*', 'CSV'); +``` + ## Настройки движка S3 {#s3-settings} Перед выполнением запроса или в конфигурационном файле могут быть установлены следующие настройки: @@ -108,47 +158,6 @@ SELECT * FROM s3_engine_table LIMIT 2; ``` -## Примеры использования {#usage-examples} - -Предположим, у нас есть несколько файлов в формате CSV со следующими URL-адресами в S3: - -- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv' -- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv' - -1. Существует несколько способов создать таблицу, включающую в себя все шесть файлов: - -``` sql -CREATE TABLE table_with_range (name String, value UInt32) -ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV'); -``` - -2. Другой способ: - -``` sql -CREATE TABLE table_with_question_mark (name String, value UInt32) -ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV'); -``` - -3. Таблица содержит все файлы в обоих каталогах (все файлы должны соответствовать формату и схеме, описанным в запросе): - -``` sql -CREATE TABLE table_with_asterisk (name String, value UInt32) -ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV'); -``` - -Если список файлов содержит диапазоны чисел с ведущими нулями, используйте конструкцию с фигурными скобками для каждой цифры отдельно или используйте `?`. - -4. Создание таблицы из файлов с именами `file-000.csv`, `file-001.csv`, … , `file-999.csv`: - -``` sql -CREATE TABLE big_table (name String, value UInt32) -ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV'); -``` - **Смотрите также** - [Табличная функция s3](../../../sql-reference/table-functions/s3.md) diff --git a/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md index 6e01cc2bcac..46f5db70004 100644 --- a/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md @@ -9,7 +9,7 @@ toc_title: AggregatingMergeTree Таблицы типа `AggregatingMergeTree` могут использоваться для инкрементальной агрегации данных, в том числе, для агрегирующих материализованных представлений. -Движок обрабатывает все столбцы типа [AggregateFunction](../../../engines/table-engines/mergetree-family/aggregatingmergetree.md). 
+Движок обрабатывает все столбцы типа [AggregateFunction](../../../sql-reference/data-types/aggregatefunction.md). Использование `AggregatingMergeTree` оправдано только в том случае, когда это уменьшает количество строк на порядки. diff --git a/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md b/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md index 4f0206158f1..8508ba18d9e 100644 --- a/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md +++ b/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md @@ -64,9 +64,9 @@ WHERE table = 'visits' └───────────┴────────────────┴────────┘ ``` -Столбец `partition` содержит имена всех партиций таблицы. Таблица `visits` из нашего примера содержит две партиции: `201901` и `201902`. Используйте значения из этого столбца в запросах [ALTER … PARTITION](#alter_manipulations-with-partitions). +Столбец `partition` содержит имена всех партиций таблицы. Таблица `visits` из нашего примера содержит две партиции: `201901` и `201902`. Используйте значения из этого столбца в запросах [ALTER … PARTITION](../../../sql-reference/statements/alter/partition.md). -Столбец `name` содержит названия кусков партиций. Значения из этого столбца можно использовать в запросах [ALTER ATTACH PART](#alter_attach-partition). +Столбец `name` содержит названия кусков партиций. Значения из этого столбца можно использовать в запросах [ALTER ATTACH PART](../../../sql-reference/statements/alter/partition.md#alter_attach-partition). Столбец `active` отображает состояние куска. `1` означает, что кусок активен; `0` – неактивен. К неактивным можно отнести куски, оставшиеся после слияния данных. Поврежденные куски также отображаются как неактивные. Неактивные куски удаляются приблизительно через 10 минут после того, как было выполнено слияние. @@ -82,7 +82,7 @@ WHERE table = 'visits' Как видно из примера выше, таблица содержит несколько отдельных кусков для одной и той же партиции (например, куски `201901_1_3_1` и `201901_1_9_2` принадлежат партиции `201901`). Это означает, что эти куски еще не были объединены – в файловой системе они хранятся отдельно. После того как будет выполнено автоматическое слияние данных (выполняется примерно спустя 10 минут после вставки данных), исходные куски будут объединены в один более крупный кусок и помечены как неактивные. -Вы можете запустить внеочередное слияние данных с помощью запроса [OPTIMIZE](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#misc_operations-optimize). Пример: +Вы можете запустить внеочередное слияние данных с помощью запроса [OPTIMIZE](../../../sql-reference/statements/optimize.md). Пример: ``` sql OPTIMIZE TABLE visits PARTITION 201902; @@ -123,11 +123,11 @@ drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 detached Директория `detached` содержит куски, отсоединенные от таблицы с помощью запроса [DETACH](../../../sql-reference/statements/alter/partition.md#alter_detach-partition). Поврежденные куски также попадают в эту директорию – они не удаляются с сервера. -Сервер не использует куски из директории `detached`. Вы можете в любое время добавлять, удалять, модифицировать данные в директории detached - сервер не будет об этом знать, пока вы не сделаете запрос [ATTACH](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#alter_attach-partition). +Сервер не использует куски из директории `detached`. 
Вы можете в любое время добавлять, удалять, модифицировать данные в директории detached - сервер не будет об этом знать, пока вы не сделаете запрос [ATTACH](../../../sql-reference/statements/alter/partition.md#alter_attach-partition). Следует иметь в виду, что при работающем сервере нельзя вручную изменять набор кусков на файловой системе, так как сервер не будет знать об этом. Для нереплицируемых таблиц, вы можете это делать при остановленном сервере, однако это не рекомендуется. Для реплицируемых таблиц, набор кусков нельзя менять в любом случае. -ClickHouse позволяет производить различные манипуляции с кусками: удалять, копировать из одной таблицы в другую или создавать их резервные копии. Подробнее см. в разделе [Манипуляции с партициями и кусками](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#alter_manipulations-with-partitions). +ClickHouse позволяет производить различные манипуляции с кусками: удалять, копировать из одной таблицы в другую или создавать их резервные копии. Подробнее см. в разделе [Манипуляции с партициями и кусками](../../../sql-reference/statements/alter/partition.md). diff --git a/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md index adb40037319..f12a9971a44 100644 --- a/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md @@ -132,7 +132,7 @@ ClickHouse может слить куски данных таким образо [(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)] ``` -При запросе данных используйте функцию [sumMap(key, value)](../../../engines/table-engines/mergetree-family/summingmergetree.md) для агрегации `Map`. +При запросе данных используйте функцию [sumMap(key, value)](../../../sql-reference/aggregate-functions/reference/summap.md#agg_functions-summap) для агрегации `Map`. Для вложенной структуры данных не нужно указывать её столбцы в кортеже столбцов для суммирования. diff --git a/docs/ru/engines/table-engines/special/dictionary.md b/docs/ru/engines/table-engines/special/dictionary.md index 15d32419472..05a78a24e8f 100644 --- a/docs/ru/engines/table-engines/special/dictionary.md +++ b/docs/ru/engines/table-engines/special/dictionary.md @@ -62,7 +62,7 @@ WHERE name = 'products' └──────────┴──────┴────────┴─────────────────┴─────────────────┴─────────────────┴───────────────┴─────────────────┘ ``` -В таком виде данные из словаря можно получить при помощи функций [dictGet\*](../../../engines/table-engines/special/dictionary.md#ext_dict_functions). +В таком виде данные из словаря можно получить при помощи функций [dictGet\*](../../../sql-reference/functions/ext-dict-functions.md#dictget). Такое представление неудобно, когда нам необходимо получить данные в чистом виде, а также при выполнении операции `JOIN`. Для этих случаев можно использовать движок `Dictionary`, который отобразит данные словаря в таблицу. diff --git a/docs/ru/engines/table-engines/special/file.md b/docs/ru/engines/table-engines/special/file.md index 6f1c723d2a7..fe93e814335 100644 --- a/docs/ru/engines/table-engines/special/file.md +++ b/docs/ru/engines/table-engines/special/file.md @@ -21,11 +21,11 @@ File(Format) `Format` должен быть таким, который ClickHouse может использовать и в запросах `INSERT` и в запросах `SELECT`. Полный список поддерживаемых форматов смотрите в разделе [Форматы](../../../interfaces/formats.md#formats). -Сервер ClickHouse не позволяет указать путь к файлу, с которым будет работать `File`. 
Используется путь к хранилищу, определенный параметром [path](../../../operations/server-configuration-parameters/settings.md) в конфигурации сервера. +Сервер ClickHouse не позволяет указать путь к файлу, с которым будет работать `File`. Используется путь к хранилищу, определенный параметром [path](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-path) в конфигурации сервера. При создании таблицы с помощью `File(Format)` сервер ClickHouse создает в хранилище каталог с именем таблицы, а после добавления в таблицу данных помещает туда файл `data.Format`. -Можно вручную создать в хранилище каталог таблицы, поместить туда файл, затем на сервере ClickHouse добавить ([ATTACH](../../../engines/table-engines/special/file.md)) информацию о таблице, соответствующей имени каталога и прочитать из файла данные. +Можно вручную создать в хранилище каталог таблицы, поместить туда файл, затем на сервере ClickHouse добавить ([ATTACH](../../../sql-reference/statements/attach.md#attach)) информацию о таблице, соответствующей имени каталога и прочитать из файла данные. !!! warning "Warning" Будьте аккуратны с этой функциональностью, поскольку сервер ClickHouse не отслеживает внешние изменения данных. Если в файл будет производиться запись одновременно со стороны сервера ClickHouse и с внешней стороны, то результат непредсказуем. diff --git a/docs/ru/engines/table-engines/special/join.md b/docs/ru/engines/table-engines/special/join.md index 28bbfe6dea4..cbb536ac321 100644 --- a/docs/ru/engines/table-engines/special/join.md +++ b/docs/ru/engines/table-engines/special/join.md @@ -5,7 +5,7 @@ toc_title: Join # Join {#join} -Подготовленная структура данных для использования в операциях [JOIN](../../../engines/table-engines/special/join.md#select-join). +Подготовленная структура данных для использования в операциях [JOIN](../../../sql-reference/statements/select/join.md#select-join). ## Создание таблицы {#creating-a-table} @@ -21,8 +21,8 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] **Параметры движка** -- `join_strictness` – [строгость JOIN](../../../engines/table-engines/special/join.md#select-join-types). -- `join_type` – [тип JOIN](../../../engines/table-engines/special/join.md#select-join-types). +- `join_strictness` – [строгость JOIN](../../../sql-reference/statements/select/join.md#select-join-types). +- `join_type` – [тип JOIN](../../../sql-reference/statements/select/join.md#select-join-types). - `k1[, k2, ...]` – ключевые столбцы секции `USING` с которыми выполняется операция `JOIN`. Вводите параметры `join_strictness` и `join_type` без кавычек, например, `Join(ANY, LEFT, col1)`. Они должны быть такими же как и в той операции `JOIN`, в которой таблица будет использоваться. Если параметры не совпадают, ClickHouse не генерирует исключение и может возвращать неверные данные. @@ -42,7 +42,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Основные применения `Join` таблиц: - Использование в правой части секции `JOIN`. -- Извлечение данных из таблицы таким же образом как из словаря с помощью функции [joinGet](../../../engines/table-engines/special/join.md#joinget). +- Извлечение данных из таблицы таким же образом как из словаря с помощью функции [joinGet](../../../sql-reference/functions/other-functions.md#joinget). 
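+
+Для наглядности — минимальный набросок такого использования `joinGet` (таблица `id_val` и её столбцы здесь условные):
+
+``` sql
+CREATE TABLE id_val (`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id);
+
+INSERT INTO id_val VALUES (1, 11), (2, 12), (4, 13);
+
+-- Извлекаем значения по ключу, как из словаря;
+-- для отсутствующих ключей (0 и 3) вернётся значение по умолчанию.
+SELECT joinGet('id_val', 'val', toUInt32(number)) FROM numbers(4);
+```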
### Удаление данных {#deleting-data} diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index 0abca2f53da..70f1a7bcb2e 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -163,8 +163,8 @@ SELECT * FROM nestedt FORMAT TSV ## TabSeparatedWithNames {#tabseparatedwithnames} Отличается от формата `TabSeparated` тем, что в первой строке пишутся имена столбцов. -При парсинге, первая строка полностью игнорируется. Вы не можете использовать имена столбцов, чтобы указать их порядок расположения, или чтобы проверить их корректность. -(Поддержка обработки заголовка при парсинге может быть добавлена в будущем.) + +При парсинге первая строка должна содержать имена столбцов. Вы можете использовать имена столбцов, чтобы указать их порядок расположения, или чтобы проверить их корректность. Этот формат также доступен под именем `TSVWithNames`. diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md index fca343e8529..6d94a43ff15 100644 --- a/docs/ru/interfaces/http.md +++ b/docs/ru/interfaces/http.md @@ -430,7 +430,7 @@ $ curl -v 'http://localhost:8123/predefined_query' [^/]+)(/(?P[^/]+))?]]> - GET + GET TEST_HEADER_VALUE [^/]+)(/(?P[^/]+))?]]> diff --git a/docs/ru/interfaces/third-party/client-libraries.md b/docs/ru/interfaces/third-party/client-libraries.md index b2896f810b7..82413062e41 100644 --- a/docs/ru/interfaces/third-party/client-libraries.md +++ b/docs/ru/interfaces/third-party/client-libraries.md @@ -31,6 +31,8 @@ toc_title: "Клиентские библиотеки от сторонних р - NodeJs - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse) - [node-clickhouse](https://github.com/apla/node-clickhouse) + - [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse) + - [clickhouse-client](https://github.com/depyronick/clickhouse-client) - Perl - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse) - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse) diff --git a/docs/ru/interfaces/third-party/gui.md b/docs/ru/interfaces/third-party/gui.md index 8e987fab554..b80aaf7948e 100644 --- a/docs/ru/interfaces/third-party/gui.md +++ b/docs/ru/interfaces/third-party/gui.md @@ -69,6 +69,14 @@ toc_title: "Визуальные интерфейсы от сторонних р - Проводник по базе данных. - Инструменты визуализации, позволяющие представить данные в различных формах. +### Grafana {#grafana} + +[Grafana](https://grafana.com/grafana/plugins/vertamedia-clickhouse-datasource) — платформа для мониторинга и визуализации. + +"С помощью Grafana вы можете делать запросы, визуализировать, получать уведомления и разбираться в метриках, где бы они ни хранились. Создавайте, исследуйте, делитесь дашбордами с командой и прививайте культуру принятия решений на основе данных. Мы пользуемся доверием и любовью пользователей" — grafana.com. + +Плагин источника данных ClickHouse поддерживает ClickHouse в качестве бэкенд базы данных. + ### DBeaver {#dbeaver} [DBeaver](https://dbeaver.io/) - универсальный desktop клиент баз данных с поддержкой ClickHouse. @@ -109,6 +117,36 @@ toc_title: "Визуальные интерфейсы от сторонних р [MindsDB](https://mindsdb.com/) — это продукт с открытым исходным кодом, реализующий слой искусственного интеллекта (Artificial Intelligence, AI) для различных СУБД, в том числе для ClickHouse. MindsDB облегчает процессы создания, обучения и развертывания современных моделей машинного обучения. 
Графический пользовательский интерфейс MindsDB Studio позволяет обучать новые модели на основе данных в БД, интерпретировать сделанные моделями прогнозы, выявлять потенциальные ошибки в данных, визуализировать и оценивать достоверность моделей с помощью функции Explainable AI, так чтобы вы могли быстрее адаптировать и настраивать ваши модели машинного обучения.
 
+### DBM {#dbm}
+
+[DBM](https://dbm.incubator.edurt.io/) — инструмент для визуального менеджмента в ClickHouse!
+
+Основные возможности:
+
+- Поддержка истории запросов (пагинация, очистка и т.д.)
+- Поддержка отдельных секций запросов
+- Поддержка остановки запроса
+- Поддержка управления таблицами (метаданные, удаление, предпросмотр)
+- Поддержка управления базами данных (удаление, создание)
+- Поддержка пользовательских запросов
+- Поддержка управления различными источниками данных (проверка соединения, мониторинг)
+- Поддержка монитора (процессор, соединение, запрос)
+- Поддержка миграции данных
+
+### Bytebase {#bytebase}
+
+[Bytebase](https://bytebase.com) — сетевой инструмент для смены схем и контроля версий с открытым исходным кодом для работы в команде. Поддерживает различные базы данных, в том числе ClickHouse.
+
+Основные возможности:
+
+- Проверка схемы для разработчиков и администраторов баз данных.
+- "База данных в виде кода", хранение схемы базы данных с помощью системы контроля версий, например, GitLab, а также активация развертывания по коммиту.
+- Рациональное развертывание и соответствующая среда.
+- Полная история миграций.
+- Определение смещения схемы.
+- Резервное копирование и восстановление.
+- Управление доступом на основе ролей.
+
 ## Коммерческие {#commercial}
 
 ### DataGrip {#datagrip}
diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md
index ef2a99ebffc..7d9804927e9 100644
--- a/docs/ru/operations/server-configuration-parameters/settings.md
+++ b/docs/ru/operations/server-configuration-parameters/settings.md
@@ -1432,3 +1432,54 @@ ClickHouse использует ZooKeeper для хранения метадан
 ```
+
+## total_memory_profiler_step {#total-memory-profiler-step}
+
+Задает размер памяти (в байтах) для трассировки стека на каждом шаге выделения максимума памяти. Данные хранятся в системной таблице [system.trace_log](../../operations/system-tables/trace_log.md) с `query_id`, равным пустой строке.
+
+Возможные значения:
+
+- Положительное целое число.
+
+Значение по умолчанию: `4194304`.
+
+## total_memory_tracker_sample_probability {#total-memory-tracker-sample-probability}
+
+Позволяет собирать случайные выделения и освобождения памяти и записывать их в системную таблицу [system.trace_log](../../operations/system-tables/trace_log.md) с `trace_type`, равным `MemorySample`, с указанной вероятностью. Вероятность касается каждого выделения или освобождения памяти, независимо от размера выделения. Обратите внимание, что выборка происходит только тогда, когда объем неотслеживаемой памяти превышает лимит неотслеживаемой памяти (значение по умолчанию: `4` MiB). Этот лимит снижается, если уменьшить значение настройки [total_memory_profiler_step](#total-memory-profiler-step). Вы можете установить значение настройки `total_memory_profiler_step`, равным `1`, для особо детализированной выборки.
+
+Возможные значения:
+
+- Положительное целое число.
+- 0 — запись случайных выделений и освобождений памяти в системную таблицу `system.trace_log` отключена.
+
+Значение по умолчанию: `0`.
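+
+Например, накопленные сэмплы можно посмотреть примерно таким запросом к `system.trace_log` (набросок; предполагается, что вероятность выборки включена и сэмплы уже записаны):
+
+``` sql
+-- Количество сэмплов выделений/освобождений памяти и их суммарный размер
+-- (для освобождений памяти значение size отрицательное).
+SELECT count() AS samples, sum(size) AS total_bytes
+FROM system.trace_log
+WHERE trace_type = 'MemorySample';
+```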
+ +## mmap_cache_size {#mmap-cache-size} + +Задает размер кеша (в байтах) для сопоставленных файлов. Эта настройка позволяет избежать частых открытых/[mmap/munmap](https://en.wikipedia.org/wiki/Mmap)/закрытых вызовов (очень дорогостоящие из-за последующих ошибок страниц) и повторного использования сопоставления из нескольких потоков и запросов. Значение настройки — это количество сопоставленных областей (обычно равно количеству сопоставленных файлов). Объем данных в сопоставленных файлах можно отслеживать в системных таблицах [system.metrics](../../operations/system-tables/metrics.md), [system.metric_log](../../operations/system-tables/metric_log.md) по метрикам `MMappedFiles` и `MMappedFileBytes`, в таблицах [system.asynchronous_metrics](../../operations/system-tables/asynchronous_metrics.md), [system.asynchronous_metrics_log](../../operations/system-tables/asynchronous_metric_log.md) по метрике `MMapCacheCells`, а также в [system.events](../../operations/system-tables/events.md), [system.processes](../../operations/system-tables/processes.md), [system.query_log](../../operations/system-tables/query_log.md), [system.query_thread_log](../../operations/system-tables/query_thread_log.md), [system.query_views_log](../../operations/system-tables/query_views_log.md) по событиям `CreatedReadBufferMMap`, `CreatedReadBufferMMapFailed`, `MMappedFileCacheHits`, `MMappedFileCacheMisses`. Обратите внимание, что объем данных в сопоставленных файлах не потребляет память напрямую и не учитывается в запросе или использовании памяти сервера, поскольку эта память может быть удалена аналогично кешу страниц ОС. Кеш удаляется (т.е. файлы закрываются) автоматически при удалении старых кусков в таблицах семейства [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md), также его можно удалить вручную с помощью запроса `SYSTEM DROP MMAP CACHE`. + +Возможные значения: + +- Положительное целое число. + +Значение по умолчанию: `1000`. + +## compiled_expression_cache_size {#compiled-expression-cache-size} + +Задает размер кеша (в байтах) для [скомпилированных выражений](../../operations/caches.md). + +Возможные значения: + +- Положительное целое число. + +Значение по умолчанию: `134217728`. + +## compiled_expression_cache_elements_size {#compiled_expression_cache_elements_size} + +Задает размер кеша (в элементах) для [скомпилированных выражений](../../operations/caches.md). + +Возможные значения: + +- Положительное целое число. + +Значение по умолчанию: `10000`. diff --git a/docs/ru/operations/settings/merge-tree-settings.md b/docs/ru/operations/settings/merge-tree-settings.md index 117ed7d633b..31cc229c6aa 100644 --- a/docs/ru/operations/settings/merge-tree-settings.md +++ b/docs/ru/operations/settings/merge-tree-settings.md @@ -342,3 +342,16 @@ Eсли суммарное число активных кусков во все **Использование** Значение настройки `min_bytes_to_rebalance_partition_over_jbod` должно быть меньше значения настройки [max_bytes_to_merge_at_max_space_in_pool](../../operations/settings/merge-tree-settings.md#max-bytes-to-merge-at-max-space-in-pool). Иначе ClickHouse сгенерирует исключение. + +## detach_not_byte_identical_parts {#detach_not_byte_identical_parts} + +Настраивает отключение куска данных после выполнения слияния или мутации, если на одной реплике его содержимое побайтно не совпадает с кусками на других репликах. Если настройка отключена, то кусок удаляется. Активируйте это поведение, если хотите анализировать такие куски позже. 
+ +Эта настройка применяется к таблицам `MergeTree` с включенной [репликацией данных](../../engines/table-engines/mergetree-family/replication.md). + +Возможные значения: + +- 0 — куски данных удаляются. +- 1 — куски данных открепляются. + +Значение по умолчанию: `0`. diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index e35258f8e30..9b62618e184 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -3819,3 +3819,14 @@ SELECT * FROM positional_arguments ORDER BY 2,3; - `'by_name_case_insensitive'` — имена в перечислениях должны быть одинаковыми без учета регистра, а значения могут быть разными. Значение по умолчанию: `'by_values'`. + +## min_bytes_to_use_mmap_io {#min-bytes-to-use-mmap-io} + +Это экспериментальная настройка. Устанавливает минимальный объем памяти для чтения больших файлов без копирования данных из ядра в пространство пользователей. Рекомендуемый лимит составляет около 64 MB, поскольку [mmap/munmap](https://en.wikipedia.org/wiki/Mmap) работает медленно. Это имеет смысл только для больших файлов и помогает только в том случае, если данные находятся в кеше страниц. + +Возможные значения: + +- Положительное целое число. +- 0 — большие файлы считываются только с копированием данных из ядра в пространство пользователей. + +Значение по умолчанию: `0`. diff --git a/docs/ru/operations/system-tables/dictionaries.md b/docs/ru/operations/system-tables/dictionaries.md index b865fea736f..ae4e5055ce8 100644 --- a/docs/ru/operations/system-tables/dictionaries.md +++ b/docs/ru/operations/system-tables/dictionaries.md @@ -4,58 +4,84 @@ Столбцы: -- `database` ([String](../../sql-reference/data-types/string.md)) — Имя базы данных, в которой находится словарь, созданный с помощью DDL-запроса. Пустая строка для других словарей. -- `name` ([String](../../sql-reference/data-types/string.md)) — [Имя словаря](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md). -- `status` ([Enum8](../../sql-reference/data-types/enum.md)) — Статус словаря. Возможные значения: - - `NOT_LOADED` — Словарь не загружен, потому что не использовался. - - `LOADED` — Словарь загружен успешно. - - `FAILED` — Словарь не загружен в результате ошибки. - - `LOADING` — Словарь в процессе загрузки. - - `LOADED_AND_RELOADING` — Словарь загружен успешно, сейчас перезагружается (частые причины: запрос [SYSTEM RELOAD DICTIONARY](../../sql-reference/statements/system.md#query_language-system-reload-dictionary), таймаут, изменение настроек словаря). - - `FAILED_AND_RELOADING` — Словарь не загружен в результате ошибки, сейчас перезагружается. -- `origin` ([String](../../sql-reference/data-types/string.md)) — Путь к конфигурационному файлу, описывающему словарь. -- `type` ([String](../../sql-reference/data-types/string.md)) — Тип размещения словаря. [Хранение словарей в памяти](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md). -- `key` — [Тип ключа](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key): Числовой ключ ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) или Составной ключ ([String](../../sql-reference/data-types/string.md)) — строка вида “(тип 1, тип 2, …, тип n)”. 
-- `attribute.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Массив [имен атрибутов](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes), предоставляемых справочником. -- `attribute.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Соответствующий массив [типов атрибутов](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes), предоставляемых справочником. -- `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Объем оперативной памяти, используемый словарем. -- `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Количество запросов с момента загрузки словаря или с момента последней успешной перезагрузки. -- `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — Для cache-словарей — процент закэшированных значений. -- `found_rate` ([Float64](../../sql-reference/data-types/float.md)) — Процент обращений к словарю, при которых значение было найдено. -- `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Количество элементов, хранящихся в словаре. -- `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — Процент заполнения словаря (для хэшированного словаря — процент заполнения хэш-таблицы). -- `source` ([String](../../sql-reference/data-types/string.md)) — Текст, описывающий [источник данных](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) для словаря. -- `lifetime_min` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Минимальное [время обновления](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) словаря в памяти, по истечении которого Clickhouse попытается перезагрузить словарь (если задано `invalidate_query`, то только если он изменился). Задается в секундах. -- `lifetime_max` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Максимальное [время обновления](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) словаря в памяти, по истечении которого Clickhouse попытается перезагрузить словарь (если задано `invalidate_query`, то только если он изменился). Задается в секундах. -- `loading_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Время начала загрузки словаря. -- `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — Время, затраченное на загрузку словаря. -- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Текст ошибки, возникающей при создании или перезагрузке словаря, если словарь не удалось создать. +- `database` ([String](../../sql-reference/data-types/string.md)) — имя базы данных, в которой находится словарь, созданный с помощью DDL-запроса. Пустая строка для других словарей. +- `name` ([String](../../sql-reference/data-types/string.md)) — [имя словаря](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md). +- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — уникальный UUID словаря. +- `status` ([Enum8](../../sql-reference/data-types/enum.md)) — статус словаря. Возможные значения: + - `NOT_LOADED` — словарь не загружен, потому что не использовался. + - `LOADED` — словарь загружен успешно. + - `FAILED` — словарь не загружен в результате ошибки. 
+ - `LOADING` — словарь в процессе загрузки. + - `LOADED_AND_RELOADING` — словарь загружен успешно, сейчас перезагружается (частые причины: запрос [SYSTEM RELOAD DICTIONARY](../../sql-reference/statements/system.md#query_language-system-reload-dictionary), таймаут, изменение настроек словаря). + - `FAILED_AND_RELOADING` — словарь не загружен в результате ошибки, сейчас перезагружается. +- `origin` ([String](../../sql-reference/data-types/string.md)) — путь к конфигурационному файлу, описывающему словарь. +- `type` ([String](../../sql-reference/data-types/string.md)) — тип размещения словаря. [Хранение словарей в памяти](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md). +- `key.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — массив [имен ключей](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key), предоставляемых словарем. +- `key.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — соответствующий массив [типов ключей](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-key), предоставляемых словарем. +- `attribute.names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — массив [имен атрибутов](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes), предоставляемых словарем. +- `attribute.types` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — соответствующий массив [типов атрибутов](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md#ext_dict_structure-attributes), предоставляемых словарем. +- `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — объем оперативной памяти, используемый словарем. +- `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — количество запросов с момента загрузки словаря или с момента последней успешной перезагрузки. +- `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — для cache-словарей — процент закэшированных значений. +- `found_rate` ([Float64](../../sql-reference/data-types/float.md)) — процент обращений к словарю, при которых значение было найдено. +- `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — количество элементов, хранящихся в словаре. +- `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — процент заполнения словаря (для хэшированного словаря — процент заполнения хэш-таблицы). +- `source` ([String](../../sql-reference/data-types/string.md)) — текст, описывающий [источник данных](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) для словаря. +- `lifetime_min` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — минимальное [время обновления](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) словаря в памяти, по истечении которого Clickhouse попытается перезагрузить словарь (если задано `invalidate_query`, то только если он изменился). Задается в секундах. 
+- `lifetime_max` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — максимальное [время обновления](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) словаря в памяти, по истечении которого Clickhouse попытается перезагрузить словарь (если задано `invalidate_query`, то только если он изменился). Задается в секундах. +- `loading_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время начала загрузки словаря. +- `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — время, затраченное на загрузку словаря. +- `last_exception` ([String](../../sql-reference/data-types/string.md)) — текст ошибки, возникающей при создании или перезагрузке словаря, если словарь не удалось создать. +- `comment` ([String](../../sql-reference/data-types/string.md)) — текст комментария к словарю. **Пример** -Настройте словарь. +Настройте словарь: ``` sql -CREATE DICTIONARY dictdb.dict +CREATE DICTIONARY dictionary_with_comment ( - `key` Int64 DEFAULT -1, - `value_default` String DEFAULT 'world', - `value_expression` String DEFAULT 'xxx' EXPRESSION 'toString(127 * 172)' + id UInt64, + value String ) -PRIMARY KEY key -SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dicttbl' DB 'dictdb')) -LIFETIME(MIN 0 MAX 1) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table')) LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000) +COMMENT 'The temporary dictionary'; ``` Убедитесь, что словарь загружен. ``` sql -SELECT * FROM system.dictionaries +SELECT * FROM system.dictionaries LIMIT 1 FORMAT Vertical; ``` ``` text -┌─database─┬─name─┬─status─┬─origin──────┬─type─┬─key────┬─attribute.names──────────────────────┬─attribute.types─────┬─bytes_allocated─┬─query_count─┬─hit_rate─┬─element_count─┬───────────load_factor─┬─source─────────────────────┬─lifetime_min─┬─lifetime_max─┬──loading_start_time─┌──last_successful_update_time─┬──────loading_duration─┬─last_exception─┐ -│ dictdb │ dict │ LOADED │ dictdb.dict │ Flat │ UInt64 │ ['value_default','value_expression'] │ ['String','String'] │ 74032 │ 0 │ 1 │ 1 │ 0.0004887585532746823 │ ClickHouse: dictdb.dicttbl │ 0 │ 1 │ 2020-03-04 04:17:34 │ 2020-03-04 04:30:34 │ 0.002 │ │ -└──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘ +Row 1: +────── +database: default +name: dictionary_with_comment +uuid: 4654d460-0d03-433a-8654-d4600d03d33a +status: NOT_LOADED +origin: 4654d460-0d03-433a-8654-d4600d03d33a +type: +key.names: ['id'] +key.types: ['UInt64'] +attribute.names: ['value'] +attribute.types: ['String'] +bytes_allocated: 0 +query_count: 0 +hit_rate: 0 +found_rate: 0 +element_count: 0 +load_factor: 0 +source: +lifetime_min: 0 +lifetime_max: 0 +loading_start_time: 1970-01-01 00:00:00 +last_successful_update_time: 1970-01-01 00:00:00 +loading_duration: 0 +last_exception: +comment: The temporary dictionary ``` diff --git a/docs/ru/operations/system-tables/query_thread_log.md b/docs/ru/operations/system-tables/query_thread_log.md index 219d468e222..00538c9c9ae 100644 --- a/docs/ru/operations/system-tables/query_thread_log.md +++ b/docs/ru/operations/system-tables/query_thread_log.md @@ -9,7 +9,7 @@ Интервал сброса данных в таблицу задаётся параметром 
`flush_interval_milliseconds` в разделе настроек сервера [query_thread_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log). Чтобы принудительно записать логи из буфера памяти в таблицу, используйте запрос [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs). -ClickHouse не удаляет данные из таблицы автоматически. Подробности в разделе [Введение](#system-tables-introduction). +ClickHouse не удаляет данные из таблицы автоматически. Подробности в разделе [Введение](../../operations/system-tables/index.md#system-tables-introduction). Чтобы уменьшить количество запросов, регистрирующихся в таблице `query_thread_log`, вы можете использовать настройку [log_queries_probability](../../operations/settings/settings.md#log-queries-probability). diff --git a/docs/ru/operations/system-tables/storage_policies.md b/docs/ru/operations/system-tables/storage_policies.md index b2005d5f31e..0f9a4814a92 100644 --- a/docs/ru/operations/system-tables/storage_policies.md +++ b/docs/ru/operations/system-tables/storage_policies.md @@ -6,7 +6,7 @@ - `policy_name` ([String](../../sql-reference/data-types/string.md)) — имя политики хранения. - `volume_name` ([String](../../sql-reference/data-types/string.md)) — имя тома, который содержится в политике хранения. -- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — порядковый номер тома согласно конфигурации. +- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — порядковый номер тома согласно конфигурации, приоритет согласно которому данные заполняют тома, т.е. данные при инсертах и мержах записываются на тома с более низким приоритетом (с учетом других правил: TTL, `max_data_part_size`, `move_factor`). - `disks` ([Array(String)](../../sql-reference/data-types/array.md)) — имена дисков, содержащихся в политике хранения. - `max_data_part_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) — максимальный размер куска данных, который может храниться на дисках тома (0 — без ограничений). - `move_factor` — доля доступного свободного места на томе, если места становится меньше, то данные начнут перемещение на следующий том, если он есть (по умолчанию 0.1). diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index 0486f4931b0..3baef3f5ba7 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -66,7 +66,6 @@ SETTINGS(format_csv_allow_single_quotes = 0) - СУБД: - [ODBC](#dicts-external_dicts_dict_sources-odbc) - [MySQL](#dicts-external_dicts_dict_sources-mysql) - - [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql) - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse) - [MongoDB](#dicts-external_dicts_dict_sources-mongodb) - [Redis](#dicts-external_dicts_dict_sources-redis) @@ -210,45 +209,6 @@ SOURCE(HTTP( При создании словаря с помощью DDL-команды (`CREATE DICTIONARY ...`) удаленные хосты для HTTP-словарей проверяются в разделе `remote_url_allow_hosts` из конфигурации сервера. Иначе пользователи базы данных будут иметь доступ к произвольному HTTP-серверу. -## ODBC {#dicts-external_dicts_dict_sources-odbc} - -Этим способом можно подключить любую базу данных, имеющую ODBC драйвер. 
-
-Пример настройки:
-
-``` xml
-<source>
-    <odbc>
-        <db>DatabaseName</db>
-        <table>ShemaName.TableName</table>
-        <connection_string>DSN=some_parameters</connection_string>
-        <invalidate_query>SQL_QUERY</invalidate_query>
-    </odbc>
-</source>
-```
-
-или
-
-``` sql
-SOURCE(ODBC(
-    db 'DatabaseName'
-    table 'SchemaName.TableName'
-    connection_string 'DSN=some_parameters'
-    invalidate_query 'SQL_QUERY'
-))
-```
-
-Поля настройки:
-
-- `db` — имя базы данных. Не указывать, если имя базы задано в параметрах. `<connection_string>`.
-- `table` — имя таблицы и схемы, если она есть.
-- `connection_string` — строка соединения.
-- `invalidate_query` — запрос для проверки статуса словаря. Необязательный параметр. Читайте подробнее в разделе [Обновление словарей](external-dicts-dict-lifetime.md).
-
-ClickHouse получает от ODBC-драйвера информацию о квотировании и квотирует настройки в запросах к драйверу, поэтому имя таблицы нужно указывать в соответствии с регистром имени таблицы в базе данных.
-
-Если у вас есть проблемы с кодировками при использовании Oracle, ознакомьтесь с соответствующим разделом [FAQ](../../../faq/integration/oracle-odbc.md).
-
 ### Выявленная уязвимость в функционировании ODBC словарей {#vyiavlennaia-uiazvimost-v-funktsionirovanii-odbc-slovarei}
 
 !!! attention "Внимание"
@@ -464,6 +424,51 @@ LIFETIME(MIN 300 MAX 360)
 
 ## СУБД {#subd}
 
+### ODBC {#dicts-external_dicts_dict_sources-odbc}
+
+Этим способом можно подключить любую базу данных, имеющую ODBC драйвер.
+
+Пример настройки:
+
+``` xml
+<source>
+    <odbc>
+        <db>DatabaseName</db>
+        <table>ShemaName.TableName</table>
+        <connection_string>DSN=some_parameters</connection_string>
+        <invalidate_query>SQL_QUERY</invalidate_query>
+        <query>SELECT id, value_1, value_2 FROM ShemaName.TableName</query>
+    </odbc>
+ +``` + +или + +``` sql +SOURCE(ODBC( + db 'DatabaseName' + table 'SchemaName.TableName' + connection_string 'DSN=some_parameters' + invalidate_query 'SQL_QUERY' + query 'SELECT id, value_1, value_2 FROM db_name.table_name' +)) +``` + +Поля настройки: + +- `db` — имя базы данных. Не указывать, если имя базы задано в параметрах. ``. +- `table` — имя таблицы и схемы, если она есть. +- `connection_string` — строка соединения. +- `invalidate_query` — запрос для проверки статуса словаря. Необязательный параметр. Читайте подробнее в разделе [Обновление словарей](external-dicts-dict-lifetime.md). +- `query` – пользовательский запрос. Необязательный параметр. + +!!! info "Примечание" + Поля `table` и `query` не могут быть использованы вместе. Также обязательно должен быть один из источников данных: `table` или `query`. + +ClickHouse получает от ODBC-драйвера информацию о квотировании и квотирует настройки в запросах к драйверу, поэтому имя таблицы нужно указывать в соответствии с регистром имени таблицы в базе данных. + +Если у вас есть проблемы с кодировками при использовании Oracle, ознакомьтесь с соответствующим разделом [FAQ](../../../faq/integration/oracle-odbc.md). + ### MySQL {#dicts-external_dicts_dict_sources-mysql} Пример настройки: @@ -487,6 +492,7 @@ LIFETIME(MIN 300 MAX 360) id=10 SQL_QUERY true + SELECT id, value_1, value_2 FROM db_name.table_name ``` @@ -505,6 +511,7 @@ SOURCE(MYSQL( where 'id=10' invalidate_query 'SQL_QUERY' fail_on_connection_loss 'true' + query 'SELECT id, value_1, value_2 FROM db_name.table_name' )) ``` @@ -531,6 +538,11 @@ SOURCE(MYSQL( - `fail_on_connection_loss` – параметр конфигурации, контролирующий поведение сервера при потере соединения. Если значение `true`, то исключение генерируется сразу же, если соединение между клиентом и сервером было потеряно. Если значение `false`, то сервер повторно попытается выполнить запрос три раза прежде чем сгенерировать исключение. Имейте в виду, что повторные попытки могут увеличить время выполнения запроса. Значение по умолчанию: `false`. +- `query` – пользовательский запрос. Необязательный параметр. + +!!! info "Примечание" + Поля `table` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `table` или `query`. + MySQL можно подключить на локальном хосте через сокеты, для этого необходимо задать `host` и `socket`. Пример настройки: @@ -547,6 +559,7 @@ MySQL можно подключить на локальном хосте чер id=10 SQL_QUERY true + SELECT id, value_1, value_2 FROM db_name.table_name ``` @@ -564,6 +577,7 @@ SOURCE(MYSQL( where 'id=10' invalidate_query 'SQL_QUERY' fail_on_connection_loss 'true' + query 'SELECT id, value_1, value_2 FROM db_name.table_name' )) ``` @@ -582,6 +596,7 @@ SOURCE(MYSQL( ids
id=10 1 + SELECT id, value_1, value_2 FROM default.ids ``` @@ -598,6 +613,7 @@ SOURCE(CLICKHOUSE( table 'ids' where 'id=10' secure 1 + query 'SELECT id, value_1, value_2 FROM default.ids' )); ``` @@ -612,6 +628,10 @@ SOURCE(CLICKHOUSE( - `where` — условие выбора. Может отсутствовать. - `invalidate_query` — запрос для проверки статуса словаря. Необязательный параметр. Читайте подробнее в разделе [Обновление словарей](external-dicts-dict-lifetime.md). - `secure` - флаг, разрешающий или не разрешающий защищённое SSL-соединение. +- `query` – пользовательский запрос. Необязательный параметр. + +!!! info "Примечание" + Поля `table` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `table` или `query`. ### MongoDB {#dicts-external_dicts_dict_sources-mongodb} @@ -703,25 +723,30 @@ SOURCE(REDIS( One "SomeColumn" = 42 8 + SELECT id, value_1, value_2 FROM database_name.table_name ``` Поля настройки: -- `host` – Имя хоста с установленной Cassandra или разделенный через запятую список хостов. -- `port` – Порт на серверах Cassandra. Если не указан, используется значение по умолчанию 9042. -- `user` – Имя пользователя для соединения с Cassandra. -- `password` – Пароль для соединения с Cassandra. -- `keyspace` – Имя keyspace (база данных). -- `column_family` – Имя семейства столбцов (таблица). -- `allow_filering` – Флаг, разрешающий или не разрешающий потенциально дорогостоящие условия на кластеризации ключевых столбцов. Значение по умолчанию 1. -- `partition_key_prefix` – Количество партиций ключевых столбцов в первичном ключе таблицы Cassandra. -Необходимо для составления ключей словаря. Порядок ключевых столбцов в определении словеря должен быть таким же как в Cassandra. -Значение по умолчанию 1 (первый ключевой столбец это ключ партицирования, остальные ключевые столбцы - ключи кластеризации). -- `consistency` – Уровень консистентности. Возмодные значения: `One`, `Two`, `Three`, - `All`, `EachQuorum`, `Quorum`, `LocalQuorum`, `LocalOne`, `Serial`, `LocalSerial`. Значение по умолчанию `One`. -- `where` – Опциональный критерий выборки. -- `max_threads` – Максимальное кол-во тредов для загрузки данных из нескольких партиций в словарь. + +- `host` – имя хоста с установленной Cassandra или разделенный через запятую список хостов. +- `port` – порт на серверах Cassandra. Если не указан, используется значение по умолчанию: 9042. +- `user` – имя пользователя для соединения с Cassandra. +- `password` – пароль для соединения с Cassandra. +- `keyspace` – имя keyspace (база данных). +- `column_family` – имя семейства столбцов (таблица). +- `allow_filering` – флаг, разрешающий или не разрешающий потенциально дорогостоящие условия на кластеризации ключевых столбцов. Значение по умолчанию: 1. +- `partition_key_prefix` – количество партиций ключевых столбцов в первичном ключе таблицы Cassandra. +Необходимо для составления ключей словаря. Порядок ключевых столбцов в определении словаря должен быть таким же, как в Cassandra. +Значение по умолчанию: 1 (первый ключевой столбец - это ключ партицирования, остальные ключевые столбцы - ключи кластеризации). +- `consistency` – уровень консистентности. Возможные значения: `One`, `Two`, `Three`, `All`, `EachQuorum`, `Quorum`, `LocalQuorum`, `LocalOne`, `Serial`, `LocalSerial`. Значение по умолчанию: `One`. +- `where` – опциональный критерий выборки. +- `max_threads` – максимальное количество тредов для загрузки данных из нескольких партиций в словарь. +- `query` – пользовательский запрос. 
Необязательный параметр.
+
+!!! info "Примечание"
+    Поля `column_family` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `column_family` или `query`.
 
 ### PosgreSQL {#dicts-external_dicts_dict_sources-postgresql}
 
 Пример настройки:
 
 ``` xml
 <source>
     <postgresql>
         <port>5432</port>
         <username>clickhouse</username>
         <password>qwerty</password>
         <db>db_name</db>
         <table>table_name</table>
         <where>id=10</where>
         <invalidate_query>SQL_QUERY</invalidate_query>
+        <query>SELECT id, value_1, value_2 FROM db_name.table_name</query>
     </postgresql>
 </source>
 ```
 
@@ -755,20 +781,25 @@ SOURCE(POSTGRESQL(
     replica(host 'example01-2' port 5432 priority 2)
     where 'id=10'
     invalidate_query 'SQL_QUERY'
+    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
 ))
 ```
 
-Setting fields:
+Поля настройки:
 
-- `host` – Хост для соединения с PostgreSQL. Вы можете указать его для всех реплик или задать индивидуально для каждой релпики (внутри `<replica>`).
-- `port` – Порт для соединения с PostgreSQL. Вы можете указать его для всех реплик или задать индивидуально для каждой релпики (внутри `<replica>`).
-- `user` – Имя пользователя для соединения с PostgreSQL. Вы можете указать его для всех реплик или задать индивидуально для каждой релпики (внутри `<replica>`).
-- `password` – Пароль для пользователя PostgreSQL.
-- `replica` – Раздел конфигурации реплик. Может быть несколько.
-    - `replica/host` – хост PostgreSQL.
-    - `replica/port` – порт PostgreSQL .
-    - `replica/priority` – Приоритет реплики. Во время попытки соединения, ClickHouse будет перебирать реплики в порядке приоритет. Меньшее значение означает более высокий приоритет.
-- `db` – Имя базы данных.
-- `table` – Имя таблицы.
-- `where` – Условие выборки. Синтаксис для условий такой же как для `WHERE` выражения в PostgreSQL, для примера, `id > 10 AND id < 20`. Необязательный параметр.
-- `invalidate_query` – Запрос для проверки условия загрузки словаря. Необязательный параметр. Читайте больше в разделе [Обновление словарей](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
+- `host` – хост для соединения с PostgreSQL. Вы можете указать его для всех реплик или задать индивидуально для каждой реплики (внутри `<replica>`).
+- `port` – порт для соединения с PostgreSQL. Вы можете указать его для всех реплик или задать индивидуально для каждой реплики (внутри `<replica>`).
+- `user` – имя пользователя для соединения с PostgreSQL. Вы можете указать его для всех реплик или задать индивидуально для каждой реплики (внутри `<replica>`).
+- `password` – пароль для пользователя PostgreSQL.
+- `replica` – раздел конфигурации реплик. Может быть несколько.
+    - `replica/host` – хост PostgreSQL.
+    - `replica/port` – порт PostgreSQL.
+    - `replica/priority` – приоритет реплики. Во время попытки соединения ClickHouse будет перебирать реплики в порядке приоритета. Меньшее значение означает более высокий приоритет.
+- `db` – имя базы данных.
+- `table` – имя таблицы.
+- `where` – условие выборки. Синтаксис для условий такой же, как для выражения `WHERE` в PostgreSQL. Например, `id > 10 AND id < 20`. Необязательный параметр.
+- `invalidate_query` – запрос для проверки условия загрузки словаря. Необязательный параметр. Более подробную информацию смотрите в разделе [обновление словарей](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).
+- `query` – пользовательский запрос. Необязательный параметр.
+
+!!! info "Примечание"
+    Поля `table` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `table` или `query`.
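+
+Например, DDL-запрос создания словаря с источником PostgreSQL и полем `query` мог бы выглядеть так (набросок; хост, учетные данные и структура таблицы здесь условные):
+
+``` sql
+CREATE DICTIONARY pg_dict
+(
+    id UInt64,
+    value_1 String,
+    value_2 String
+)
+PRIMARY KEY id
+SOURCE(POSTGRESQL(
+    host 'postgres-host'
+    port 5432
+    user 'clickhouse'
+    password 'qwerty'
+    query 'SELECT id, value_1, value_2 FROM db_name.table_name'
+))
+LAYOUT(HASHED())
+LIFETIME(MIN 300 MAX 360);
+```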
diff --git a/docs/ru/sql-reference/functions/bit-functions.md b/docs/ru/sql-reference/functions/bit-functions.md index 557288ae7c1..c2f97b2c1b8 100644 --- a/docs/ru/sql-reference/functions/bit-functions.md +++ b/docs/ru/sql-reference/functions/bit-functions.md @@ -5,7 +5,7 @@ toc_title: "Битовые функции" # Битовые функции {#bitovye-funktsii} -Битовые функции работают для любой пары типов из UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64. +Битовые функции работают для любой пары типов из `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, `Int64`, `Float32`, `Float64`. Тип результата - целое число, битность которого равна максимальной битности аргументов. Если хотя бы один аргумент знаковый, то результат - знаковое число. Если аргумент - число с плавающей запятой - оно приводится к Int64. @@ -19,8 +19,100 @@ toc_title: "Битовые функции" ## bitShiftLeft(a, b) {#bitshiftlefta-b} +Сдвигает влево на заданное количество битов бинарное представление значения. + +Если передан аргумент типа `FixedString` или `String`, то он рассматривается, как одно многобайтовое значение. + +Биты `FixedString` теряются по мере того, как выдвигаются за пределы строки. Значение `String` дополняется байтами, поэтому его биты не теряются. + +**Синтаксис** + +``` sql +bitShiftLeft(a, b) +``` + +**Аргументы** + +- `a` — сдвигаемое значение. [Целое число](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md) или [FixedString](../../sql-reference/data-types/fixedstring.md). +- `b` — величина сдвига. [Беззнаковое целое число](../../sql-reference/data-types/int-uint.md), допустимы типы с разрядностью не более 64 битов. + +**Возвращаемое значение** + +- Сдвинутое значение. + +Тип совпадает с типом сдвигаемого значения. + +**Пример** + +В запросах используются функции [bin](encoding-functions.md#bin) и [hex](encoding-functions.md#hex), чтобы наглядно показать биты после сдвига. + +``` sql +SELECT 99 AS a, bin(a), bitShiftLeft(a, 2) AS a_shifted, bin(a_shifted); +SELECT 'abc' AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted); +SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted); +``` + +Результат: + +``` text +┌──a─┬─bin(99)──┬─a_shifted─┬─bin(bitShiftLeft(99, 2))─┐ +│ 99 │ 01100011 │ 140 │ 10001100 │ +└────┴──────────┴───────────┴──────────────────────────┘ +┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftLeft('abc', 4))─┐ +│ abc │ 616263 │ &0 │ 06162630 │ +└─────┴────────────┴───────────┴─────────────────────────────┘ +┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftLeft(toFixedString('abc', 3), 4))─┐ +│ abc │ 616263 │ &0 │ 162630 │ +└─────┴──────────────────────────────┴───────────┴───────────────────────────────────────────────┘ +``` + ## bitShiftRight(a, b) {#bitshiftrighta-b} +Сдвигает вправо на заданное количество битов бинарное представление значения. + +Если передан аргумент типа `FixedString` или `String`, то он рассматривается, как одно многобайтовое значение. Длина значения типа `String` уменьшается по мере сдвига. + +**Синтаксис** + +``` sql +bitShiftRight(a, b) +``` + +**Аргументы** + +- `a` — сдвигаемое значение. [Целое число](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md) или [FixedString](../../sql-reference/data-types/fixedstring.md). +- `b` — величина сдвига. [Беззнаковое целое число](../../sql-reference/data-types/int-uint.md), допустимы типы с разрядностью не более 64 битов. 
+ +**Возвращаемое значение** + +- Сдвинутое значение. + +Тип совпадает с типом сдвигаемого значения. + +**Пример** + +Запрос: + +``` sql +SELECT 101 AS a, bin(a), bitShiftRight(a, 2) AS a_shifted, bin(a_shifted); +SELECT 'abc' AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted); +SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted); +``` + +Результат: + +``` text +┌───a─┬─bin(101)─┬─a_shifted─┬─bin(bitShiftRight(101, 2))─┐ +│ 101 │ 01100101 │ 25 │ 00011001 │ +└─────┴──────────┴───────────┴────────────────────────────┘ +┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftRight('abc', 12))─┐ +│ abc │ 616263 │ │ 0616 │ +└─────┴────────────┴───────────┴───────────────────────────────┘ +┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftRight(toFixedString('abc', 3), 12))─┐ +│ abc │ 616263 │ │ 000616 │ +└─────┴──────────────────────────────┴───────────┴─────────────────────────────────────────────────┘ +``` + ## bitTest {#bittest} Принимает любое целое число и конвертирует его в [двоичное число](https://en.wikipedia.org/wiki/Binary_number), возвращает значение бита в указанной позиции. Отсчет начинается с 0 справа налево. diff --git a/docs/ru/sql-reference/functions/json-functions.md b/docs/ru/sql-reference/functions/json-functions.md index 4df0f8633ce..f36c00cb2aa 100644 --- a/docs/ru/sql-reference/functions/json-functions.md +++ b/docs/ru/sql-reference/functions/json-functions.md @@ -216,6 +216,44 @@ SELECT JSONExtract('{"day": 5}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \ SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)]; ``` +## JSONExtractKeys {#jsonextractkeysjson-indices-or-keys} + +Парсит строку JSON и извлекает ключи. + +**Синтаксис** + +``` sql +JSONExtractKeys(json[, a, b, c...]) +``` + +**Аргументы** + +- `json` — [строка](../data-types/string.md), содержащая валидный JSON. +- `a, b, c...` — индексы или ключи, разделенные запятыми, которые указывают путь к внутреннему полю во вложенном объекте JSON. Каждый аргумент может быть либо [строкой](../data-types/string.md) для получения поля по ключу, либо [целым числом](../data-types/int-uint.md) для получения N-го поля (индексирование начинается с 1, отрицательные числа используются для отсчета с конца). Если параметр не задан, весь JSON разбирается как объект верхнего уровня. Необязательный параметр. + +**Возвращаемые значения** + +Массив с ключами JSON. + +Тип: [Array](../data-types/array.md)([String](../data-types/string.md)). + +**Пример** + +Запрос: + +```sql +SELECT JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}'); +``` + +Результат: + +``` +text +┌─JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}')─┐ +│ ['a','b'] │ +└────────────────────────────────────────────────────────────┘ +``` + ## JSONExtractRaw(json\[, indices_or_keys\]…) {#jsonextractrawjson-indices-or-keys} Возвращает часть JSON в виде строки, содержащей неразобранную подстроку. diff --git a/docs/ru/sql-reference/operators/exists.md b/docs/ru/sql-reference/operators/exists.md new file mode 100644 index 00000000000..3e04304e222 --- /dev/null +++ b/docs/ru/sql-reference/operators/exists.md @@ -0,0 +1,44 @@ +# EXISTS {#exists-operator} + +Оператор `EXISTS` проверяет, сколько строк содержит результат выполнения подзапроса. Если результат пустой, то оператор возвращает `0`. В остальных случаях оператор возвращает `1`. + +`EXISTS` может быть использован в секции [WHERE](../../sql-reference/statements/select/where.md). + +!!! 
warning "Предупреждение" + Ссылки на таблицы или столбцы основного запроса не поддерживаются в подзапросе. + +**Синтаксис** + +```sql +WHERE EXISTS(subquery) +``` + +**Пример** + +Запрос с подзапросом, возвращающим несколько строк: + +``` sql +SELECT count() FROM numbers(10) WHERE EXISTS(SELECT number FROM numbers(10) WHERE number > 8); +``` + +Результат: + +``` text +┌─count()─┐ +│ 10 │ +└─────────┘ +``` + +Запрос с подзапросом, возвращающим пустой результат: + +``` sql +SELECT count() FROM numbers(10) WHERE EXISTS(SELECT number FROM numbers(10) WHERE number > 11); +``` + +Результат: + +``` text +┌─count()─┐ +│ 0 │ +└─────────┘ +``` diff --git a/docs/ru/sql-reference/operators/index.md b/docs/ru/sql-reference/operators/index.md index 0041be745fc..3a2d3d69119 100644 --- a/docs/ru/sql-reference/operators/index.md +++ b/docs/ru/sql-reference/operators/index.md @@ -72,7 +72,7 @@ toc_title: "Операторы" ## Операторы для работы с множествами {#operatory-dlia-raboty-s-mnozhestvami} -*Смотрите раздел [Операторы IN](../../sql-reference/operators/in.md#select-in-operators).* +Смотрите [операторы IN](../../sql-reference/operators/in.md#select-in-operators) и оператор [EXISTS](../../sql-reference/operators/exists.md). `a IN ...` - функция `in(a, b)` diff --git a/docs/ru/sql-reference/statements/alter/comment.md b/docs/ru/sql-reference/statements/alter/comment.md new file mode 100644 index 00000000000..85b662407b6 --- /dev/null +++ b/docs/ru/sql-reference/statements/alter/comment.md @@ -0,0 +1,58 @@ +--- +toc_priority: 51 +toc_title: COMMENT +--- + +# ALTER TABLE … MODIFY COMMENT {#alter-modify-comment} + +Добавляет, изменяет или удаляет комментарий к таблице, независимо от того, был ли он установлен раньше или нет. Изменение комментария отражается как в системной таблице [system.tables](../../../operations/system-tables/tables.md), так и в результате выполнения запроса `SHOW CREATE TABLE`. + +**Синтаксис** + +``` sql +ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY COMMENT 'Comment' +``` + +**Примеры** + +Создание таблицы с комментарием (для более подробной информации смотрите секцию [COMMENT](../../../sql-reference/statements/create/table.md#comment-table)): + +``` sql +CREATE TABLE table_with_comment +( + `k` UInt64, + `s` String +) +ENGINE = Memory() +COMMENT 'The temporary table'; +``` + +Изменение комментария: + +``` sql +ALTER TABLE table_with_comment MODIFY COMMENT 'new comment on a table'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND name = 'table_with_comment'; +``` + +Вывод нового комментария: + +```text +┌─comment────────────────┐ +│ new comment on a table │ +└────────────────────────┘ +``` + +Удаление комментария: + +``` sql +ALTER TABLE table_with_comment MODIFY COMMENT ''; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND name = 'table_with_comment'; +``` + +Вывод удаленного комментария: + +```text +┌─comment─┐ +│ │ +└─────────┘ +``` diff --git a/docs/ru/sql-reference/statements/alter/index.md b/docs/ru/sql-reference/statements/alter/index.md index 73ee201b56b..2d7dca92287 100644 --- a/docs/ru/sql-reference/statements/alter/index.md +++ b/docs/ru/sql-reference/statements/alter/index.md @@ -41,6 +41,8 @@ ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN - [ROW POLICY](../../../sql-reference/statements/alter/row-policy.md) - [SETTINGS PROFILE](../../../sql-reference/statements/alter/settings-profile.md) +Выражение [ALTER TABLE ... 
MODIFY COMMENT](../../../sql-reference/statements/alter/comment.md) добавляет, изменяет или удаляет комментарий к таблице, независимо от того, был ли он установлен раньше или нет. + ### Мутации {#mutations} Мутации - разновидность запроса ALTER, позволяющая изменять или удалять данные в таблице. В отличие от стандартных запросов [ALTER TABLE … DELETE](../../../sql-reference/statements/alter/delete.md) и [ALTER TABLE … UPDATE](../../../sql-reference/statements/alter/update.md), рассчитанных на точечное изменение данных, область применения мутаций - достаточно тяжёлые изменения, затрагивающие много строк в таблице. Поддержана для движков таблиц семейства [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md), в том числе для движков с репликацией. diff --git a/docs/ru/sql-reference/statements/create/dictionary.md b/docs/ru/sql-reference/statements/create/dictionary.md index 25546549feb..c8b123c7b19 100644 --- a/docs/ru/sql-reference/statements/create/dictionary.md +++ b/docs/ru/sql-reference/statements/create/dictionary.md @@ -5,8 +5,12 @@ toc_title: "Словарь" # CREATE DICTIONARY {#create-dictionary-query} +Создаёт [внешний словарь](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) с заданной [структурой](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [источником](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [способом размещения в памяти](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) и [периодом обновления](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). + +**Синтаксис** + ``` sql -CREATE DICTIONARY [IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster] +CREATE DICTIONARY [OR REPLACE][IF NOT EXISTS] [db.]dictionary_name [ON CLUSTER cluster] ( key1 type1 [DEFAULT|EXPRESSION expr1] [IS_OBJECT_ID], key2 type2 [DEFAULT|EXPRESSION expr2], @@ -17,14 +21,77 @@ PRIMARY KEY key1, key2 SOURCE(SOURCE_NAME([param1 value1 ... paramN valueN])) LAYOUT(LAYOUT_NAME([param_name param_value])) LIFETIME({MIN min_val MAX max_val | max_val}) +SETTINGS(setting_name = setting_value, setting_name = setting_value, ...) +COMMENT 'Comment' ``` -Создаёт [внешний словарь](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) с заданной [структурой](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md), [источником](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md), [способом размещения в памяти](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) и [периодом обновления](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). - Структура внешнего словаря состоит из атрибутов. Атрибуты словаря задаются как столбцы таблицы. Единственным обязательным свойством атрибута является его тип, все остальные свойства могут иметь значения по умолчанию. В зависимости от [способа размещения словаря в памяти](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md), ключами словаря могут быть один и более атрибутов. -Смотрите [Внешние словари](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). +Более подробную информацию смотрите в разделе [внешние словари](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). 
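+
+Секция `SETTINGS` позволяет задать настройки, применяемые при загрузке данных словаря. Ниже приведен набросок (имя словаря условное, настройка выбрана только для иллюстрации):
+
+``` sql
+CREATE DICTIONARY dict_with_settings
+(
+    id UInt64,
+    value String
+)
+PRIMARY KEY id
+SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table'))
+LAYOUT(FLAT())
+LIFETIME(MIN 0 MAX 1000)
+SETTINGS(max_execution_time = 60);
+```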
- \ No newline at end of file +Вы можете добавить комментарий к словарю при его создании, используя секцию `COMMENT`. + +**Пример** + +Входная таблица `source_table`: + +``` text +┌─id─┬─value──┐ +│ 1 │ First │ +│ 2 │ Second │ +└────┴────────┘ +``` + +Создание словаря: + +``` sql +CREATE DICTIONARY dictionary_with_comment +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table')) +LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000) +COMMENT 'The temporary dictionary'; +``` + +Вывод словаря: + +``` sql +SHOW CREATE DICTIONARY dictionary_with_comment; +``` + +```text +┌─statement───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ CREATE DICTIONARY default.dictionary_with_comment +( + `id` UInt64, + `value` String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(FLAT()) +COMMENT 'The temporary dictionary' │ +└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +Вывод комментария к словарю: + +``` sql +SELECT comment FROM system.dictionaries WHERE name == 'dictionary_with_comment' AND database == currentDatabase(); +``` + +```text +┌─comment──────────────────┐ +│ The temporary dictionary │ +└──────────────────────────┘ +``` + +**См. также** + +- [system.dictionaries](../../../operations/system-tables/dictionaries.md) — эта таблица содержит информацию о [внешних словарях](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). diff --git a/docs/ru/sql-reference/statements/select/where.md b/docs/ru/sql-reference/statements/select/where.md index 8cb8e634303..e176780c43e 100644 --- a/docs/ru/sql-reference/statements/select/where.md +++ b/docs/ru/sql-reference/statements/select/where.md @@ -4,26 +4,52 @@ toc_title: WHERE # Секция WHERE {#select-where} -Позволяет задать выражение, которое ClickHouse использует для фильтрации данных перед всеми другими действиями в запросе кроме выражений, содержащихся в секции [PREWHERE](prewhere.md#prewhere-clause). Обычно, это выражение с логическими операторами. +Позволяет задать выражение, которое ClickHouse использует для фильтрации данных перед всеми другими действиями в запросе кроме выражений, содержащихся в секции [PREWHERE](prewhere.md#prewhere-clause). Обычно это выражение с логическими операторами. Результат выражения должен иметь тип `UInt8`. ClickHouse использует в выражении индексы, если это позволяет [движок таблицы](../../../engines/table-engines/index.md). -Если в секции необходимо проверить [NULL](../../../sql-reference/syntax.md#null-literal), то используйте операторы [IS NULL](../../operators/index.md#operator-is-null) и [IS NOT NULL](../../operators/index.md#is-not-null), а также соответствующие функции `isNull` и `isNotNull`. В противном случае выражение будет считаться всегда не выполненным. +!!! note "Примечание" + Существует оптимизация фильтрации под названием [PREWHERE](prewhere.md). 
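+
+Для наглядности ниже приведен набросок запроса, в котором фильтр задан явно в секции `PREWHERE` (таблица `hits` и её столбцы здесь условные):
+
+``` sql
+SELECT count()
+FROM hits
+PREWHERE CounterID = 111          -- сначала читаются и фильтруются только столбцы из PREWHERE
+WHERE EventDate >= '2021-01-01';  -- затем условие WHERE применяется к оставшимся данным
+```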
+ +Если в секции необходимо проверить [NULL](../../../sql-reference/syntax.md#null-literal), то используйте операторы [IS NULL](../../operators/index.md#operator-is-null) и [IS NOT NULL](../../operators/index.md#is-not-null), а также соответствующие функции [isNull](../../../sql-reference/functions/functions-for-nulls.md#isnull) и [isNotNull](../../../sql-reference/functions/functions-for-nulls.md#isnotnull). В противном случае выражение будет считаться всегда не выполненным. + +**Пример** + +Чтобы найти числа, которые кратны 3 и больше 10, можно выполнить запрос к [таблице numbers](../../../sql-reference/table-functions/numbers.md): + +``` sql +SELECT number FROM numbers(20) WHERE (number > 10) AND (number % 3 == 0); +``` + +Результат: + +``` text +┌─number─┐ +│ 12 │ +│ 15 │ +│ 18 │ +└────────┘ +``` Пример проверки на `NULL`: ``` sql -SELECT * FROM t_null WHERE y IS NULL +CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE=MergeTree() ORDER BY x; +INSERT INTO t_null VALUES (1, NULL), (2, 3); + +SELECT * FROM t_null WHERE y IS NULL; +SELECT * FROM t_null WHERE y != 0; ``` +Результат: + ``` text ┌─x─┬────y─┐ │ 1 │ ᴺᵁᴸᴸ │ └───┴──────┘ +┌─x─┬─y─┐ +│ 2 │ 3 │ +└───┴───┘ ``` - -!!! note "Примечание" - Существует оптимизация фильтрации под названием [prewhere](prewhere.md). - diff --git a/docs/ru/sql-reference/table-functions/mysql.md b/docs/ru/sql-reference/table-functions/mysql.md index e881961e3d9..5ba7e8f1f0f 100644 --- a/docs/ru/sql-reference/table-functions/mysql.md +++ b/docs/ru/sql-reference/table-functions/mysql.md @@ -107,5 +107,5 @@ SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123'); **Смотрите также** - [Движок таблиц ‘MySQL’](../../sql-reference/table-functions/mysql.md) -- [Использование MySQL как источника данных для внешнего словаря](../../sql-reference/table-functions/mysql.md#dicts-external_dicts_dict_sources-mysql) +- [Использование MySQL как источника данных для внешнего словаря](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql) diff --git a/docs/ru/sql-reference/table-functions/postgresql.md b/docs/ru/sql-reference/table-functions/postgresql.md index f5c79e5305a..a8ae7cfb80b 100644 --- a/docs/ru/sql-reference/table-functions/postgresql.md +++ b/docs/ru/sql-reference/table-functions/postgresql.md @@ -127,6 +127,6 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32) **См. также** - [Движок таблиц PostgreSQL](../../sql-reference/table-functions/postgresql.md) -- [Использование PostgreSQL как источника данных для внешнего словаря](../../sql-reference/table-functions/postgresql.md#dicts-external_dicts_dict_sources-postgresql) +- [Использование PostgreSQL как источника данных для внешнего словаря](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql) [Оригинальная статья](https://clickhouse.com/docs/ru/sql-reference/table-functions/postgresql/) diff --git a/docs/tools/README.md b/docs/tools/README.md index 4ab44b0d945..fb87c188257 100644 --- a/docs/tools/README.md +++ b/docs/tools/README.md @@ -1,6 +1,6 @@ ## How ClickHouse documentation is generated? {#how-clickhouse-documentation-is-generated} -ClickHouse documentation is built using [build.py](build.py) script that uses [mkdocs](https://www.mkdocs.org) library and it’s dependencies to separately build all version of documentations (all languages in either single and multi page mode) as static HTMLs and then a PDF for each single page version. 
The results are then put in the correct directory structure. It is recommended to use Python 3.7 to run this script. +ClickHouse documentation is built using [build.py](build.py) script that uses [mkdocs](https://www.mkdocs.org) library and it’s dependencies to separately build all version of documentations (all languages in either single and multi page mode) as static HTMLs for each single page version. The results are then put in the correct directory structure. It is recommended to use Python 3.7 to run this script. [release.sh](release.sh) also pulls static files needed for [official ClickHouse website](https://clickhouse.com) from [../../website](../../website) folder then pushes to specified GitHub repo to be served via [GitHub Pages](https://pages.github.com). @@ -22,16 +22,12 @@ It’ll take some effort to go through, but the result will be very close to pro For the first time you’ll need to: -#### 1. Install [wkhtmltopdf](https://wkhtmltopdf.org/) - -Follow the instructions on it's official website: - -#### 2. Install CLI tools from npm +#### 1. Install CLI tools from npm 1. `sudo apt-get install npm` for Debian/Ubuntu or `brew install npm` on Mac OS X. 2. `sudo npm install -g purify-css amphtml-validator`. -#### 3. Set up virtualenv +#### 2. Set up virtualenv ``` bash $ cd ClickHouse/docs/tools @@ -41,7 +37,7 @@ $ source venv/bin/activate $ pip3 install -r requirements.txt ``` -#### 4. Run build.py +#### 3. Run build.py When all prerequisites are installed, running `build.py` without args (there are some, check `build.py --help`) will generate `ClickHouse/docs/build` folder with complete static html website. diff --git a/docs/tools/build.py b/docs/tools/build.py index 08329c33271..785928cf4ab 100755 --- a/docs/tools/build.py +++ b/docs/tools/build.py @@ -118,7 +118,7 @@ def build_for_lang(lang, args): ) # Clean to be safe if last build finished abnormally - single_page.remove_temporary_files(lang, args) + single_page.remove_temporary_files(lang, args) raw_config['nav'] = nav.build_docs_nav(lang, args) @@ -192,7 +192,6 @@ if __name__ == '__main__': arg_parser.add_argument('--skip-multi-page', action='store_true') arg_parser.add_argument('--skip-single-page', action='store_true') arg_parser.add_argument('--skip-amp', action='store_true') - arg_parser.add_argument('--skip-pdf', action='store_true') arg_parser.add_argument('--skip-website', action='store_true') arg_parser.add_argument('--skip-blog', action='store_true') arg_parser.add_argument('--skip-git-log', action='store_true') @@ -228,7 +227,6 @@ if __name__ == '__main__': args.skip_multi_page = True args.skip_blog = True args.skip_website = True - args.skip_pdf = True args.skip_amp = True if args.skip_git_log or args.skip_amp: diff --git a/docs/tools/single_page.py b/docs/tools/single_page.py index 2c0ddebf3c7..cf41e2b78c2 100644 --- a/docs/tools/single_page.py +++ b/docs/tools/single_page.py @@ -195,44 +195,6 @@ def build_single_page_version(lang, args, nav, cfg): test.test_single_page( os.path.join(test_dir, 'single', 'index.html'), lang) - if not args.skip_pdf: - single_page_index_html = os.path.join(test_dir, 'single', 'index.html') - single_page_pdf = os.path.abspath( - os.path.join(single_page_output_path, f'clickhouse_{lang}.pdf') - ) - - with open(single_page_index_html, 'r') as f: - soup = bs4.BeautifulSoup( - f.read(), - features='html.parser' - ) - soup_prefix = f'file://{test_dir}' - for img in soup.findAll('img'): - if img['src'].startswith('/'): - img['src'] = soup_prefix + img['src'] - for script in 
soup.findAll('script'): - script_src = script.get('src') - if script_src: - script['src'] = soup_prefix + script_src.split('?', 1)[0] - for link in soup.findAll('link'): - link['href'] = soup_prefix + link['href'].split('?', 1)[0] - - with open(single_page_index_html, 'w') as f: - f.write(str(soup)) - - create_pdf_command = [ - 'wkhtmltopdf', - '--print-media-type', - '--log-level', 'warn', - single_page_index_html, single_page_pdf - ] - - logging.info(' '.join(create_pdf_command)) - try: - subprocess.check_call(' '.join(create_pdf_command), shell=True) - except: - pass # TODO: fix pdf issues - logging.info(f'Finished building single page version for {lang}') remove_temporary_files(lang, args) diff --git a/docs/zh/operations/performance-test.md b/docs/zh/operations/performance-test.md index 9f5f4fe61f1..d3643969c2e 100644 --- a/docs/zh/operations/performance-test.md +++ b/docs/zh/operations/performance-test.md @@ -1,82 +1,74 @@ --- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd toc_priority: 54 toc_title: "\u6D4B\u8BD5\u786C\u4EF6" --- -# 如何使用ClickHouse测试您的硬件 {#how-to-test-your-hardware-with-clickhouse} +# 如何使用 ClickHouse 测试您的硬件 {#how-to-test-your-hardware-with-clickhouse} -使用此指令,您可以在任何服务器上运行基本的ClickHouse性能测试,而无需安装ClickHouse软件包。 +你可以在任何服务器上运行基本的 ClickHouse 性能测试,而无需安装 ClickHouse 软件包。 -1. 转到 “commits” 页数:https://github.com/ClickHouse/ClickHouse/commits/master -2. 点击第一个绿色复选标记或红色十字与绿色 “ClickHouse Build Check” 然后点击 “Details” 附近链接 “ClickHouse Build Check”. 在一些提交中没有这样的链接,例如与文档的提交。 在这种情况下,请选择具有此链接的最近提交。 +## 自动运行 -3. 将链接复制到 “clickhouse” 二进制为amd64或aarch64. +你可以使用一个简单脚本来运行基准测试。 -4. ssh到服务器并使用wget下载它: +1. 下载脚本 +``` +wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/hardware.sh +``` - +2. 运行脚本 +``` +chmod a+x ./hardware.sh +./hardware.sh +``` - # For amd64: - wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578163263_binary/clickhouse - # For aarch64: - wget https://clickhouse-builds.s3.yandex.net/0/00ba767f5d2a929394ea3be193b1f79074a1c4bc/1578161264_binary/clickhouse - # Then do: - chmod a+x clickhouse +3. 复制输出的信息并将它发送给 feedback@clickhouse.com -1. 下载配置: +所有的结果都在这里公布: https://clickhouse.com/benchmark/hardware/ - - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml - mkdir config.d - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml +## 人工运行 -1. 下载基准测试文件: +或者,你可以按照以下步骤实施基准测试。 +```bash +# For amd64: +wget https://builds.clickhouse.com/master/amd64/clickhouse +# For aarch64: +wget https://builds.clickhouse.com/master/aarch64/clickhouse +# Then do: +chmod a+x clickhouse +``` - +2. 下载基准文件 +```bash +wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh +chmod a+x benchmark-new.sh +wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql +``` - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/benchmark-new.sh - chmod a+x benchmark-new.sh - wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql +3. 
根据 [Yandex.Metrica 数据集](../getting-started/example-datasets/metrica.md) 中的说明下载测试数据(“ hits ” 数据表包含 1 亿行记录)。 +```bash +wget https://datasets.clickhouse.com/hits/partitions/hits_100m_obfuscated_v1.tar.xz +tar xvf hits_100m_obfuscated_v1.tar.xz -C . +mv hits_100m_obfuscated_v1/* . +``` -1. 根据下载测试数据 [Yandex梅里卡数据集](../getting-started/example-datasets/metrica.md) 说明 (“hits” 表包含100万行)。 +4. 运行服务器: +```bash +./clickhouse server +``` - +5. 检查数据:在另一个终端中通过 ssh 登陆服务器 +```bash +./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated" +100000000 +``` +6. 运行基准测试: +```bash +./benchmark-new.sh hits_100m_obfuscated +``` - wget https://datasets.clickhouse.com/hits/partitions/hits_100m_obfuscated_v1.tar.xz - tar xvf hits_100m_obfuscated_v1.tar.xz -C . - mv hits_100m_obfuscated_v1/* . +7. 将有关硬件配置的型号和信息发送到 clickhouse-feedback@yandex-team.com -1. 运行服务器: - - - - ./clickhouse server - -1. 检查数据:ssh到另一个终端中的服务器 - - - - ./clickhouse client --query "SELECT count() FROM hits_100m_obfuscated" - 100000000 - -1. 编辑benchmark-new.sh,改变 `clickhouse-client` 到 `./clickhouse client` 并添加 `–-max_memory_usage 100000000000` 参数。 - - - - mcedit benchmark-new.sh - -1. 运行基准测试: - - - - ./benchmark-new.sh hits_100m_obfuscated - -1. 将有关硬件配置的编号和信息发送到clickhouse-feedback@yandex-team.com - -所有结果都在这里公布:https://clickhouse.技术/基准/硬件/ +所有结果都在这里公布:https://clickhouse.com/benchmark/hardware/ diff --git a/docs/zh/operations/settings/permissions-for-queries.md b/docs/zh/operations/settings/permissions-for-queries.md index 93e439ae206..b1cac1882c0 100644 --- a/docs/zh/operations/settings/permissions-for-queries.md +++ b/docs/zh/operations/settings/permissions-for-queries.md @@ -1,6 +1,4 @@ --- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd toc_priority: 58 toc_title: "\u67E5\u8BE2\u6743\u9650" --- diff --git a/docs/zh/operations/settings/settings-profiles.md b/docs/zh/operations/settings/settings-profiles.md index 77dff88d50b..a9e20b8895e 100644 --- a/docs/zh/operations/settings/settings-profiles.md +++ b/docs/zh/operations/settings/settings-profiles.md @@ -1,32 +1,30 @@ --- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd toc_priority: 61 -toc_title: "\u8BBE\u7F6E\u914D\u7F6E\u6587\u4EF6" +toc_title: "\u8BBE\u7F6E\u914D\u7F6E" --- -# 设置配置文件 {#settings-profiles} +# 设置配置 {#settings-profiles} -设置配置文件是以相同名称分组的设置的集合。 +设置配置是设置的集合,并按照相同的名称进行分组。 !!! note "信息" - ClickHouse还支持 [SQL驱动的工作流](../access-rights.md#access-control) 用于管理设置配置文件。 我们建议使用它。 + ClickHouse 还支持用 [SQL驱动的工作流](../../operations/access-rights.md#access-control) 管理设置配置。我们建议使用它。 -配置文件可以有任何名称。 配置文件可以有任何名称。 您可以为不同的用户指定相同的配置文件。 您可以在设置配置文件中编写的最重要的事情是 `readonly=1`,这确保只读访问。 +设置配置可以任意命名。你可以为不同的用户指定相同的设置配置。您可以在设置配置中写入的最重要的内容是 `readonly=1`,这将确保只读访问。 -设置配置文件可以彼此继承。 要使用继承,请指示一个或多个 `profile` 配置文件中列出的其他设置之前的设置。 如果在不同的配置文件中定义了一个设置,则使用最新定义。 +设置配置可以彼此继承。要使用继承,请在文件中列举的其他设置之前,指定一个或多个 `profile` 设置。如果在不同的设置配置中定义了同一个设置,则使用最新的定义。 -要应用配置文件中的所有设置,请设置 `profile` 设置。 +要应用设置配置中的所有设置,请设定 `profile` 设置。 示例: -安装 `web` 侧写 +添加 `web` 配置。 ``` sql SET profile = 'web' ``` -设置配置文件在用户配置文件中声明。 这通常是 `users.xml`. +设置配置在用户配置文件中声明。这通常是指 `users.xml`. 示例: @@ -72,10 +70,10 @@ SET profile = 'web' ``` -该示例指定了两个配置文件: `default` 和 `web`. 
+这个示例指定了两个配置: `default` 和 `web` 。 -该 `default` 配置文件有一个特殊用途:它必须始终存在并在启动服务器时应用。 换句话说, `default` 配置文件包含默认设置。 +这个 `default` 配置有一个特殊用途:它必须始终存在并在启动服务时应用。换句话说, `default` 配置包含默认设置。 -该 `web` 配置文件是一个常规的配置文件,可以使用设置 `SET` 查询或在HTTP查询中使用URL参数。 +`web` 配置是一个常规的配置,它可以通过 `SET` 查询进行设定,也可以通过在HTTP查询中使用URL参数进行设定。 [原始文章](https://clickhouse.com/docs/en/operations/settings/settings_profiles/) diff --git a/docs/zh/sql-reference/data-types/special-data-types/set.md b/docs/zh/sql-reference/data-types/special-data-types/set.md index f814ab04325..b66f25cef18 100644 --- a/docs/zh/sql-reference/data-types/special-data-types/set.md +++ b/docs/zh/sql-reference/data-types/special-data-types/set.md @@ -1,3 +1,3 @@ -# 设置 {#set} +# 集合 {#set} 可以用在 IN 表达式的右半部分。 diff --git a/docs/zh/sql-reference/statements/show.md b/docs/zh/sql-reference/statements/show.md index 85c5744f26b..0dfd5943a0d 100644 --- a/docs/zh/sql-reference/statements/show.md +++ b/docs/zh/sql-reference/statements/show.md @@ -12,6 +12,7 @@ SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY] [db.]table [INTO OUTFILE filename] [F ``` 返回单个字符串类型的 ‘statement’列,其中只包含了一个值 - 用来创建指定对象的 `CREATE` 语句。 +注意,如果使用该查询去获取系统表的 `CREATE` 语句,你得到的是一个虚构的语句,仅用来展示系统的表结构,而不能实际创建表。 ## SHOW DATABASES {#show-databases} diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 0ad0764d721..383b9bb5e52 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -60,10 +60,7 @@ namespace CurrentMetrics { - extern const Metric Revision; - extern const Metric VersionInteger; extern const Metric MemoryTracking; - extern const Metric MaxDDLEntryID; } namespace fs = std::filesystem; @@ -330,7 +327,9 @@ std::vector Client::loadWarningMessages() { std::vector messages; connection->sendQuery(connection_parameters.timeouts, "SELECT message FROM system.warnings", "" /* query_id */, - QueryProcessingStage::Complete, nullptr, nullptr, false); + QueryProcessingStage::Complete, + &global_context->getSettingsRef(), + &global_context->getClientInfo(), false); while (true) { Packet packet = connection->receivePacket(); @@ -403,36 +402,6 @@ void Client::initialize(Poco::Util::Application & self) } -void Client::prepareForInteractive() -{ - clearTerminal(); - showClientVersion(); - - if (delayed_interactive) - std::cout << std::endl; - - /// Load Warnings at the beginning of connection - if (!config().has("no-warnings")) - { - try - { - std::vector messages = loadWarningMessages(); - if (!messages.empty()) - { - std::cout << "Warnings:" << std::endl; - for (const auto & message : messages) - std::cout << " * " << message << std::endl; - std::cout << std::endl; - } - } - catch (...) - { - /// Ignore exception - } - } -} - - int Client::main(const std::vector & /*args*/) try { @@ -459,11 +428,37 @@ try processConfig(); + /// Includes delayed_interactive. + if (is_interactive) + { + clearTerminal(); + showClientVersion(); + } + connect(); + /// Load Warnings at the beginning of connection + if (is_interactive && !config().has("no-warnings")) + { + try + { + std::vector messages = loadWarningMessages(); + if (!messages.empty()) + { + std::cout << "Warnings:" << std::endl; + for (const auto & message : messages) + std::cout << " * " << message << std::endl; + std::cout << std::endl; + } + } + catch (...) 
+ { + /// Ignore exception + } + } + if (is_interactive && !delayed_interactive) { - prepareForInteractive(); runInteractive(); } else @@ -489,10 +484,7 @@ try } if (delayed_interactive) - { - prepareForInteractive(); runInteractive(); - } } return 0; @@ -566,9 +558,7 @@ void Client::connect() if (is_interactive) { std::cout << "Connected to " << server_name << " server version " << server_version << " revision " << server_revision << "." - << std::endl; - if (!delayed_interactive) - std::cout << std::endl; + << std::endl << std::endl; auto client_version_tuple = std::make_tuple(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH); auto server_version_tuple = std::make_tuple(server_version_major, server_version_minor, server_version_patch); @@ -1013,7 +1003,6 @@ void Client::addOptions(OptionsDescription & options_description) ("password", po::value()->implicit_value("\n", ""), "password") ("ask-password", "ask-password") ("quota_key", po::value(), "A string to differentiate quotas when the user have keyed quotas configured on server") - ("pager", po::value(), "pager") ("testmode,T", "enable test hints in comments") ("max_client_network_bandwidth", po::value(), "the maximum speed of data exchange over the network for the client in bytes per second.") @@ -1114,8 +1103,6 @@ void Client::processOptions(const OptionsDescription & options_description, config().setString("host", options["host"].as()); if (options.count("interleave-queries-file")) interleave_queries_files = options["interleave-queries-file"].as>(); - if (options.count("pager")) - config().setString("pager", options["pager"].as()); if (options.count("port") && !options["port"].defaulted()) config().setInt("port", options["port"].as()); if (options.count("secure")) diff --git a/programs/client/Client.h b/programs/client/Client.h index b146134bc94..2def74ef3fc 100644 --- a/programs/client/Client.h +++ b/programs/client/Client.h @@ -20,7 +20,6 @@ protected: bool processWithFuzzing(const String & full_query) override; void connect() override; - void prepareForInteractive() override; void processError(const String & query) const override; String getName() const override { return "client"; } diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index 5b21a7c2aef..141a6d4f911 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -744,8 +744,8 @@ std::shared_ptr rewriteCreateQueryStorage(const ASTPtr & create_ if (create.storage == nullptr || new_storage_ast == nullptr) throw Exception("Storage is not specified", ErrorCodes::LOGICAL_ERROR); - res->database = new_table.first; - res->table = new_table.second; + res->setDatabase(new_table.first); + res->setTable(new_table.second); res->children.clear(); res->set(res->columns_list, create.columns_list->clone()); @@ -1659,9 +1659,11 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( void ClusterCopier::dropAndCreateLocalTable(const ASTPtr & create_ast) { const auto & create = create_ast->as(); - dropLocalTableIfExists({create.database, create.table}); + dropLocalTableIfExists({create.getDatabase(), create.getTable()}); - InterpreterCreateQuery interpreter(create_ast, getContext()); + auto create_context = Context::createCopy(getContext()); + + InterpreterCreateQuery interpreter(create_ast, create_context); interpreter.execute(); } @@ -1669,10 +1671,12 @@ void ClusterCopier::dropLocalTableIfExists(const DatabaseAndTableName & table_na { auto drop_ast = std::make_shared(); drop_ast->if_exists = true; - drop_ast->database = 
table_name.first; - drop_ast->table = table_name.second; + drop_ast->setDatabase(table_name.first); + drop_ast->setTable(table_name.second); - InterpreterDropQuery interpreter(drop_ast, getContext()); + auto drop_context = Context::createCopy(getContext()); + + InterpreterDropQuery interpreter(drop_ast, drop_context); interpreter.execute(); } diff --git a/programs/copier/ClusterCopierApp.cpp b/programs/copier/ClusterCopierApp.cpp index 8d7e4abce51..d1ea59ed119 100644 --- a/programs/copier/ClusterCopierApp.cpp +++ b/programs/copier/ClusterCopierApp.cpp @@ -167,10 +167,18 @@ void ClusterCopierApp::mainImpl() DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared(default_database, context)); context->setCurrentDatabase(default_database); - /// Initialize query scope just in case. - CurrentThread::QueryScope query_scope(context); + /// Disable queries logging, since: + /// - There are bits that is not allowed for global context, like adding factories info (for the query_log) + /// - And anyway it is useless for copier. + context->setSetting("log_queries", false); - auto copier = std::make_unique(task_path, host_id, default_database, context, log); + auto local_context = Context::createCopy(context); + + /// Initialize query scope just in case. + CurrentThread::QueryScope query_scope(local_context); + + auto copier = std::make_unique( + task_path, host_id, default_database, local_context, log); copier->setSafeMode(is_safe_mode); copier->setCopyFaultProbability(copy_fault_probability); copier->setMoveFaultProbability(move_fault_probability); diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index 6e001c57e75..e45cd25ee8a 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -334,7 +334,11 @@ int Keeper::main(const std::vector & /*args*/) std::string include_from_path = config().getString("include_from", "/etc/metrika.xml"); - GlobalThreadPool::initialize(config().getUInt("max_thread_pool_size", 100)); + GlobalThreadPool::initialize( + config().getUInt("max_thread_pool_size", 100), + config().getUInt("max_thread_pool_free_size", 1000), + config().getUInt("thread_pool_queue_size", 10000) + ); static ServerErrorHandler error_handler; Poco::ErrorHandler::set(&error_handler); diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index f6849eb76de..14095aa8dd0 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -396,14 +397,6 @@ void LocalServer::connect() } -void LocalServer::prepareForInteractive() -{ - clearTerminal(); - showClientVersion(); - std::cerr << std::endl; -} - - int LocalServer::main(const std::vector & /*args*/) try { @@ -436,11 +429,18 @@ try processConfig(); applyCmdSettings(global_context); + + if (is_interactive) + { + clearTerminal(); + showClientVersion(); + std::cerr << std::endl; + } + connect(); if (is_interactive && !delayed_interactive) { - prepareForInteractive(); runInteractive(); } else @@ -448,10 +448,7 @@ try runNonInteractive(); if (delayed_interactive) - { - prepareForInteractive(); runInteractive(); - } } cleanup(); @@ -626,7 +623,7 @@ void LocalServer::processConfig() fs::create_directories(fs::path(path) / "metadata/"); loadMetadataSystem(global_context); - attachSystemTablesLocal(*createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::SYSTEM_DATABASE)); + attachSystemTablesLocal(global_context, *createMemoryDatabaseIfNotExists(global_context, 
DatabaseCatalog::SYSTEM_DATABASE)); attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA)); attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE)); loadMetadata(global_context); @@ -637,7 +634,7 @@ void LocalServer::processConfig() } else if (!config().has("no-system-tables")) { - attachSystemTablesLocal(*createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::SYSTEM_DATABASE)); + attachSystemTablesLocal(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::SYSTEM_DATABASE)); attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA)); attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE)); } diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h index e87e6bd9a0d..ce0df06c86a 100644 --- a/programs/local/LocalServer.h +++ b/programs/local/LocalServer.h @@ -34,7 +34,6 @@ protected: bool executeMultiQuery(const String & all_queries_text) override; void connect() override; - void prepareForInteractive() override; void processError(const String & query) const override; String getName() const override { return "local"; } diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index ce1ea42e322..27240c2341f 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -513,7 +513,11 @@ if (ThreadFuzzer::instance().isEffective()) // Initialize global thread pool. Do it before we fetch configs from zookeeper // nodes (`from_zk`), because ZooKeeper interface uses the pool. We will // ignore `max_thread_pool_size` in configs we fetch from ZK, but oh well. - GlobalThreadPool::initialize(config().getUInt("max_thread_pool_size", 10000)); + GlobalThreadPool::initialize( + config().getUInt("max_thread_pool_size", 10000), + config().getUInt("max_thread_pool_free_size", 1000), + config().getUInt("thread_pool_queue_size", 10000) + ); ConnectionCollector::init(global_context, config().getUInt("max_threads_for_connection_collector", 10)); @@ -884,7 +888,15 @@ if (ThreadFuzzer::instance().isEffective()) access_control.setCustomSettingsPrefixes(config().getString("custom_settings_prefixes")); /// Initialize access storages. - access_control.addStoragesFromMainConfig(config(), config_path, [&] { return global_context->getZooKeeper(); }); + try + { + access_control.addStoragesFromMainConfig(config(), config_path, [&] { return global_context->getZooKeeper(); }); + } + catch (...) + { + tryLogCurrentException(log); + throw; + } /// Reload config in SYSTEM RELOAD CONFIG query. 
global_context->setConfigReloadCallback([&]() @@ -1142,7 +1154,7 @@ if (ThreadFuzzer::instance().isEffective()) global_context->initializeSystemLogs(); global_context->setSystemZooKeeperLogAfterInitializationIfNeeded(); /// After the system database is created, attach virtual system tables (in addition to query_log and part_log) - attachSystemTablesServer(*database_catalog.getSystemDatabase(), has_zookeeper); + attachSystemTablesServer(global_context, *database_catalog.getSystemDatabase(), has_zookeeper); attachInformationSchema(global_context, *database_catalog.getDatabase(DatabaseCatalog::INFORMATION_SCHEMA)); attachInformationSchema(global_context, *database_catalog.getDatabase(DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE)); /// Firstly remove partially dropped databases, to avoid race with MaterializedMySQLSyncThread, @@ -1252,7 +1264,7 @@ if (ThreadFuzzer::instance().isEffective()) /// This object will periodically calculate some metrics. AsynchronousMetrics async_metrics( global_context, config().getUInt("asynchronous_metrics_update_period_s", 1), servers_to_start_before_tables, servers); - attachSystemTablesAsync(*DatabaseCatalog::instance().getSystemDatabase(), async_metrics); + attachSystemTablesAsync(global_context, *DatabaseCatalog::instance().getSystemDatabase(), async_metrics); for (const auto & listen_host : listen_hosts) { diff --git a/src/Access/AccessControl.cpp b/src/Access/AccessControl.cpp index 378d8e2c264..54ba8853b17 100644 --- a/src/Access/AccessControl.cpp +++ b/src/Access/AccessControl.cpp @@ -6,11 +6,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include #include @@ -170,7 +172,8 @@ void AccessControl::addUsersConfigStorage(const String & storage_name_, const Po auto new_storage = std::make_shared(storage_name_, check_setting_name_function); new_storage->setConfig(users_config_); addStorage(new_storage); - LOG_DEBUG(getLogger(), "Added {} access storage '{}', path: {}", String(new_storage->getStorageType()), new_storage->getStorageName(), new_storage->getPath()); + LOG_DEBUG(getLogger(), "Added {} access storage '{}', path: {}", + String(new_storage->getStorageType()), new_storage->getStorageName(), new_storage->getPath()); } void AccessControl::addUsersConfigStorage( @@ -484,6 +487,16 @@ std::shared_ptr AccessControl::getEnabledRowPolicies(c } +std::shared_ptr AccessControl::tryGetDefaultRowPolicies(const UUID & user_id) const +{ + auto user = tryRead(user_id); + if (!user) + return nullptr; + auto default_roles = getEnabledRoles(user->granted_roles.findGranted(user->default_roles), {})->getRolesInfo()->enabled_roles; + return getEnabledRowPolicies(user_id, default_roles); +} + + std::shared_ptr AccessControl::getEnabledQuota( const UUID & user_id, const String & user_name, diff --git a/src/Access/AccessControl.h b/src/Access/AccessControl.h index 779334f2a45..12e9986a13a 100644 --- a/src/Access/AccessControl.h +++ b/src/Access/AccessControl.h @@ -133,6 +133,8 @@ public: const UUID & user_id, const boost::container::flat_set & enabled_roles) const; + std::shared_ptr tryGetDefaultRowPolicies(const UUID & user_id) const; + std::shared_ptr getEnabledQuota( const UUID & user_id, const String & user_name, diff --git a/src/Access/AccessEntityIO.cpp b/src/Access/AccessEntityIO.cpp index 199b3b22efc..86aed1b5ad4 100644 --- a/src/Access/AccessEntityIO.cpp +++ b/src/Access/AccessEntityIO.cpp @@ -39,9 +39,6 @@ namespace ErrorCodes extern const int INCORRECT_ACCESS_ENTITY_DEFINITION; } -using EntityType = 
IAccessStorage::EntityType; -using EntityTypeInfo = IAccessStorage::EntityTypeInfo; - namespace { /// Special parser for the 'ATTACH access entity' queries. @@ -80,7 +77,7 @@ String serializeAccessEntity(const IAccessEntity & entity) /// Build list of ATTACH queries. ASTs queries; queries.push_back(InterpreterShowCreateAccessEntityQuery::getAttachQuery(entity)); - if ((entity.getType() == EntityType::USER) || (entity.getType() == EntityType::ROLE)) + if ((entity.getType() == AccessEntityType::USER) || (entity.getType() == AccessEntityType::ROLE)) boost::range::push_back(queries, InterpreterShowGrantsQuery::getAttachGrantQueries(entity)); /// Serialize the list of ATTACH queries to a string. diff --git a/src/Access/AccessEntityIO.h b/src/Access/AccessEntityIO.h index 94dc027430e..aa0a3e7cf63 100644 --- a/src/Access/AccessEntityIO.h +++ b/src/Access/AccessEntityIO.h @@ -1,9 +1,12 @@ #pragma once -#include +#include +#include namespace DB { +struct IAccessEntity; +using AccessEntityPtr = std::shared_ptr; String serializeAccessEntity(const IAccessEntity & entity); diff --git a/src/Access/AccessRights.cpp b/src/Access/AccessRights.cpp index 6cc1292a9f5..19b069546ee 100644 --- a/src/Access/AccessRights.cpp +++ b/src/Access/AccessRights.cpp @@ -23,37 +23,27 @@ namespace friend bool operator<(const ProtoElement & left, const ProtoElement & right) { - static constexpr auto compare_name = [](const boost::container::small_vector & left_name, - const boost::container::small_vector & right_name, - size_t i) + /// Compare components alphabetically. + size_t min_size = std::min(left.full_name.size(), right.full_name.size()); + for (size_t i = 0; i != min_size; ++i) { - if (i < left_name.size()) - { - if (i < right_name.size()) - return left_name[i].compare(right_name[i]); - else - return 1; /// left_name is longer => left_name > right_name - } - else if (i < right_name.size()) - return 1; /// right_name is longer => left < right - else - return 0; /// left_name == right_name - }; + int cmp = left.full_name[i].compare(right.full_name[i]); + if (cmp != 0) + return cmp < 0; + } - if (int cmp = compare_name(left.full_name, right.full_name, 0)) - return cmp < 0; - - if (int cmp = compare_name(left.full_name, right.full_name, 1)) - return cmp < 0; + /// Names with less number of components first. + if (left.full_name.size() != right.full_name.size()) + return left.full_name.size() < right.full_name.size(); + /// Grants before partial revokes. if (left.is_partial_revoke != right.is_partial_revoke) - return right.is_partial_revoke; + return right.is_partial_revoke; /// if left is grant, right is partial revoke, we assume left < right + /// Grants with grant option after other grants. + /// Revoke grant option after normal revokes. 
if (left.grant_option != right.grant_option) - return right.grant_option; - - if (int cmp = compare_name(left.full_name, right.full_name, 2)) - return cmp < 0; + return right.grant_option; /// if left is without grant option, and right is with grant option, we assume left < right return (left.access_flags < right.access_flags); } diff --git a/src/Access/Common/AccessEntityType.cpp b/src/Access/Common/AccessEntityType.cpp new file mode 100644 index 00000000000..b9c618a9fc0 --- /dev/null +++ b/src/Access/Common/AccessEntityType.cpp @@ -0,0 +1,86 @@ +#include +#include +#include +#include +#include + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int UNKNOWN_USER; + extern const int UNKNOWN_ROLE; + extern const int UNKNOWN_ROW_POLICY; + extern const int UNKNOWN_QUOTA; + extern const int THERE_IS_NO_PROFILE; + extern const int LOGICAL_ERROR; +} + + +String toString(AccessEntityType type) +{ + return AccessEntityTypeInfo::get(type).name; +} + +String AccessEntityTypeInfo::formatEntityNameWithType(const String & entity_name) const +{ + String msg = name_for_output_with_entity_name; + msg += " "; + msg += backQuote(entity_name); + return msg; +} + +const AccessEntityTypeInfo & AccessEntityTypeInfo::get(AccessEntityType type_) +{ + static constexpr auto make_info = [](const char * raw_name_, const char * plural_raw_name_, char unique_char_, int not_found_error_code_) + { + String init_names[2] = {raw_name_, plural_raw_name_}; + String init_aliases[2]; + for (size_t i = 0; i != std::size(init_names); ++i) + { + String & init_name = init_names[i]; + String & init_alias = init_aliases[i]; + boost::to_upper(init_name); + boost::replace_all(init_name, "_", " "); + if (auto underscore_pos = init_name.find_first_of(' '); underscore_pos != String::npos) + init_alias = init_name.substr(underscore_pos + 1); + } + String init_name_for_output_with_entity_name = init_names[0]; + boost::to_lower(init_name_for_output_with_entity_name); + return AccessEntityTypeInfo{raw_name_, plural_raw_name_, std::move(init_names[0]), std::move(init_aliases[0]), std::move(init_names[1]), std::move(init_aliases[1]), std::move(init_name_for_output_with_entity_name), unique_char_, not_found_error_code_}; + }; + + switch (type_) + { + case AccessEntityType::USER: + { + static const auto info = make_info("USER", "USERS", 'U', ErrorCodes::UNKNOWN_USER); + return info; + } + case AccessEntityType::ROLE: + { + static const auto info = make_info("ROLE", "ROLES", 'R', ErrorCodes::UNKNOWN_ROLE); + return info; + } + case AccessEntityType::SETTINGS_PROFILE: + { + static const auto info = make_info("SETTINGS_PROFILE", "SETTINGS_PROFILES", 'S', ErrorCodes::THERE_IS_NO_PROFILE); + return info; + } + case AccessEntityType::ROW_POLICY: + { + static const auto info = make_info("ROW_POLICY", "ROW_POLICIES", 'P', ErrorCodes::UNKNOWN_ROW_POLICY); + return info; + } + case AccessEntityType::QUOTA: + { + static const auto info = make_info("QUOTA", "QUOTAS", 'Q', ErrorCodes::UNKNOWN_QUOTA); + return info; + } + case AccessEntityType::MAX: break; + } + throw Exception("Unknown type: " + std::to_string(static_cast(type_)), ErrorCodes::LOGICAL_ERROR); +} + +} diff --git a/src/Access/Common/AccessEntityType.h b/src/Access/Common/AccessEntityType.h new file mode 100644 index 00000000000..44caeecb37a --- /dev/null +++ b/src/Access/Common/AccessEntityType.h @@ -0,0 +1,40 @@ +#pragma once + +#include + + +namespace DB +{ + +/// Represents the type of an access entity (see the IAccessEntity class). 
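+/// For example, toString(AccessEntityType::SETTINGS_PROFILE) returns "SETTINGS PROFILE":
+/// it forwards to AccessEntityTypeInfo::get(type).name (see AccessEntityTypeInfo below).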
+enum class AccessEntityType +{ + USER, + ROLE, + SETTINGS_PROFILE, + ROW_POLICY, + QUOTA, + + MAX, +}; + +String toString(AccessEntityType type); + +struct AccessEntityTypeInfo +{ + const char * const raw_name; + const char * const plural_raw_name; + const String name; /// Uppercased with spaces instead of underscores, e.g. "SETTINGS PROFILE". + const String alias; /// Alias of the keyword or empty string, e.g. "PROFILE". + const String plural_name; /// Uppercased with spaces plural name, e.g. "SETTINGS PROFILES". + const String plural_alias; /// Uppercased with spaces plural name alias, e.g. "PROFILES". + const String name_for_output_with_entity_name; /// Lowercased with spaces instead of underscores, e.g. "settings profile". + const char unique_char; /// Unique character for this type. E.g. 'P' for SETTINGS_PROFILE. + const int not_found_error_code; + + String formatEntityNameWithType(const String & entity_name) const; + + static const AccessEntityTypeInfo & get(AccessEntityType type_); +}; + +} diff --git a/src/Access/Common/AuthenticationData.cpp b/src/Access/Common/AuthenticationData.cpp index 012e7546270..7412d7336e3 100644 --- a/src/Access/Common/AuthenticationData.cpp +++ b/src/Access/Common/AuthenticationData.cpp @@ -133,7 +133,16 @@ void AuthenticationData::setPasswordHashHex(const String & hash) { Digest digest; digest.resize(hash.size() / 2); - boost::algorithm::unhex(hash.begin(), hash.end(), digest.data()); + + try + { + boost::algorithm::unhex(hash.begin(), hash.end(), digest.data()); + } + catch (const std::exception &) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot read password hash in hex, check for valid characters [0-9a-fA-F] and length"); + } + setPasswordHashBinary(digest); } diff --git a/src/Access/Common/QuotaDefs.cpp b/src/Access/Common/QuotaDefs.cpp new file mode 100644 index 00000000000..5d36a0bdd01 --- /dev/null +++ b/src/Access/Common/QuotaDefs.cpp @@ -0,0 +1,189 @@ +#include +#include + +#include + +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + + +String toString(QuotaType type) +{ + return QuotaTypeInfo::get(type).raw_name; +} + +String QuotaTypeInfo::valueToString(QuotaValue value) const +{ + if (!(value % output_denominator)) + return std::to_string(value / output_denominator); + else + return boost::lexical_cast(static_cast(value) / output_denominator); +} + +QuotaValue QuotaTypeInfo::stringToValue(const String & str) const +{ + if (output_denominator == 1) + return static_cast(std::strtoul(str.c_str(), nullptr, 10)); + else + return static_cast(std::strtod(str.c_str(), nullptr) * output_denominator); +} + +String QuotaTypeInfo::valueToStringWithName(QuotaValue value) const +{ + String res = name; + res += " = "; + res += valueToString(value); + return res; +} + +const QuotaTypeInfo & QuotaTypeInfo::get(QuotaType type) +{ + static constexpr auto make_info = [](const char * raw_name_, UInt64 output_denominator_) + { + String init_name = raw_name_; + boost::to_lower(init_name); + String init_keyword = raw_name_; + boost::replace_all(init_keyword, "_", " "); + bool init_output_as_float = (output_denominator_ != 1); + return QuotaTypeInfo{raw_name_, std::move(init_name), std::move(init_keyword), init_output_as_float, output_denominator_}; + }; + + switch (type) + { + case QuotaType::QUERIES: + { + static const auto info = make_info("QUERIES", 1); + return info; + } + case QuotaType::QUERY_SELECTS: + { + static const auto info = make_info("QUERY_SELECTS", 1); + 
return info; + } + case QuotaType::QUERY_INSERTS: + { + static const auto info = make_info("QUERY_INSERTS", 1); + return info; + } + case QuotaType::ERRORS: + { + static const auto info = make_info("ERRORS", 1); + return info; + } + case QuotaType::RESULT_ROWS: + { + static const auto info = make_info("RESULT_ROWS", 1); + return info; + } + case QuotaType::RESULT_BYTES: + { + static const auto info = make_info("RESULT_BYTES", 1); + return info; + } + case QuotaType::READ_ROWS: + { + static const auto info = make_info("READ_ROWS", 1); + return info; + } + case QuotaType::READ_BYTES: + { + static const auto info = make_info("READ_BYTES", 1); + return info; + } + case QuotaType::EXECUTION_TIME: + { + static const auto info = make_info("EXECUTION_TIME", 1000000000 /* execution_time is stored in nanoseconds */); + return info; + } + case QuotaType::MAX: break; + } + throw Exception("Unexpected quota type: " + std::to_string(static_cast(type)), ErrorCodes::LOGICAL_ERROR); +} + +String toString(QuotaKeyType type) +{ + return QuotaKeyTypeInfo::get(type).raw_name; +} + +const QuotaKeyTypeInfo & QuotaKeyTypeInfo::get(QuotaKeyType type) +{ + static constexpr auto make_info = [](const char * raw_name_) + { + String init_name = raw_name_; + boost::to_lower(init_name); + std::vector init_base_types; + String replaced = boost::algorithm::replace_all_copy(init_name, "_or_", "|"); + Strings tokens; + boost::algorithm::split(tokens, replaced, boost::is_any_of("|")); + if (tokens.size() > 1) + { + for (const auto & token : tokens) + { + for (auto kt : collections::range(QuotaKeyType::MAX)) + { + if (QuotaKeyTypeInfo::get(kt).name == token) + { + init_base_types.push_back(kt); + break; + } + } + } + } + return QuotaKeyTypeInfo{raw_name_, std::move(init_name), std::move(init_base_types)}; + }; + + switch (type) + { + case QuotaKeyType::NONE: + { + static const auto info = make_info("NONE"); + return info; + } + case QuotaKeyType::USER_NAME: + { + static const auto info = make_info("USER_NAME"); + return info; + } + case QuotaKeyType::IP_ADDRESS: + { + static const auto info = make_info("IP_ADDRESS"); + return info; + } + case QuotaKeyType::FORWARDED_IP_ADDRESS: + { + static const auto info = make_info("FORWARDED_IP_ADDRESS"); + return info; + } + case QuotaKeyType::CLIENT_KEY: + { + static const auto info = make_info("CLIENT_KEY"); + return info; + } + case QuotaKeyType::CLIENT_KEY_OR_USER_NAME: + { + static const auto info = make_info("CLIENT_KEY_OR_USER_NAME"); + return info; + } + case QuotaKeyType::CLIENT_KEY_OR_IP_ADDRESS: + { + static const auto info = make_info("CLIENT_KEY_OR_IP_ADDRESS"); + return info; + } + case QuotaKeyType::MAX: break; + } + throw Exception("Unexpected quota key type: " + std::to_string(static_cast(type)), ErrorCodes::LOGICAL_ERROR); +} + +} diff --git a/src/Access/Common/QuotaDefs.h b/src/Access/Common/QuotaDefs.h new file mode 100644 index 00000000000..cfd8a07d9ff --- /dev/null +++ b/src/Access/Common/QuotaDefs.h @@ -0,0 +1,67 @@ +#pragma once + +#include + + +namespace DB +{ +/// We use UInt64 to count used resources. +using QuotaValue = UInt64; + +/// Kinds of resource what we wish to quota. +enum class QuotaType +{ + QUERIES, /// Number of queries. + QUERY_SELECTS, /// Number of select queries. + QUERY_INSERTS, /// Number of inserts queries. + ERRORS, /// Number of queries with exceptions. + RESULT_ROWS, /// Number of rows returned as result. + RESULT_BYTES, /// Number of bytes returned as result. + READ_ROWS, /// Number of rows read from tables. 
+ READ_BYTES, /// Number of bytes read from tables. + EXECUTION_TIME, /// Total amount of query execution time in nanoseconds. + + MAX +}; + +String toString(QuotaType type); + +struct QuotaTypeInfo +{ + const char * const raw_name = ""; + const String name; /// Lowercased with underscores, e.g. "result_rows". + const String keyword; /// Uppercased with spaces, e.g. "RESULT ROWS". + const bool output_as_float = false; + const UInt64 output_denominator = 1; + String valueToString(QuotaValue value) const; + QuotaValue stringToValue(const String & str) const; + String valueToStringWithName(QuotaValue value) const; + static const QuotaTypeInfo & get(QuotaType type); +}; + +/// Key to share quota consumption. +/// Users with the same key share the same amount of resource. +enum class QuotaKeyType +{ + NONE, /// All users share the same quota. + USER_NAME, /// Connections with the same user name share the same quota. + IP_ADDRESS, /// Connections from the same IP share the same quota. + FORWARDED_IP_ADDRESS, /// Use X-Forwarded-For HTTP header instead of IP address. + CLIENT_KEY, /// Client should explicitly supply a key to use. + CLIENT_KEY_OR_USER_NAME, /// Same as CLIENT_KEY, but use USER_NAME if the client doesn't supply a key. + CLIENT_KEY_OR_IP_ADDRESS, /// Same as CLIENT_KEY, but use IP_ADDRESS if the client doesn't supply a key. + + MAX +}; + +String toString(QuotaKeyType type); + +struct QuotaKeyTypeInfo +{ + const char * const raw_name; + const String name; /// Lowercased with underscores, e.g. "client_key". + const std::vector base_types; /// For combined types keeps base types, e.g. for CLIENT_KEY_OR_USER_NAME it keeps [KeyType::CLIENT_KEY, KeyAccessEntityType::USER_NAME]. + static const QuotaKeyTypeInfo & get(QuotaKeyType type); +}; + +} diff --git a/src/Access/Common/RowPolicyDefs.cpp b/src/Access/Common/RowPolicyDefs.cpp new file mode 100644 index 00000000000..953709d9519 --- /dev/null +++ b/src/Access/Common/RowPolicyDefs.cpp @@ -0,0 +1,81 @@ +#include +#include +#include +#include + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +String RowPolicyName::toString() const +{ + String name; + name.reserve(database.length() + table_name.length() + short_name.length() + 6); + name += backQuoteIfNeed(short_name); + name += " ON "; + if (!database.empty()) + { + name += backQuoteIfNeed(database); + name += '.'; + } + name += backQuoteIfNeed(table_name); + return name; +} + +String toString(RowPolicyFilterType type) +{ + return RowPolicyFilterTypeInfo::get(type).raw_name; +} + +const RowPolicyFilterTypeInfo & RowPolicyFilterTypeInfo::get(RowPolicyFilterType type_) +{ + static constexpr auto make_info = [](const char * raw_name_) + { + String init_name = raw_name_; + boost::to_lower(init_name); + size_t underscore_pos = init_name.find('_'); + String init_command = init_name.substr(0, underscore_pos); + boost::to_upper(init_command); + bool init_is_check = (std::string_view{init_name}.substr(underscore_pos + 1) == "check"); + return RowPolicyFilterTypeInfo{raw_name_, std::move(init_name), std::move(init_command), init_is_check}; + }; + + switch (type_) + { + case RowPolicyFilterType::SELECT_FILTER: + { + static const auto info = make_info("SELECT_FILTER"); + return info; + } +#if 0 /// Row-level security for INSERT, UPDATE, DELETE is not implemented yet. 
+ case RowPolicyFilterType::INSERT_CHECK: + { + static const auto info = make_info("INSERT_CHECK"); + return info; + } + case RowPolicyFilterType::UPDATE_FILTER: + { + static const auto info = make_info("UPDATE_FILTER"); + return info; + } + case RowPolicyFilterType::UPDATE_CHECK: + { + static const auto info = make_info("UPDATE_CHECK"); + return info; + } + case RowPolicyFilterType::DELETE_FILTER: + { + static const auto info = make_info("DELETE_FILTER"); + return info; + } +#endif + case RowPolicyFilterType::MAX: break; + } + throw Exception("Unknown type: " + std::to_string(static_cast(type_)), ErrorCodes::LOGICAL_ERROR); +} + +} diff --git a/src/Access/Common/RowPolicyDefs.h b/src/Access/Common/RowPolicyDefs.h new file mode 100644 index 00000000000..792884c56df --- /dev/null +++ b/src/Access/Common/RowPolicyDefs.h @@ -0,0 +1,57 @@ +#pragma once + +#include + + +namespace DB +{ + +/// Represents the full name of a row policy, e.g. "myfilter ON mydb.mytable". +struct RowPolicyName +{ + String short_name; + String database; + String table_name; + + bool empty() const { return short_name.empty(); } + String toString() const; + auto toTuple() const { return std::tie(short_name, database, table_name); } + friend bool operator ==(const RowPolicyName & left, const RowPolicyName & right) { return left.toTuple() == right.toTuple(); } + friend bool operator !=(const RowPolicyName & left, const RowPolicyName & right) { return left.toTuple() != right.toTuple(); } +}; + + +/// Types of the filters of row policies. +/// Currently only RowPolicyFilterType::SELECT is supported. +enum class RowPolicyFilterType +{ + /// Filter is a SQL conditional expression used to figure out which rows should be visible + /// for user or available for modification. If the expression returns NULL or false for some rows + /// those rows are silently suppressed. + SELECT_FILTER, + +#if 0 /// Row-level security for INSERT, UPDATE, DELETE is not implemented yet. + /// Check is a SQL condition expression used to check whether a row can be written into + /// the table. If the expression returns NULL or false an exception is thrown. + /// If a conditional expression here is empty it means no filtering is applied. + INSERT_CHECK, + UPDATE_FILTER, + UPDATE_CHECK, + DELETE_FILTER, +#endif + + MAX +}; + +String toString(RowPolicyFilterType type); + +struct RowPolicyFilterTypeInfo +{ + const char * const raw_name; + const String name; /// Lowercased with underscores, e.g. "select_filter". + const String command; /// Uppercased without last word, e.g. "SELECT". + const bool is_check; /// E.g. false for SELECT_FILTER. 
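+    /// Returns a singleton info entry; e.g. get(RowPolicyFilterType::SELECT_FILTER) has
+    /// name == "select_filter", command == "SELECT" and is_check == false.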
+ static const RowPolicyFilterTypeInfo & get(RowPolicyFilterType type); +}; + +} diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index e9164b4ae44..b254a59376d 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -269,11 +269,11 @@ std::shared_ptr ContextAccess::getEnabledRowPolicies() return no_row_policies; } -ASTPtr ContextAccess::getRowPolicyCondition(const String & database, const String & table_name, RowPolicy::ConditionType index, const ASTPtr & extra_condition) const +ASTPtr ContextAccess::getRowPolicyFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type, const ASTPtr & combine_with_expr) const { std::lock_guard lock{mutex}; if (enabled_row_policies) - return enabled_row_policies->getCondition(database, table_name, index, extra_condition); + return enabled_row_policies->getFilter(database, table_name, filter_type, combine_with_expr); return nullptr; } diff --git a/src/Access/ContextAccess.h b/src/Access/ContextAccess.h index a7c91faf43b..d7cc999e95f 100644 --- a/src/Access/ContextAccess.h +++ b/src/Access/ContextAccess.h @@ -1,13 +1,14 @@ #pragma once #include -#include +#include #include #include #include #include #include #include +#include #include @@ -81,7 +82,7 @@ public: /// Returns the row policy filter for a specified table. /// The function returns nullptr if there is no filter to apply. - ASTPtr getRowPolicyCondition(const String & database, const String & table_name, RowPolicy::ConditionType index, const ASTPtr & extra_condition = nullptr) const; + ASTPtr getRowPolicyFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type, const ASTPtr & combine_with_expr = nullptr) const; /// Returns the quota to track resource consumption. std::shared_ptr getQuota() const; diff --git a/src/Access/DiskAccessStorage.cpp b/src/Access/DiskAccessStorage.cpp index 247bcc1ee89..1428e546d34 100644 --- a/src/Access/DiskAccessStorage.cpp +++ b/src/Access/DiskAccessStorage.cpp @@ -1,23 +1,16 @@ #include #include -#include -#include -#include -#include -#include #include #include #include #include -#include #include #include -#include -#include #include #include #include #include +#include #include #include #include @@ -34,10 +27,6 @@ namespace ErrorCodes namespace { - using EntityType = IAccessStorage::EntityType; - using EntityTypeInfo = IAccessStorage::EntityTypeInfo; - - /// Reads a file containing ATTACH queries and then parses it to build an access entity. AccessEntityPtr readEntityFile(const String & file_path) { @@ -144,9 +133,9 @@ namespace /// Calculates the path for storing a map of name of access entity to UUID for access entities of some type. 
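+    /// For example (path is illustrative): with directory_path "/var/lib/clickhouse/access/"
+    /// and AccessEntityType::USER, the result is "/var/lib/clickhouse/access/users.list".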
- String getListFilePath(const String & directory_path, EntityType type) + String getListFilePath(const String & directory_path, AccessEntityType type) { - String file_name = EntityTypeInfo::get(type).plural_raw_name; + String file_name = AccessEntityTypeInfo::get(type).plural_raw_name; boost::to_lower(file_name); return directory_path + file_name + ".list"; } @@ -238,7 +227,7 @@ bool DiskAccessStorage::isPathEqual(const String & directory_path_) const void DiskAccessStorage::clear() { entries_by_id.clear(); - for (auto type : collections::range(EntityType::MAX)) + for (auto type : collections::range(AccessEntityType::MAX)) entries_by_name_and_type[static_cast(type)].clear(); } @@ -248,7 +237,7 @@ bool DiskAccessStorage::readLists() clear(); bool ok = true; - for (auto type : collections::range(EntityType::MAX)) + for (auto type : collections::range(AccessEntityType::MAX)) { auto & entries_by_name = entries_by_name_and_type[static_cast(type)]; auto file_path = getListFilePath(directory_path, type); @@ -321,7 +310,7 @@ bool DiskAccessStorage::writeLists() } -void DiskAccessStorage::scheduleWriteLists(EntityType type) +void DiskAccessStorage::scheduleWriteLists(AccessEntityType type) { if (failed_to_write_lists) return; /// We don't try to write list files after the first fail. @@ -407,14 +396,14 @@ bool DiskAccessStorage::rebuildLists() entries_by_name[entry.name] = &entry; } - for (auto type : collections::range(EntityType::MAX)) + for (auto type : collections::range(AccessEntityType::MAX)) types_of_lists_to_write.insert(type); return true; } -std::optional DiskAccessStorage::findImpl(EntityType type, const String & name) const +std::optional DiskAccessStorage::findImpl(AccessEntityType type, const String & name) const { std::lock_guard lock{mutex}; const auto & entries_by_name = entries_by_name_and_type[static_cast(type)]; @@ -426,7 +415,7 @@ std::optional DiskAccessStorage::findImpl(EntityType type, const String & } -std::vector DiskAccessStorage::findAllImpl(EntityType type) const +std::vector DiskAccessStorage::findAllImpl(AccessEntityType type) const { std::lock_guard lock{mutex}; const auto & entries_by_name = entries_by_name_and_type[static_cast(type)]; @@ -489,7 +478,7 @@ UUID DiskAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool repl void DiskAccessStorage::insertNoLock(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, Notifications & notifications) { const String & name = new_entity->getName(); - EntityType type = new_entity->getType(); + AccessEntityType type = new_entity->getType(); if (readonly) throwReadonlyCannotInsert(type, name); @@ -543,7 +532,7 @@ void DiskAccessStorage::removeNoLock(const UUID & id, Notifications & notificati throwNotFound(id); Entry & entry = it->second; - EntityType type = entry.type; + AccessEntityType type = entry.type; if (readonly) throwReadonlyCannotRemove(type, entry.name); @@ -591,7 +580,7 @@ void DiskAccessStorage::updateNoLock(const UUID & id, const UpdateFunc & update_ const String & new_name = new_entity->getName(); const String & old_name = old_entity->getName(); - const EntityType type = entry.type; + const AccessEntityType type = entry.type; auto & entries_by_name = entries_by_name_and_type[static_cast(type)]; bool name_changed = (new_name != old_name); @@ -671,7 +660,7 @@ scope_guard DiskAccessStorage::subscribeForChangesImpl(const UUID & id, const On }; } -scope_guard DiskAccessStorage::subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const +scope_guard 
DiskAccessStorage::subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const { std::lock_guard lock{mutex}; auto & handlers = handlers_by_type[static_cast(type)]; @@ -698,7 +687,7 @@ bool DiskAccessStorage::hasSubscriptionImpl(const UUID & id) const return false; } -bool DiskAccessStorage::hasSubscriptionImpl(EntityType type) const +bool DiskAccessStorage::hasSubscriptionImpl(AccessEntityType type) const { std::lock_guard lock{mutex}; const auto & handlers = handlers_by_type[static_cast(type)]; diff --git a/src/Access/DiskAccessStorage.h b/src/Access/DiskAccessStorage.h index 5e3f4a1eafa..853a18590f0 100644 --- a/src/Access/DiskAccessStorage.h +++ b/src/Access/DiskAccessStorage.h @@ -27,8 +27,8 @@ public: bool isReadOnly() const { return readonly; } private: - std::optional findImpl(EntityType type, const String & name) const override; - std::vector findAllImpl(EntityType type) const override; + std::optional findImpl(AccessEntityType type, const String & name) const override; + std::vector findAllImpl(AccessEntityType type) const override; bool existsImpl(const UUID & id) const override; AccessEntityPtr readImpl(const UUID & id) const override; String readNameImpl(const UUID & id) const override; @@ -37,14 +37,14 @@ private: void removeImpl(const UUID & id) override; void updateImpl(const UUID & id, const UpdateFunc & update_func) override; scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override; - scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const override; + scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const override; bool hasSubscriptionImpl(const UUID & id) const override; - bool hasSubscriptionImpl(EntityType type) const override; + bool hasSubscriptionImpl(AccessEntityType type) const override; void clear(); bool readLists(); bool writeLists(); - void scheduleWriteLists(EntityType type); + void scheduleWriteLists(AccessEntityType type); bool rebuildLists(); void listsWritingThreadFunc(); @@ -63,7 +63,7 @@ private: { UUID id; String name; - EntityType type; + AccessEntityType type; mutable AccessEntityPtr entity; /// may be nullptr, if the entity hasn't been loaded yet. mutable std::list handlers_by_id; }; @@ -73,13 +73,13 @@ private: String directory_path; std::atomic readonly; std::unordered_map entries_by_id; - std::unordered_map entries_by_name_and_type[static_cast(EntityType::MAX)]; - boost::container::flat_set types_of_lists_to_write; + std::unordered_map entries_by_name_and_type[static_cast(AccessEntityType::MAX)]; + boost::container::flat_set types_of_lists_to_write; bool failed_to_write_lists = false; /// Whether writing of the list files has been failed since the recent restart of the server. ThreadFromGlobalPool lists_writing_thread; /// List files are written in a separate thread. std::condition_variable lists_writing_thread_should_exit; /// Signals `lists_writing_thread` to exit. 
     bool lists_writing_thread_is_waiting = false;
-    mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(EntityType::MAX)];
+    mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(AccessEntityType::MAX)];
     mutable std::mutex mutex;
 };
 }
diff --git a/src/Access/EnabledQuota.cpp b/src/Access/EnabledQuota.cpp
index d776b2b851a..f0c6004bd77 100644
--- a/src/Access/EnabledQuota.cpp
+++ b/src/Access/EnabledQuota.cpp
@@ -20,16 +20,16 @@ struct EnabledQuota::Impl
     [[noreturn]] static void throwQuotaExceed(
         const String & user_name,
         const String & quota_name,
-        ResourceType resource_type,
-        ResourceAmount used,
-        ResourceAmount max,
+        QuotaType quota_type,
+        QuotaValue used,
+        QuotaValue max,
         std::chrono::seconds duration,
         std::chrono::system_clock::time_point end_of_interval)
     {
-        const auto & type_info = Quota::ResourceTypeInfo::get(resource_type);
+        const auto & type_info = QuotaTypeInfo::get(quota_type);
         throw Exception(
             "Quota for user " + backQuote(user_name) + " for " + to_string(duration) + " has been exceeded: "
-                + type_info.outputWithAmount(used) + "/" + type_info.amountToString(max) + ". "
+                + type_info.valueToStringWithName(used) + "/" + type_info.valueToString(max) + ". "
                 + "Interval will end at " + to_string(end_of_interval) + ". "
                 + "Name of quota template: " + backQuote(quota_name),
             ErrorCodes::QUOTA_EXPIRED);
     }
@@ -52,9 +52,7 @@ struct EnabledQuota::Impl
             return end;
         }
 
-        /// We reset counters only if the interval's end has been calculated before.
-        /// If it hasn't we just calculate the interval's end for the first time and don't reset counters yet.
-        bool need_reset_counters = (end_loaded.count() != 0);
+        bool need_reset_counters = false;
 
         do
         {
@@ -66,7 +64,12 @@ struct EnabledQuota::Impl
             UInt64 n = static_cast<UInt64>((current_time - end + duration) / duration);
             end = end + duration * n;
             if (end_of_interval.compare_exchange_strong(end_loaded, end.time_since_epoch()))
+            {
+                /// We reset counters only if the interval's end has been calculated before.
+                /// If it hasn't we just calculate the interval's end for the first time and don't reset counters yet.
+                need_reset_counters = (end_loaded.count() != 0);
                 break;
+            }
             end = std::chrono::system_clock::time_point{end_loaded};
         }
         while (current_time >= end);
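Editorial note: this change is subtle. need_reset_counters used to be computed from end_loaded before the loop, where it could be stale; now it is computed only in the branch where compare_exchange_strong succeeded. On success the expected argument (end_loaded) still holds the value that was in end_of_interval before our store, so a non-zero count reliably means an interval end had been published earlier and the counters must be reset; on failure compare_exchange_strong refreshes end_loaded with the competing thread's value and the loop retries. A reduced sketch of the idiom (simplified types, not this file's code):

```cpp
#include <atomic>
#include <cstdio>

std::atomic<long> published{0}; // 0 means "not calculated yet"

// Try to publish `next`; report whether a previous non-zero value was replaced.
bool publish(long next, bool & had_previous)
{
    long expected = published.load();
    while (!published.compare_exchange_strong(expected, next))
    {
        // On failure, `expected` now holds the value another thread stored.
        if (expected >= next)
            return false; // someone already published a newer value, give up
    }
    // On success, `expected` still holds the value seen *before* our store.
    had_previous = (expected != 0);
    return true;
}

int main()
{
    bool had = false;
    publish(42, had);
    std::printf("%d\n", had); // 0: first publication, nothing to reset
}
```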
@@ -83,15 +86,16 @@ struct EnabledQuota::Impl
     static void used(
         const String & user_name,
         const Intervals & intervals,
-        ResourceType resource_type,
-        ResourceAmount amount,
+        QuotaType quota_type,
+        QuotaValue value,
         std::chrono::system_clock::time_point current_time,
         bool check_exceeded)
     {
         for (const auto & interval : intervals.intervals)
         {
-            ResourceAmount used = (interval.used[resource_type] += amount);
-            ResourceAmount max = interval.max[resource_type];
+            auto quota_type_i = static_cast<size_t>(quota_type);
+            QuotaValue used = (interval.used[quota_type_i] += value);
+            QuotaValue max = interval.max[quota_type_i];
             if (!max)
                 continue;
             if (used > max)
@@ -100,12 +104,12 @@ struct EnabledQuota::Impl
                 auto end_of_interval = getEndOfInterval(interval, current_time, counters_were_reset);
                 if (counters_were_reset)
                 {
-                    used = (interval.used[resource_type] += amount);
+                    used = (interval.used[quota_type_i] += value);
                     if ((used > max) && check_exceeded)
-                        throwQuotaExceed(user_name, intervals.quota_name, resource_type, used, max, interval.duration, end_of_interval);
+                        throwQuotaExceed(user_name, intervals.quota_name, quota_type, used, max, interval.duration, end_of_interval);
                 }
                 else if (check_exceeded)
-                    throwQuotaExceed(user_name, intervals.quota_name, resource_type, used, max, interval.duration, end_of_interval);
+                    throwQuotaExceed(user_name, intervals.quota_name, quota_type, used, max, interval.duration, end_of_interval);
             }
         }
     }
@@ -113,13 +117,14 @@ struct EnabledQuota::Impl
     static void checkExceeded(
         const String & user_name,
         const Intervals & intervals,
-        ResourceType resource_type,
+        QuotaType quota_type,
         std::chrono::system_clock::time_point current_time)
     {
+        auto quota_type_i = static_cast<size_t>(quota_type);
         for (const auto & interval : intervals.intervals)
         {
-            ResourceAmount used = interval.used[resource_type];
-            ResourceAmount max = interval.max[resource_type];
+            QuotaValue used = interval.used[quota_type_i];
+            QuotaValue max = interval.max[quota_type_i];
             if (!max)
                 continue;
             if (used > max)
@@ -127,7 +132,7 @@ struct EnabledQuota::Impl
                 bool counters_were_reset = false;
                 std::chrono::system_clock::time_point end_of_interval = getEndOfInterval(interval, current_time, counters_were_reset);
                 if (!counters_were_reset)
-                    throwQuotaExceed(user_name, intervals.quota_name, resource_type, used, max, interval.duration, end_of_interval);
+                    throwQuotaExceed(user_name, intervals.quota_name, quota_type, used, max, interval.duration, end_of_interval);
             }
         }
     }
@@ -137,18 +142,19 @@ struct EnabledQuota::Impl
         const Intervals & intervals,
         std::chrono::system_clock::time_point current_time)
     {
-        for (auto resource_type : collections::range(Quota::MAX_RESOURCE_TYPE))
-            checkExceeded(user_name, intervals, resource_type, current_time);
+        for (auto quota_type : collections::range(QuotaType::MAX))
+            checkExceeded(user_name, intervals, quota_type, current_time);
     }
 };
 
 
 EnabledQuota::Interval::Interval()
 {
-    for (auto resource_type : collections::range(MAX_RESOURCE_TYPE))
+    for (auto quota_type : collections::range(QuotaType::MAX))
     {
-        used[resource_type].store(0);
-        max[resource_type] = 0;
+        auto quota_type_i = static_cast<size_t>(quota_type);
+        used[quota_type_i].store(0);
+        max[quota_type_i] = 0;
     }
 }
 
@@ -161,10 +167,11 @@ EnabledQuota::Interval & EnabledQuota::Interval::operator =(const Interval & src
     randomize_interval = src.randomize_interval;
     duration = src.duration;
end_of_interval.store(src.end_of_interval.load()); - for (auto resource_type : collections::range(MAX_RESOURCE_TYPE)) + for (auto quota_type : collections::range(QuotaType::MAX)) { - max[resource_type] = src.max[resource_type]; - used[resource_type].store(src.used[resource_type].load()); + auto quota_type_i = static_cast(quota_type); + max[quota_type_i] = src.max[quota_type_i]; + used[quota_type_i].store(src.used[quota_type_i].load()); } return *this; } @@ -187,11 +194,12 @@ std::optional EnabledQuota::Intervals::getUsage(std::chrono::system_ out.randomize_interval = in.randomize_interval; bool counters_were_reset = false; out.end_of_interval = Impl::getEndOfInterval(in, current_time, counters_were_reset); - for (auto resource_type : collections::range(MAX_RESOURCE_TYPE)) + for (auto quota_type : collections::range(QuotaType::MAX)) { - if (in.max[resource_type]) - out.max[resource_type] = in.max[resource_type]; - out.used[resource_type] = in.used[resource_type]; + auto quota_type_i = static_cast(quota_type); + if (in.max[quota_type_i]) + out.max[quota_type_i] = in.max[quota_type_i]; + out.used[quota_type_i] = in.used[quota_type_i]; } } return usage; @@ -205,45 +213,45 @@ EnabledQuota::EnabledQuota(const Params & params_) : params(params_) EnabledQuota::~EnabledQuota() = default; -void EnabledQuota::used(ResourceType resource_type, ResourceAmount amount, bool check_exceeded) const +void EnabledQuota::used(QuotaType quota_type, QuotaValue value, bool check_exceeded) const { - used({resource_type, amount}, check_exceeded); + used({quota_type, value}, check_exceeded); } -void EnabledQuota::used(const std::pair & resource, bool check_exceeded) const +void EnabledQuota::used(const std::pair & usage1, bool check_exceeded) const { auto loaded = intervals.load(); auto current_time = std::chrono::system_clock::now(); - Impl::used(getUserName(), *loaded, resource.first, resource.second, current_time, check_exceeded); + Impl::used(getUserName(), *loaded, usage1.first, usage1.second, current_time, check_exceeded); } -void EnabledQuota::used(const std::pair & resource1, const std::pair & resource2, bool check_exceeded) const +void EnabledQuota::used(const std::pair & usage1, const std::pair & usage2, bool check_exceeded) const { auto loaded = intervals.load(); auto current_time = std::chrono::system_clock::now(); - Impl::used(getUserName(), *loaded, resource1.first, resource1.second, current_time, check_exceeded); - Impl::used(getUserName(), *loaded, resource2.first, resource2.second, current_time, check_exceeded); + Impl::used(getUserName(), *loaded, usage1.first, usage1.second, current_time, check_exceeded); + Impl::used(getUserName(), *loaded, usage2.first, usage2.second, current_time, check_exceeded); } -void EnabledQuota::used(const std::pair & resource1, const std::pair & resource2, const std::pair & resource3, bool check_exceeded) const +void EnabledQuota::used(const std::pair & usage1, const std::pair & usage2, const std::pair & usage3, bool check_exceeded) const { auto loaded = intervals.load(); auto current_time = std::chrono::system_clock::now(); - Impl::used(getUserName(), *loaded, resource1.first, resource1.second, current_time, check_exceeded); - Impl::used(getUserName(), *loaded, resource2.first, resource2.second, current_time, check_exceeded); - Impl::used(getUserName(), *loaded, resource3.first, resource3.second, current_time, check_exceeded); + Impl::used(getUserName(), *loaded, usage1.first, usage1.second, current_time, check_exceeded); + Impl::used(getUserName(), *loaded, 
usage2.first, usage2.second, current_time, check_exceeded); + Impl::used(getUserName(), *loaded, usage3.first, usage3.second, current_time, check_exceeded); } -void EnabledQuota::used(const std::vector> & resources, bool check_exceeded) const +void EnabledQuota::used(const std::vector> & usages, bool check_exceeded) const { auto loaded = intervals.load(); auto current_time = std::chrono::system_clock::now(); - for (const auto & resource : resources) - Impl::used(getUserName(), *loaded, resource.first, resource.second, current_time, check_exceeded); + for (const auto & usage : usages) + Impl::used(getUserName(), *loaded, usage.first, usage.second, current_time, check_exceeded); } @@ -254,10 +262,10 @@ void EnabledQuota::checkExceeded() const } -void EnabledQuota::checkExceeded(ResourceType resource_type) const +void EnabledQuota::checkExceeded(QuotaType quota_type) const { auto loaded = intervals.load(); - Impl::checkExceeded(getUserName(), *loaded, resource_type, std::chrono::system_clock::now()); + Impl::checkExceeded(getUserName(), *loaded, quota_type, std::chrono::system_clock::now()); } diff --git a/src/Access/EnabledQuota.h b/src/Access/EnabledQuota.h index 7ae107e45e3..097afe861d2 100644 --- a/src/Access/EnabledQuota.h +++ b/src/Access/EnabledQuota.h @@ -1,13 +1,15 @@ #pragma once -#include +#include #include #include +#include #include #include #include #include #include +#include namespace DB @@ -37,21 +39,18 @@ public: friend bool operator >=(const Params & lhs, const Params & rhs) { return !(lhs < rhs); } }; - using ResourceType = Quota::ResourceType; - using ResourceAmount = Quota::ResourceAmount; - ~EnabledQuota(); /// Tracks resource consumption. If the quota exceeded and `check_exceeded == true`, throws an exception. - void used(ResourceType resource_type, ResourceAmount amount, bool check_exceeded = true) const; - void used(const std::pair & resource, bool check_exceeded = true) const; - void used(const std::pair & resource1, const std::pair & resource2, bool check_exceeded = true) const; - void used(const std::pair & resource1, const std::pair & resource2, const std::pair & resource3, bool check_exceeded = true) const; - void used(const std::vector> & resources, bool check_exceeded = true) const; + void used(QuotaType quota_type, QuotaValue value, bool check_exceeded = true) const; + void used(const std::pair & usage1, bool check_exceeded = true) const; + void used(const std::pair & usage1, const std::pair & usage2, bool check_exceeded = true) const; + void used(const std::pair & usage1, const std::pair & usage2, const std::pair & usage3, bool check_exceeded = true) const; + void used(const std::vector> & usages, bool check_exceeded = true) const; /// Checks if the quota exceeded. If so, throws an exception. void checkExceeded() const; - void checkExceeded(ResourceType resource_type) const; + void checkExceeded(QuotaType quota_type) const; /// Returns the information about quota consumption. 
     std::optional<QuotaUsage> getUsage() const;
@@ -66,12 +65,10 @@ private:
     const String & getUserName() const { return params.user_name; }
 
-    static constexpr auto MAX_RESOURCE_TYPE = Quota::MAX_RESOURCE_TYPE;
-
     struct Interval
     {
-        mutable std::atomic<ResourceAmount> used[MAX_RESOURCE_TYPE];
-        ResourceAmount max[MAX_RESOURCE_TYPE];
+        mutable std::atomic<QuotaValue> used[static_cast<size_t>(QuotaType::MAX)];
+        QuotaValue max[static_cast<size_t>(QuotaType::MAX)];
         std::chrono::seconds duration = std::chrono::seconds::zero();
         bool randomize_interval = false;
         mutable std::atomic<std::chrono::system_clock::duration> end_of_interval;
diff --git a/src/Access/EnabledRowPolicies.cpp b/src/Access/EnabledRowPolicies.cpp
index 674dab3e0f0..e4f592884fe 100644
--- a/src/Access/EnabledRowPolicies.cpp
+++ b/src/Access/EnabledRowPolicies.cpp
@@ -6,9 +6,9 @@
 namespace DB
 {
 
-size_t EnabledRowPolicies::Hash::operator()(const MixedConditionKey & key) const
+size_t EnabledRowPolicies::Hash::operator()(const MixedFiltersKey & key) const
 {
-    return std::hash<std::string_view>{}(key.database) - std::hash<std::string_view>{}(key.table_name) + static_cast<size_t>(key.condition_type);
+    return std::hash<std::string_view>{}(key.database) - std::hash<std::string_view>{}(key.table_name) + static_cast<size_t>(key.filter_type);
 }
 
@@ -23,36 +23,36 @@ EnabledRowPolicies::EnabledRowPolicies(const Params & params_) : params(params_)
 EnabledRowPolicies::~EnabledRowPolicies() = default;
 
-ASTPtr EnabledRowPolicies::getCondition(const String & database, const String & table_name, ConditionType condition_type) const
+ASTPtr EnabledRowPolicies::getFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type) const
 {
     /// We don't lock `mutex` here.
-    auto loaded = map_of_mixed_conditions.load();
-    auto it = loaded->find({database, table_name, condition_type});
+    auto loaded = mixed_filters.load();
+    auto it = loaded->find({database, table_name, filter_type});
     if (it == loaded->end())
         return {};
 
-    auto condition = it->second.ast;
+    auto filter = it->second.ast;
 
     bool value;
-    if (tryGetLiteralBool(condition.get(), value) && value)
+    if (tryGetLiteralBool(filter.get(), value) && value)
         return nullptr; /// The condition is always true, no need to check it.
 
-    return condition;
+    return filter;
 }
 
-ASTPtr EnabledRowPolicies::getCondition(const String & database, const String & table_name, ConditionType type, const ASTPtr & extra_condition) const
+ASTPtr EnabledRowPolicies::getFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type, const ASTPtr & combine_with_expr) const
 {
-    ASTPtr condition = getCondition(database, table_name, type);
-    if (condition && extra_condition)
-        condition = makeASTForLogicalAnd({condition, extra_condition});
-    else if (!condition)
-        condition = extra_condition;
+    ASTPtr filter = getFilter(database, table_name, filter_type);
+    if (filter && combine_with_expr)
+        filter = makeASTForLogicalAnd({filter, combine_with_expr});
+    else if (!filter)
+        filter = combine_with_expr;
 
     bool value;
-    if (tryGetLiteralBool(condition.get(), value) && value)
+    if (tryGetLiteralBool(filter.get(), value) && value)
         return nullptr; /// The condition is always true, no need to check it.
 
-    return condition;
+    return filter;
 }
 
 }
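Editorial note: the second getFilter overload above shows the combination rule. A caller-supplied expression is AND-ed onto the policy filter (makeASTForLogicalAnd), and a resulting filter that is a literal true is dropped so the query pipeline never evaluates a no-op predicate. A toy model of that logic, with ASTs replaced by strings and the always-true check crudely approximated (all names here are illustrative, not from the patch):

```cpp
#include <iostream>
#include <optional>
#include <string>

// Toy model: a filter is a SQL expression string; std::nullopt means "no filtering".
using Filter = std::optional<std::string>;

Filter combine(Filter policy_filter, Filter extra)
{
    Filter result;
    if (policy_filter && extra)
        result = "(" + *policy_filter + ") AND (" + *extra + ")"; // AND-combine both
    else
        result = policy_filter ? policy_filter : extra;           // keep whichever exists
    if (result && *result == "1")
        return std::nullopt; // always-true filter: nothing to check
    return result;
}

int main()
{
    std::cout << combine("user_id = 42", "date >= '2021-01-01'").value() << '\n';
    // prints: (user_id = 42) AND (date >= '2021-01-01')
}
```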
diff --git a/src/Access/EnabledRowPolicies.h b/src/Access/EnabledRowPolicies.h
index 8ed923e98e4..7fb4e707273 100644
--- a/src/Access/EnabledRowPolicies.h
+++ b/src/Access/EnabledRowPolicies.h
@@ -1,8 +1,9 @@
 #pragma once
 
-#include
+#include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -35,43 +36,42 @@ public:
     EnabledRowPolicies();
     ~EnabledRowPolicies();
 
-    using ConditionType = RowPolicy::ConditionType;
-
     /// Returns prepared filter for a specific table and operations.
     /// The function can return nullptr, that means there is no filters applied.
     /// The returned filter can be a combination of the filters defined by multiple row policies.
-    ASTPtr getCondition(const String & database, const String & table_name, ConditionType type) const;
-    ASTPtr getCondition(const String & database, const String & table_name, ConditionType type, const ASTPtr & extra_condition) const;
+    ASTPtr getFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type) const;
+    ASTPtr getFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type, const ASTPtr & combine_with_expr) const;
 
 private:
     friend class RowPolicyCache;
     EnabledRowPolicies(const Params & params_);
 
-    struct MixedConditionKey
+    struct MixedFiltersKey
     {
         std::string_view database;
         std::string_view table_name;
-        ConditionType condition_type;
+        RowPolicyFilterType filter_type;
 
-        auto toTuple() const { return std::tie(database, table_name, condition_type); }
-        friend bool operator==(const MixedConditionKey & left, const MixedConditionKey & right) { return left.toTuple() == right.toTuple(); }
-        friend bool operator!=(const MixedConditionKey & left, const MixedConditionKey & right) { return left.toTuple() != right.toTuple(); }
+        auto toTuple() const { return std::tie(database, table_name, filter_type); }
+        friend bool operator==(const MixedFiltersKey & left, const MixedFiltersKey & right) { return left.toTuple() == right.toTuple(); }
+        friend bool operator!=(const MixedFiltersKey & left, const MixedFiltersKey & right) { return left.toTuple() != right.toTuple(); }
     };
 
-    struct Hash
-    {
-        size_t operator()(const MixedConditionKey & key) const;
-    };
-
-    struct MixedCondition
+    struct MixedFiltersResult
     {
         ASTPtr ast;
         std::shared_ptr<std::pair<String, String>> database_and_table_name;
     };
-    using MapOfMixedConditions = std::unordered_map<MixedConditionKey, MixedCondition, Hash>;
+
+    struct Hash
+    {
+        size_t operator()(const MixedFiltersKey & key) const;
+    };
+
+    using MixedFiltersMap = std::unordered_map<MixedFiltersKey, MixedFiltersResult, Hash>;
 
     const Params params;
-    mutable boost::atomic_shared_ptr<const MapOfMixedConditions> map_of_mixed_conditions;
+    mutable boost::atomic_shared_ptr<const MixedFiltersMap> mixed_filters;
 };
 
 }
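Editorial note: the renamed mixed_filters member is a boost::atomic_shared_ptr, which is why getFilter can read the map without locking mutex ("We don't lock `mutex` here"): the cache side builds a fresh immutable map and atomically swaps the pointer in, while concurrent readers keep whatever snapshot they loaded. The same read-copy-update pattern can be sketched with the standard library's atomic free functions for shared_ptr (a sketch under that assumption, not this patch's code):

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <string>

using FilterMap = std::map<std::string, std::string>;
std::shared_ptr<const FilterMap> current = std::make_shared<FilterMap>();

// Writer: copy the old map, modify the copy, atomically publish the new snapshot.
void update(const std::string & key, const std::string & value)
{
    auto next = std::make_shared<FilterMap>(*std::atomic_load(&current));
    (*next)[key] = value;
    std::atomic_store(&current, std::shared_ptr<const FilterMap>(next));
}

// Reader: one atomic load, then lock-free access to a stable snapshot.
std::string find(const std::string & key)
{
    auto snapshot = std::atomic_load(&current);
    auto it = snapshot->find(key);
    return it == snapshot->end() ? "" : it->second;
}

int main()
{
    update("mydb.mytable", "user_id = 42");
    std::cout << find("mydb.mytable") << '\n'; // prints: user_id = 42
}
```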
diff --git a/src/Access/GSSAcceptor.cpp b/src/Access/GSSAcceptor.cpp
index 207baf955e0..02fa3f8e1d3 100644
--- a/src/Access/GSSAcceptor.cpp
+++ b/src/Access/GSSAcceptor.cpp
@@ -18,7 +18,7 @@ namespace ErrorCodes
     extern const int KERBEROS_ERROR;
 }
 
-GSSAcceptorContext::GSSAcceptorContext(const GSSAcceptorContext::Params& params_)
+GSSAcceptorContext::GSSAcceptorContext(const GSSAcceptorContext::Params & params_)
     : params(params_)
 {
 }
@@ -50,7 +50,6 @@ std::recursive_mutex gss_global_mutex;
 struct PrincipalName
 {
     explicit PrincipalName(String principal);
-//    operator String() const;
 
     String name;
     std::vector<String> instances;
@@ -75,24 +74,6 @@ PrincipalName::PrincipalName(String principal)
     }
 }
 
-/*
-PrincipalName::operator String() const
-{
-    String principal = name;
-
-    for (const auto & instance : instances)
-    {
-        principal += '/';
-        principal += instance;
-    }
-
-    principal += '@';
-    principal += realm;
-
-    return principal;
-}
-*/
-
 String bufferToString(const gss_buffer_desc & buf)
 {
     String str;
diff --git a/src/Access/GSSAcceptor.h b/src/Access/GSSAcceptor.h
index 4501622cd81..4b57b575455 100644
--- a/src/Access/GSSAcceptor.h
+++ b/src/Access/GSSAcceptor.h
@@ -30,7 +30,7 @@ public:
         String realm;
     };
 
-    explicit GSSAcceptorContext(const Params& params_);
+    explicit GSSAcceptorContext(const Params & params_);
     virtual ~GSSAcceptorContext() override;
 
     GSSAcceptorContext(const GSSAcceptorContext &) = delete;
diff --git a/src/Access/IAccessEntity.h b/src/Access/IAccessEntity.h
index d0fbde8da97..04faa879040 100644
--- a/src/Access/IAccessEntity.h
+++ b/src/Access/IAccessEntity.h
@@ -1,24 +1,13 @@
 #pragma once
 
-#include
+#include
 #include
-#include
-#include
+#include
 #include
 
 namespace DB
 {
 
-namespace ErrorCodes
-{
-    extern const int UNKNOWN_USER;
-    extern const int UNKNOWN_ROLE;
-    extern const int UNKNOWN_ROW_POLICY;
-    extern const int UNKNOWN_QUOTA;
-    extern const int THERE_IS_NO_PROFILE;
-    extern const int LOGICAL_ERROR;
-}
-
 /// Access entity is a set of data which have a name and a type. Access entity control something related to the access control.
 /// Entities can be stored to a file or another storage, see IAccessStorage.
@@ -29,41 +18,14 @@ struct IAccessEntity
     virtual ~IAccessEntity() = default;
     virtual std::shared_ptr<IAccessEntity> clone() const = 0;
 
-    enum class Type
-    {
-        USER,
-        ROLE,
-        SETTINGS_PROFILE,
-        ROW_POLICY,
-        QUOTA,
+    virtual AccessEntityType getType() const = 0;
 
-        MAX,
-    };
-
-    virtual Type getType() const = 0;
-
-    struct TypeInfo
-    {
-        const char * const raw_name;
-        const char * const plural_raw_name;
-        const String name;  /// Uppercased with spaces instead of underscores, e.g. "SETTINGS PROFILE".
-        const String alias; /// Alias of the keyword or empty string, e.g. "PROFILE".
-        const String plural_name;  /// Uppercased with spaces plural name, e.g. "SETTINGS PROFILES".
-        const String plural_alias; /// Uppercased with spaces plural name alias, e.g. "PROFILES".
-        const String name_for_output_with_entity_name; /// Lowercased with spaces instead of underscores, e.g. "settings profile".
-        const char unique_char; /// Unique character for this type. E.g. 'P' for SETTINGS_PROFILE.
- const int not_found_error_code; - - static const TypeInfo & get(Type type_); - String outputWithEntityName(const String & entity_name) const; - }; - - const TypeInfo & getTypeInfo() const { return TypeInfo::get(getType()); } - String outputTypeAndName() const { return getTypeInfo().outputWithEntityName(getName()); } + const AccessEntityTypeInfo & getTypeInfo() const { return AccessEntityTypeInfo::get(getType()); } + String formatTypeWithName() const { return getTypeInfo().formatEntityNameWithType(getName()); } template bool isTypeOf() const { return isTypeOf(EntityClassT::TYPE); } - bool isTypeOf(Type type) const { return type == getType(); } + bool isTypeOf(AccessEntityType type) const { return type == getType(); } virtual void setName(const String & name_) { name = name_; } const String & getName() const { return name; } @@ -98,70 +60,4 @@ protected: using AccessEntityPtr = std::shared_ptr; - -inline const IAccessEntity::TypeInfo & IAccessEntity::TypeInfo::get(Type type_) -{ - static constexpr auto make_info = [](const char * raw_name_, const char * plural_raw_name_, char unique_char_, int not_found_error_code_) - { - String init_names[2] = {raw_name_, plural_raw_name_}; - String init_aliases[2]; - for (size_t i = 0; i != std::size(init_names); ++i) - { - String & init_name = init_names[i]; - String & init_alias = init_aliases[i]; - boost::to_upper(init_name); - boost::replace_all(init_name, "_", " "); - if (auto underscore_pos = init_name.find_first_of(" "); underscore_pos != String::npos) - init_alias = init_name.substr(underscore_pos + 1); - } - String init_name_for_output_with_entity_name = init_names[0]; - boost::to_lower(init_name_for_output_with_entity_name); - return TypeInfo{raw_name_, plural_raw_name_, std::move(init_names[0]), std::move(init_aliases[0]), std::move(init_names[1]), std::move(init_aliases[1]), std::move(init_name_for_output_with_entity_name), unique_char_, not_found_error_code_}; - }; - - switch (type_) - { - case Type::USER: - { - static const auto info = make_info("USER", "USERS", 'U', ErrorCodes::UNKNOWN_USER); - return info; - } - case Type::ROLE: - { - static const auto info = make_info("ROLE", "ROLES", 'R', ErrorCodes::UNKNOWN_ROLE); - return info; - } - case Type::SETTINGS_PROFILE: - { - static const auto info = make_info("SETTINGS_PROFILE", "SETTINGS_PROFILES", 'S', ErrorCodes::THERE_IS_NO_PROFILE); - return info; - } - case Type::ROW_POLICY: - { - static const auto info = make_info("ROW_POLICY", "ROW_POLICIES", 'P', ErrorCodes::UNKNOWN_ROW_POLICY); - return info; - } - case Type::QUOTA: - { - static const auto info = make_info("QUOTA", "QUOTAS", 'Q', ErrorCodes::UNKNOWN_QUOTA); - return info; - } - case Type::MAX: break; - } - throw Exception("Unknown type: " + std::to_string(static_cast(type_)), ErrorCodes::LOGICAL_ERROR); -} - -inline String IAccessEntity::TypeInfo::outputWithEntityName(const String & entity_name) const -{ - String msg = name_for_output_with_entity_name; - msg += " "; - msg += backQuote(entity_name); - return msg; -} - -inline String toString(IAccessEntity::Type type) -{ - return IAccessEntity::TypeInfo::get(type).name; -} - } diff --git a/src/Access/IAccessStorage.cpp b/src/Access/IAccessStorage.cpp index a0ad5d4ec79..51c2525d923 100644 --- a/src/Access/IAccessStorage.cpp +++ b/src/Access/IAccessStorage.cpp @@ -8,6 +8,8 @@ #include #include #include +#include +#include namespace DB @@ -26,20 +28,16 @@ namespace ErrorCodes namespace { - using EntityType = IAccessStorage::EntityType; - using EntityTypeInfo = 
IAccessStorage::EntityTypeInfo; - - String outputID(const UUID & id) { return "ID(" + toString(id) + ")"; } - String outputTypeAndNameOrID(const IAccessStorage & storage, const UUID & id) + String formatTypeWithNameOrID(const IAccessStorage & storage, const UUID & id) { auto entity = storage.tryRead(id); if (entity) - return entity->outputTypeAndName(); + return entity->formatTypeWithName(); return outputID(id); } @@ -132,19 +130,19 @@ namespace } -std::vector IAccessStorage::findAll(EntityType type) const +std::vector IAccessStorage::findAll(AccessEntityType type) const { return findAllImpl(type); } -std::optional IAccessStorage::find(EntityType type, const String & name) const +std::optional IAccessStorage::find(AccessEntityType type, const String & name) const { return findImpl(type, name); } -std::vector IAccessStorage::find(EntityType type, const Strings & names) const +std::vector IAccessStorage::find(AccessEntityType type, const Strings & names) const { std::vector ids; ids.reserve(names.size()); @@ -158,7 +156,7 @@ std::vector IAccessStorage::find(EntityType type, const Strings & names) c } -UUID IAccessStorage::getID(EntityType type, const String & name) const +UUID IAccessStorage::getID(AccessEntityType type, const String & name) const { auto id = findImpl(type, name); if (id) @@ -167,7 +165,7 @@ UUID IAccessStorage::getID(EntityType type, const String & name) const } -std::vector IAccessStorage::getIDs(EntityType type, const Strings & names) const +std::vector IAccessStorage::getIDs(AccessEntityType type, const Strings & names) const { std::vector ids; ids.reserve(names.size()); @@ -253,7 +251,7 @@ std::vector IAccessStorage::insert(const std::vector & mu if (tracker.errors()) { - auto get_name_function = [&](size_t i) { return multiple_entities[i]->outputTypeAndName(); }; + auto get_name_function = [&](size_t i) { return multiple_entities[i]->formatTypeWithName(); }; tracker.showErrors("Couldn't insert {failed_names}. Successfully inserted: {succeeded_names}", get_name_function); } @@ -306,7 +304,7 @@ std::vector IAccessStorage::insertOrReplace(const std::vectoroutputTypeAndName(); }; + auto get_name_function = [&](size_t i) { return multiple_entities[i]->formatTypeWithName(); }; tracker.showErrors("Couldn't insert {failed_names}. Successfully inserted: {succeeded_names}", get_name_function); } @@ -332,7 +330,7 @@ void IAccessStorage::remove(const std::vector & ids) if (tracker.errors()) { - auto get_name_function = [&](size_t i) { return outputTypeAndNameOrID(*this, ids[i]); }; + auto get_name_function = [&](size_t i) { return formatTypeWithNameOrID(*this, ids[i]); }; tracker.showErrors("Couldn't remove {failed_names}. Successfully removed: {succeeded_names}", get_name_function); } } @@ -376,7 +374,7 @@ void IAccessStorage::update(const std::vector & ids, const UpdateFunc & up if (tracker.errors()) { - auto get_name_function = [&](size_t i) { return outputTypeAndNameOrID(*this, ids[i]); }; + auto get_name_function = [&](size_t i) { return formatTypeWithNameOrID(*this, ids[i]); }; tracker.showErrors("Couldn't update {failed_names}. 
Successfully updated: {succeeded_names}", get_name_function); } } @@ -402,7 +400,7 @@ std::vector IAccessStorage::tryUpdate(const std::vector & ids, const } -scope_guard IAccessStorage::subscribeForChanges(EntityType type, const OnChangedHandler & handler) const +scope_guard IAccessStorage::subscribeForChanges(AccessEntityType type, const OnChangedHandler & handler) const { return subscribeForChangesImpl(type, handler); } @@ -423,7 +421,7 @@ scope_guard IAccessStorage::subscribeForChanges(const std::vector & ids, c } -bool IAccessStorage::hasSubscription(EntityType type) const +bool IAccessStorage::hasSubscription(AccessEntityType type) const { return hasSubscriptionImpl(type); } @@ -481,7 +479,7 @@ UUID IAccessStorage::loginImpl( return *id; } } - throwNotFound(EntityType::USER, credentials.getUserName()); + throwNotFound(AccessEntityType::USER, credentials.getUserName()); } @@ -542,68 +540,68 @@ void IAccessStorage::throwNotFound(const UUID & id) const } -void IAccessStorage::throwNotFound(EntityType type, const String & name) const +void IAccessStorage::throwNotFound(AccessEntityType type, const String & name) const { - int error_code = EntityTypeInfo::get(type).not_found_error_code; - throw Exception("There is no " + outputEntityTypeAndName(type, name) + " in " + getStorageName(), error_code); + int error_code = AccessEntityTypeInfo::get(type).not_found_error_code; + throw Exception("There is no " + formatEntityTypeWithName(type, name) + " in " + getStorageName(), error_code); } -void IAccessStorage::throwBadCast(const UUID & id, EntityType type, const String & name, EntityType required_type) +void IAccessStorage::throwBadCast(const UUID & id, AccessEntityType type, const String & name, AccessEntityType required_type) { throw Exception( - outputID(id) + ": " + outputEntityTypeAndName(type, name) + " expected to be of type " + toString(required_type), + outputID(id) + ": " + formatEntityTypeWithName(type, name) + " expected to be of type " + toString(required_type), ErrorCodes::LOGICAL_ERROR); } -void IAccessStorage::throwIDCollisionCannotInsert(const UUID & id, EntityType type, const String & name, EntityType existing_type, const String & existing_name) const +void IAccessStorage::throwIDCollisionCannotInsert(const UUID & id, AccessEntityType type, const String & name, AccessEntityType existing_type, const String & existing_name) const { throw Exception( - outputEntityTypeAndName(type, name) + ": cannot insert because the " + outputID(id) + " is already used by " - + outputEntityTypeAndName(existing_type, existing_name) + " in " + getStorageName(), + formatEntityTypeWithName(type, name) + ": cannot insert because the " + outputID(id) + " is already used by " + + formatEntityTypeWithName(existing_type, existing_name) + " in " + getStorageName(), ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS); } -void IAccessStorage::throwNameCollisionCannotInsert(EntityType type, const String & name) const +void IAccessStorage::throwNameCollisionCannotInsert(AccessEntityType type, const String & name) const { throw Exception( - outputEntityTypeAndName(type, name) + ": cannot insert because " + outputEntityTypeAndName(type, name) + " already exists in " + formatEntityTypeWithName(type, name) + ": cannot insert because " + formatEntityTypeWithName(type, name) + " already exists in " + getStorageName(), ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS); } -void IAccessStorage::throwNameCollisionCannotRename(EntityType type, const String & old_name, const String & new_name) const +void 
IAccessStorage::throwNameCollisionCannotRename(AccessEntityType type, const String & old_name, const String & new_name) const { throw Exception( - outputEntityTypeAndName(type, old_name) + ": cannot rename to " + backQuote(new_name) + " because " - + outputEntityTypeAndName(type, new_name) + " already exists in " + getStorageName(), + formatEntityTypeWithName(type, old_name) + ": cannot rename to " + backQuote(new_name) + " because " + + formatEntityTypeWithName(type, new_name) + " already exists in " + getStorageName(), ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS); } -void IAccessStorage::throwReadonlyCannotInsert(EntityType type, const String & name) const +void IAccessStorage::throwReadonlyCannotInsert(AccessEntityType type, const String & name) const { throw Exception( - "Cannot insert " + outputEntityTypeAndName(type, name) + " to " + getStorageName() + " because this storage is readonly", + "Cannot insert " + formatEntityTypeWithName(type, name) + " to " + getStorageName() + " because this storage is readonly", ErrorCodes::ACCESS_STORAGE_READONLY); } -void IAccessStorage::throwReadonlyCannotUpdate(EntityType type, const String & name) const +void IAccessStorage::throwReadonlyCannotUpdate(AccessEntityType type, const String & name) const { throw Exception( - "Cannot update " + outputEntityTypeAndName(type, name) + " in " + getStorageName() + " because this storage is readonly", + "Cannot update " + formatEntityTypeWithName(type, name) + " in " + getStorageName() + " because this storage is readonly", ErrorCodes::ACCESS_STORAGE_READONLY); } -void IAccessStorage::throwReadonlyCannotRemove(EntityType type, const String & name) const +void IAccessStorage::throwReadonlyCannotRemove(AccessEntityType type, const String & name) const { throw Exception( - "Cannot remove " + outputEntityTypeAndName(type, name) + " from " + getStorageName() + " because this storage is readonly", + "Cannot remove " + formatEntityTypeWithName(type, name) + " from " + getStorageName() + " because this storage is readonly", ErrorCodes::ACCESS_STORAGE_READONLY); } diff --git a/src/Access/IAccessStorage.h b/src/Access/IAccessStorage.h index e9df2e3c1a4..ccbb1ffe5bc 100644 --- a/src/Access/IAccessStorage.h +++ b/src/Access/IAccessStorage.h @@ -34,33 +34,30 @@ public: /// Returns a JSON with the parameters of the storage. It's up to the storage type to fill the JSON. virtual String getStorageParamsJSON() const { return "{}"; } - using EntityType = IAccessEntity::Type; - using EntityTypeInfo = IAccessEntity::TypeInfo; - /// Returns the identifiers of all the entities of a specified type contained in the storage. - std::vector findAll(EntityType type) const; + std::vector findAll(AccessEntityType type) const; template std::vector findAll() const { return findAll(EntityClassT::TYPE); } /// Searches for an entity with specified type and name. Returns std::nullopt if not found. - std::optional find(EntityType type, const String & name) const; + std::optional find(AccessEntityType type, const String & name) const; template std::optional find(const String & name) const { return find(EntityClassT::TYPE, name); } - std::vector find(EntityType type, const Strings & names) const; + std::vector find(AccessEntityType type, const Strings & names) const; template std::vector find(const Strings & names) const { return find(EntityClassT::TYPE, names); } /// Searches for an entity with specified name and type. Throws an exception if not found. 
- UUID getID(EntityType type, const String & name) const; + UUID getID(AccessEntityType type, const String & name) const; template UUID getID(const String & name) const { return getID(EntityClassT::TYPE, name); } - std::vector getIDs(EntityType type, const Strings & names) const; + std::vector getIDs(AccessEntityType type, const Strings & names) const; template std::vector getIDs(const Strings & names) const { return getIDs(EntityClassT::TYPE, names); } @@ -132,7 +129,7 @@ public: /// Subscribes for all changes. /// Can return nullptr if cannot subscribe (identifier not found) or if it doesn't make sense (the storage is read-only). - scope_guard subscribeForChanges(EntityType type, const OnChangedHandler & handler) const; + scope_guard subscribeForChanges(AccessEntityType type, const OnChangedHandler & handler) const; template scope_guard subscribeForChanges(OnChangedHandler handler) const { return subscribeForChanges(EntityClassT::TYPE, handler); } @@ -142,7 +139,7 @@ public: scope_guard subscribeForChanges(const UUID & id, const OnChangedHandler & handler) const; scope_guard subscribeForChanges(const std::vector & ids, const OnChangedHandler & handler) const; - bool hasSubscription(EntityType type) const; + bool hasSubscription(AccessEntityType type) const; bool hasSubscription(const UUID & id) const; /// Finds a user, check the provided credentials and returns the ID of the user if they are valid. @@ -154,8 +151,8 @@ public: UUID getIDOfLoggedUser(const String & user_name) const; protected: - virtual std::optional findImpl(EntityType type, const String & name) const = 0; - virtual std::vector findAllImpl(EntityType type) const = 0; + virtual std::optional findImpl(AccessEntityType type, const String & name) const = 0; + virtual std::vector findAllImpl(AccessEntityType type) const = 0; virtual bool existsImpl(const UUID & id) const = 0; virtual AccessEntityPtr readImpl(const UUID & id) const = 0; virtual String readNameImpl(const UUID & id) const = 0; @@ -164,9 +161,9 @@ protected: virtual void removeImpl(const UUID & id) = 0; virtual void updateImpl(const UUID & id, const UpdateFunc & update_func) = 0; virtual scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const = 0; - virtual scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const = 0; + virtual scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const = 0; virtual bool hasSubscriptionImpl(const UUID & id) const = 0; - virtual bool hasSubscriptionImpl(EntityType type) const = 0; + virtual bool hasSubscriptionImpl(AccessEntityType type) const = 0; virtual UUID loginImpl(const Credentials & credentials, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators) const; virtual bool areCredentialsValidImpl(const User & user, const Credentials & credentials, const ExternalAuthenticators & external_authenticators) const; virtual bool isAddressAllowedImpl(const User & user, const Poco::Net::IPAddress & address) const; @@ -174,17 +171,17 @@ protected: static UUID generateRandomID(); Poco::Logger * getLogger() const; - static String outputEntityTypeAndName(EntityType type, const String & name) { return EntityTypeInfo::get(type).outputWithEntityName(name); } + static String formatEntityTypeWithName(AccessEntityType type, const String & name) { return AccessEntityTypeInfo::get(type).formatEntityNameWithType(name); } [[noreturn]] void throwNotFound(const UUID & id) const; - [[noreturn]] void 
throwNotFound(EntityType type, const String & name) const; - [[noreturn]] static void throwBadCast(const UUID & id, EntityType type, const String & name, EntityType required_type); + [[noreturn]] void throwNotFound(AccessEntityType type, const String & name) const; + [[noreturn]] static void throwBadCast(const UUID & id, AccessEntityType type, const String & name, AccessEntityType required_type); [[noreturn]] void throwIDCollisionCannotInsert( - const UUID & id, EntityType type, const String & name, EntityType existing_type, const String & existing_name) const; - [[noreturn]] void throwNameCollisionCannotInsert(EntityType type, const String & name) const; - [[noreturn]] void throwNameCollisionCannotRename(EntityType type, const String & old_name, const String & new_name) const; - [[noreturn]] void throwReadonlyCannotInsert(EntityType type, const String & name) const; - [[noreturn]] void throwReadonlyCannotUpdate(EntityType type, const String & name) const; - [[noreturn]] void throwReadonlyCannotRemove(EntityType type, const String & name) const; + const UUID & id, AccessEntityType type, const String & name, AccessEntityType existing_type, const String & existing_name) const; + [[noreturn]] void throwNameCollisionCannotInsert(AccessEntityType type, const String & name) const; + [[noreturn]] void throwNameCollisionCannotRename(AccessEntityType type, const String & old_name, const String & new_name) const; + [[noreturn]] void throwReadonlyCannotInsert(AccessEntityType type, const String & name) const; + [[noreturn]] void throwReadonlyCannotUpdate(AccessEntityType type, const String & name) const; + [[noreturn]] void throwReadonlyCannotRemove(AccessEntityType type, const String & name) const; [[noreturn]] static void throwAddressNotAllowed(const Poco::Net::IPAddress & address); [[noreturn]] static void throwInvalidCredentials(); [[noreturn]] static void throwCannotAuthenticate(const String & user_name); diff --git a/src/Access/LDAPAccessStorage.cpp b/src/Access/LDAPAccessStorage.cpp index 92c9b15612b..c89d8c1f953 100644 --- a/src/Access/LDAPAccessStorage.cpp +++ b/src/Access/LDAPAccessStorage.cpp @@ -412,14 +412,14 @@ String LDAPAccessStorage::getStorageParamsJSON() const } -std::optional LDAPAccessStorage::findImpl(EntityType type, const String & name) const +std::optional LDAPAccessStorage::findImpl(AccessEntityType type, const String & name) const { std::scoped_lock lock(mutex); return memory_storage.find(type, name); } -std::vector LDAPAccessStorage::findAllImpl(EntityType type) const +std::vector LDAPAccessStorage::findAllImpl(AccessEntityType type) const { std::scoped_lock lock(mutex); return memory_storage.findAll(type); @@ -482,7 +482,7 @@ scope_guard LDAPAccessStorage::subscribeForChangesImpl(const UUID & id, const On } -scope_guard LDAPAccessStorage::subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const +scope_guard LDAPAccessStorage::subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const { std::scoped_lock lock(mutex); return memory_storage.subscribeForChanges(type, handler); @@ -496,7 +496,7 @@ bool LDAPAccessStorage::hasSubscriptionImpl(const UUID & id) const } -bool LDAPAccessStorage::hasSubscriptionImpl(EntityType type) const +bool LDAPAccessStorage::hasSubscriptionImpl(AccessEntityType type) const { std::scoped_lock lock(mutex); return memory_storage.hasSubscription(type); diff --git a/src/Access/LDAPAccessStorage.h b/src/Access/LDAPAccessStorage.h index c1512117186..feb6ee4d92a 100644 --- 
a/src/Access/LDAPAccessStorage.h +++ b/src/Access/LDAPAccessStorage.h @@ -42,8 +42,8 @@ public: // IAccessStorage implementations. virtual String getStorageParamsJSON() const override; private: // IAccessStorage implementations. - virtual std::optional findImpl(EntityType type, const String & name) const override; - virtual std::vector findAllImpl(EntityType type) const override; + virtual std::optional findImpl(AccessEntityType type, const String & name) const override; + virtual std::vector findAllImpl(AccessEntityType type) const override; virtual bool existsImpl(const UUID & id) const override; virtual AccessEntityPtr readImpl(const UUID & id) const override; virtual String readNameImpl(const UUID & id) const override; @@ -52,9 +52,9 @@ private: // IAccessStorage implementations. virtual void removeImpl(const UUID & id) override; virtual void updateImpl(const UUID & id, const UpdateFunc & update_func) override; virtual scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override; - virtual scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const override; + virtual scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const override; virtual bool hasSubscriptionImpl(const UUID & id) const override; - virtual bool hasSubscriptionImpl(EntityType type) const override; + virtual bool hasSubscriptionImpl(AccessEntityType type) const override; virtual UUID loginImpl(const Credentials & credentials, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators) const override; virtual UUID getIDOfLoggedUserImpl(const String & user_name) const override; diff --git a/src/Access/MemoryAccessStorage.cpp b/src/Access/MemoryAccessStorage.cpp index 68a24146107..d3c99204bd3 100644 --- a/src/Access/MemoryAccessStorage.cpp +++ b/src/Access/MemoryAccessStorage.cpp @@ -13,7 +13,7 @@ MemoryAccessStorage::MemoryAccessStorage(const String & storage_name_) } -std::optional MemoryAccessStorage::findImpl(EntityType type, const String & name) const +std::optional MemoryAccessStorage::findImpl(AccessEntityType type, const String & name) const { std::lock_guard lock{mutex}; const auto & entries_by_name = entries_by_name_and_type[static_cast(type)]; @@ -26,7 +26,7 @@ std::optional MemoryAccessStorage::findImpl(EntityType type, const String } -std::vector MemoryAccessStorage::findAllImpl(EntityType type) const +std::vector MemoryAccessStorage::findAllImpl(AccessEntityType type) const { std::lock_guard lock{mutex}; std::vector result; @@ -77,7 +77,7 @@ UUID MemoryAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool re void MemoryAccessStorage::insertNoLock(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, Notifications & notifications) { const String & name = new_entity->getName(); - EntityType type = new_entity->getType(); + AccessEntityType type = new_entity->getType(); /// Check that we can insert. 
auto it = entries_by_id.find(id); @@ -125,7 +125,7 @@ void MemoryAccessStorage::removeNoLock(const UUID & id, Notifications & notifica Entry & entry = it->second; const String & name = entry.entity->getName(); - EntityType type = entry.entity->getType(); + AccessEntityType type = entry.entity->getType(); prepareNotifications(entry, true, notifications); @@ -266,7 +266,7 @@ void MemoryAccessStorage::prepareNotifications(const Entry & entry, bool remove, } -scope_guard MemoryAccessStorage::subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const +scope_guard MemoryAccessStorage::subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const { std::lock_guard lock{mutex}; auto & handlers = handlers_by_type[static_cast(type)]; @@ -317,7 +317,7 @@ bool MemoryAccessStorage::hasSubscriptionImpl(const UUID & id) const } -bool MemoryAccessStorage::hasSubscriptionImpl(EntityType type) const +bool MemoryAccessStorage::hasSubscriptionImpl(AccessEntityType type) const { std::lock_guard lock{mutex}; const auto & handlers = handlers_by_type[static_cast(type)]; diff --git a/src/Access/MemoryAccessStorage.h b/src/Access/MemoryAccessStorage.h index 3558fcc6088..ea7b0193471 100644 --- a/src/Access/MemoryAccessStorage.h +++ b/src/Access/MemoryAccessStorage.h @@ -24,8 +24,8 @@ public: void setAll(const std::vector> & all_entities); private: - std::optional findImpl(EntityType type, const String & name) const override; - std::vector findAllImpl(EntityType type) const override; + std::optional findImpl(AccessEntityType type, const String & name) const override; + std::vector findAllImpl(AccessEntityType type) const override; bool existsImpl(const UUID & id) const override; AccessEntityPtr readImpl(const UUID & id) const override; String readNameImpl(const UUID & id) const override; @@ -34,9 +34,9 @@ private: void removeImpl(const UUID & id) override; void updateImpl(const UUID & id, const UpdateFunc & update_func) override; scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override; - scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const override; + scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const override; bool hasSubscriptionImpl(const UUID & id) const override; - bool hasSubscriptionImpl(EntityType type) const override; + bool hasSubscriptionImpl(AccessEntityType type) const override; struct Entry { @@ -53,7 +53,7 @@ private: mutable std::recursive_mutex mutex; std::unordered_map entries_by_id; /// We want to search entries both by ID and by the pair of name and type. 
- std::unordered_map entries_by_name_and_type[static_cast(EntityType::MAX)]; - mutable std::list handlers_by_type[static_cast(EntityType::MAX)]; + std::unordered_map entries_by_name_and_type[static_cast(AccessEntityType::MAX)]; + mutable std::list handlers_by_type[static_cast(AccessEntityType::MAX)]; }; } diff --git a/src/Access/MultipleAccessStorage.cpp b/src/Access/MultipleAccessStorage.cpp index bb2ef4a6df4..61bc84e8ab2 100644 --- a/src/Access/MultipleAccessStorage.cpp +++ b/src/Access/MultipleAccessStorage.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -98,7 +99,7 @@ std::shared_ptr MultipleAccessStorage::getStoragesInternal() con } -std::optional MultipleAccessStorage::findImpl(EntityType type, const String & name) const +std::optional MultipleAccessStorage::findImpl(AccessEntityType type, const String & name) const { auto storages = getStoragesInternal(); for (const auto & storage : *storages) @@ -115,7 +116,7 @@ std::optional MultipleAccessStorage::findImpl(EntityType type, const Strin } -std::vector MultipleAccessStorage::findAllImpl(EntityType type) const +std::vector MultipleAccessStorage::findAllImpl(AccessEntityType type) const { std::vector all_ids; auto storages = getStoragesInternal(); @@ -219,7 +220,7 @@ UUID MultipleAccessStorage::insertImpl(const AccessEntityPtr & entity, bool repl } if (!storage_for_insertion) - throw Exception("Not found a storage to insert " + entity->outputTypeAndName(), ErrorCodes::ACCESS_STORAGE_FOR_INSERTION_NOT_FOUND); + throw Exception("Not found a storage to insert " + entity->formatTypeWithName(), ErrorCodes::ACCESS_STORAGE_FOR_INSERTION_NOT_FOUND); auto id = replace_if_exists ? storage_for_insertion->insertOrReplace(entity) : storage_for_insertion->insert(entity); std::lock_guard lock{mutex}; @@ -253,8 +254,8 @@ void MultipleAccessStorage::updateImpl(const UUID & id, const UpdateFunc & updat if (storage->find(new_entity->getType(), new_entity->getName())) { throw Exception( - old_entity->outputTypeAndName() + ": cannot rename to " + backQuote(new_entity->getName()) + " because " - + new_entity->outputTypeAndName() + " already exists in " + storage->getStorageName(), + old_entity->formatTypeWithName() + ": cannot rename to " + backQuote(new_entity->getName()) + " because " + + new_entity->formatTypeWithName() + " already exists in " + storage->getStorageName(), ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS); } } @@ -286,7 +287,7 @@ bool MultipleAccessStorage::hasSubscriptionImpl(const UUID & id) const } -scope_guard MultipleAccessStorage::subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const +scope_guard MultipleAccessStorage::subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const { std::unique_lock lock{mutex}; auto & handlers = handlers_by_type[static_cast(type)]; @@ -306,7 +307,7 @@ scope_guard MultipleAccessStorage::subscribeForChangesImpl(EntityType type, cons } -bool MultipleAccessStorage::hasSubscriptionImpl(EntityType type) const +bool MultipleAccessStorage::hasSubscriptionImpl(AccessEntityType type) const { std::lock_guard lock{mutex}; const auto & handlers = handlers_by_type[static_cast(type)]; @@ -321,10 +322,10 @@ void MultipleAccessStorage::updateSubscriptionsToNestedStorages(std::unique_lock { /// lock is already locked. 
- std::vector<std::pair<StoragePtr, scope_guard>> added_subscriptions[static_cast<size_t>(EntityType::MAX)]; + std::vector<std::pair<StoragePtr, scope_guard>> added_subscriptions[static_cast<size_t>(AccessEntityType::MAX)]; std::vector<scope_guard> removed_subscriptions; - for (auto type : collections::range(EntityType::MAX)) + for (auto type : collections::range(AccessEntityType::MAX)) { auto & handlers = handlers_by_type[static_cast<size_t>(type)]; auto & subscriptions = subscriptions_to_nested_storages[static_cast<size_t>(type)]; @@ -364,7 +365,7 @@ void MultipleAccessStorage::updateSubscriptionsToNestedStorages(std::unique_lock lock.unlock(); removed_subscriptions.clear(); - for (auto type : collections::range(EntityType::MAX)) + for (auto type : collections::range(AccessEntityType::MAX)) { if (!added_subscriptions[static_cast<size_t>(type)].empty()) { @@ -384,7 +385,7 @@ void MultipleAccessStorage::updateSubscriptionsToNestedStorages(std::unique_lock /// Lock the mutex again to store added subscriptions to the nested storages. lock.lock(); - for (auto type : collections::range(EntityType::MAX)) + for (auto type : collections::range(AccessEntityType::MAX)) { if (!added_subscriptions[static_cast<size_t>(type)].empty()) { @@ -418,7 +419,7 @@ UUID MultipleAccessStorage::loginImpl(const Credentials & credentials, const Poc } catch (...) { - if (!storage->find(EntityType::USER, credentials.getUserName())) + if (!storage->find(AccessEntityType::USER, credentials.getUserName())) { /// The authentication failed because there no users with such name in the `storage` /// thus we can try to search in other nested storages. @@ -427,7 +428,7 @@ UUID MultipleAccessStorage::loginImpl(const Credentials & credentials, const Poc throw; } } - throwNotFound(EntityType::USER, credentials.getUserName()); + throwNotFound(AccessEntityType::USER, credentials.getUserName()); } @@ -445,7 +446,7 @@ UUID MultipleAccessStorage::getIDOfLoggedUserImpl(const String & user_name) cons } catch (...) { - if (!storage->find(EntityType::USER, user_name)) + if (!storage->find(AccessEntityType::USER, user_name)) { /// The authentication failed because there no users with such name in the `storage` /// thus we can try to search in other nested storages.
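// A sketch (interfaces simplified, names hypothetical) of the fall-through rule in
// loginImpl/getIDOfLoggedUserImpl above: an authentication error is swallowed only
// when the failing storage does not know the user at all; otherwise it is rethrown.
#include <stdexcept>
#include <string>
#include <vector>

struct StorageView
{
    virtual ~StorageView() = default;
    virtual int login(const std::string & user) const = 0;     // returns an id, throws on failure
    virtual bool hasUser(const std::string & user) const = 0;  // stands in for find(USER, name)
};

int loginAcrossStorages(const std::vector<const StorageView *> & storages, const std::string & user)
{
    for (const auto * storage : storages)
    {
        try
        {
            return storage->login(user);
        }
        catch (...)
        {
            if (!storage->hasUser(user))
                continue;  // no such user here, try the next nested storage
            throw;         // the user exists but authentication failed: report it
        }
    }
    throw std::runtime_error("user not found: " + user);
}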
@@ -454,7 +455,7 @@ UUID MultipleAccessStorage::getIDOfLoggedUserImpl(const String & user_name) cons throw; } } - throwNotFound(EntityType::USER, user_name); + throwNotFound(AccessEntityType::USER, user_name); } } diff --git a/src/Access/MultipleAccessStorage.h b/src/Access/MultipleAccessStorage.h index d1c8ec97b36..462f97d6fa9 100644 --- a/src/Access/MultipleAccessStorage.h +++ b/src/Access/MultipleAccessStorage.h @@ -35,8 +35,8 @@ public: StoragePtr getStorage(const UUID & id); protected: - std::optional<UUID> findImpl(EntityType type, const String & name) const override; - std::vector<UUID> findAllImpl(EntityType type) const override; + std::optional<UUID> findImpl(AccessEntityType type, const String & name) const override; + std::vector<UUID> findAllImpl(AccessEntityType type) const override; bool existsImpl(const UUID & id) const override; AccessEntityPtr readImpl(const UUID & id) const override; String readNameImpl(const UUID &id) const override; @@ -45,9 +45,9 @@ protected: void removeImpl(const UUID & id) override; void updateImpl(const UUID & id, const UpdateFunc & update_func) override; scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override; - scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const override; + scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const override; bool hasSubscriptionImpl(const UUID & id) const override; - bool hasSubscriptionImpl(EntityType type) const override; + bool hasSubscriptionImpl(AccessEntityType type) const override; UUID loginImpl(const Credentials & credentials, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators) const override; UUID getIDOfLoggedUserImpl(const String & user_name) const override; @@ -58,8 +58,8 @@ private: std::shared_ptr<const Storages> nested_storages; mutable LRUCache<UUID, Storage> ids_cache; - mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(EntityType::MAX)]; - mutable std::unordered_map<StoragePtr, scope_guard> subscriptions_to_nested_storages[static_cast<size_t>(EntityType::MAX)]; + mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(AccessEntityType::MAX)]; + mutable std::unordered_map<StoragePtr, scope_guard> subscriptions_to_nested_storages[static_cast<size_t>(AccessEntityType::MAX)]; mutable std::mutex mutex; }; diff --git a/src/Access/Quota.h b/src/Access/Quota.h index 5d23cf11d42..487af49d684 100644 --- a/src/Access/Quota.h +++ b/src/Access/Quota.h @@ -1,20 +1,13 @@ #pragma once #include +#include #include -#include -#include -#include #include namespace DB { -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - /** Quota for resources consumption for specific interval. * Used to limit resource usage by user. @@ -26,40 +19,10 @@ namespace ErrorCodes */ struct Quota : public IAccessEntity { - using ResourceAmount = UInt64; - - enum ResourceType - { - QUERIES, /// Number of queries. - QUERY_SELECTS, /// Number of select queries. - QUERY_INSERTS, /// Number of inserts queries. - ERRORS, /// Number of queries with exceptions. - RESULT_ROWS, /// Number of rows returned as result. - RESULT_BYTES, /// Number of bytes returned as result. - READ_ROWS, /// Number of rows read from tables. - READ_BYTES, /// Number of bytes read from tables. - EXECUTION_TIME, /// Total amount of query execution time in nanoseconds. - - MAX_RESOURCE_TYPE - }; - - struct ResourceTypeInfo - { - const char * const raw_name = ""; - const String name; /// Lowercased with underscores, e.g. "result_rows". - const String keyword; /// Uppercased with spaces, e.g. "RESULT ROWS".
- const bool output_as_float = false; - const UInt64 output_denominator = 1; - String amountToString(ResourceAmount amount) const; - ResourceAmount amountFromString(const String & str) const; - String outputWithAmount(ResourceAmount amount) const; - static const ResourceTypeInfo & get(ResourceType type); - }; - /// Amount of resources available to consume for each duration. struct Limits { - std::optional<ResourceAmount> max[MAX_RESOURCE_TYPE]; + std::optional<QuotaValue> max[static_cast<size_t>(QuotaType::MAX)]; std::chrono::seconds duration = std::chrono::seconds::zero(); /// Intervals can be randomized (to avoid DoS if intervals for many users end at one time). @@ -73,206 +36,16 @@ struct Quota : public IAccessEntity /// Key to share quota consumption. /// Users with the same key share the same amount of resource. - enum class KeyType - { - NONE, /// All users share the same quota. - USER_NAME, /// Connections with the same user name share the same quota. - IP_ADDRESS, /// Connections from the same IP share the same quota. - FORWARDED_IP_ADDRESS, /// Use X-Forwarded-For HTTP header instead of IP address. - CLIENT_KEY, /// Client should explicitly supply a key to use. - CLIENT_KEY_OR_USER_NAME, /// Same as CLIENT_KEY, but use USER_NAME if the client doesn't supply a key. - CLIENT_KEY_OR_IP_ADDRESS, /// Same as CLIENT_KEY, but use IP_ADDRESS if the client doesn't supply a key. - - MAX - }; - - struct KeyTypeInfo - { - const char * const raw_name; - const String name; /// Lowercased with underscores, e.g. "client_key". - const std::vector<KeyType> base_types; /// For combined types keeps base types, e.g. for CLIENT_KEY_OR_USER_NAME it keeps [KeyType::CLIENT_KEY, KeyType::USER_NAME]. - static const KeyTypeInfo & get(KeyType type); - }; - - KeyType key_type = KeyType::NONE; + QuotaKeyType key_type = QuotaKeyType::NONE; /// Which roles or users should use this quota.
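// A sketch of how the new Limits layout is consulted (enum values abridged,
// helper name hypothetical): an empty optional in `max` means "not limited".
#include <cstddef>
#include <cstdint>
#include <optional>

enum class QuotaType { QUERIES, ERRORS, RESULT_ROWS, READ_ROWS, EXECUTION_TIME, MAX };
using QuotaValue = std::uint64_t;

struct Limits
{
    std::optional<QuotaValue> max[static_cast<std::size_t>(QuotaType::MAX)];
};

bool exceedsLimit(const Limits & limits, QuotaType type, QuotaValue used)
{
    const auto & m = limits.max[static_cast<std::size_t>(type)];
    return m && used > *m;  // no stored value -> the resource is unlimited
}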
RolesOrUsersSet to_roles; bool equal(const IAccessEntity & other) const override; std::shared_ptr clone() const override { return cloneImpl(); } - static constexpr const Type TYPE = Type::QUOTA; - Type getType() const override { return TYPE; } + static constexpr const auto TYPE = AccessEntityType::QUOTA; + AccessEntityType getType() const override { return TYPE; } }; - -inline String Quota::ResourceTypeInfo::amountToString(ResourceAmount amount) const -{ - if (!(amount % output_denominator)) - return std::to_string(amount / output_denominator); - else - return boost::lexical_cast(static_cast(amount) / output_denominator); -} - -inline Quota::ResourceAmount Quota::ResourceTypeInfo::amountFromString(const String & str) const -{ - if (output_denominator == 1) - return static_cast(std::strtoul(str.c_str(), nullptr, 10)); - else - return static_cast(std::strtod(str.c_str(), nullptr) * output_denominator); -} - -inline String Quota::ResourceTypeInfo::outputWithAmount(ResourceAmount amount) const -{ - String res = name; - res += " = "; - res += amountToString(amount); - return res; -} - -inline String toString(Quota::ResourceType type) -{ - return Quota::ResourceTypeInfo::get(type).raw_name; -} - -inline const Quota::ResourceTypeInfo & Quota::ResourceTypeInfo::get(ResourceType type) -{ - static constexpr auto make_info = [](const char * raw_name_, UInt64 output_denominator_) - { - String init_name = raw_name_; - boost::to_lower(init_name); - String init_keyword = raw_name_; - boost::replace_all(init_keyword, "_", " "); - bool init_output_as_float = (output_denominator_ != 1); - return ResourceTypeInfo{raw_name_, std::move(init_name), std::move(init_keyword), init_output_as_float, output_denominator_}; - }; - - switch (type) - { - case Quota::QUERIES: - { - static const auto info = make_info("QUERIES", 1); - return info; - } - case Quota::QUERY_SELECTS: - { - static const auto info = make_info("QUERY_SELECTS", 1); - return info; - } - case Quota::QUERY_INSERTS: - { - static const auto info = make_info("QUERY_INSERTS", 1); - return info; - } - case Quota::ERRORS: - { - static const auto info = make_info("ERRORS", 1); - return info; - } - case Quota::RESULT_ROWS: - { - static const auto info = make_info("RESULT_ROWS", 1); - return info; - } - case Quota::RESULT_BYTES: - { - static const auto info = make_info("RESULT_BYTES", 1); - return info; - } - case Quota::READ_ROWS: - { - static const auto info = make_info("READ_ROWS", 1); - return info; - } - case Quota::READ_BYTES: - { - static const auto info = make_info("READ_BYTES", 1); - return info; - } - case Quota::EXECUTION_TIME: - { - static const auto info = make_info("EXECUTION_TIME", 1000000000 /* execution_time is stored in nanoseconds */); - return info; - } - case Quota::MAX_RESOURCE_TYPE: break; - } - throw Exception("Unexpected resource type: " + std::to_string(static_cast(type)), ErrorCodes::LOGICAL_ERROR); -} - - -inline String toString(Quota::KeyType type) -{ - return Quota::KeyTypeInfo::get(type).raw_name; -} - -inline const Quota::KeyTypeInfo & Quota::KeyTypeInfo::get(KeyType type) -{ - static constexpr auto make_info = [](const char * raw_name_) - { - String init_name = raw_name_; - boost::to_lower(init_name); - std::vector init_base_types; - String replaced = boost::algorithm::replace_all_copy(init_name, "_or_", "|"); - Strings tokens; - boost::algorithm::split(tokens, replaced, boost::is_any_of("|")); - if (tokens.size() > 1) - { - for (const auto & token : tokens) - { - for (auto kt : collections::range(KeyType::MAX)) - { - if 
(KeyTypeInfo::get(kt).name == token) - { - init_base_types.push_back(kt); - break; - } - } - } - } - return KeyTypeInfo{raw_name_, std::move(init_name), std::move(init_base_types)}; - }; - - switch (type) - { - case KeyType::NONE: - { - static const auto info = make_info("NONE"); - return info; - } - case KeyType::USER_NAME: - { - static const auto info = make_info("USER_NAME"); - return info; - } - case KeyType::IP_ADDRESS: - { - static const auto info = make_info("IP_ADDRESS"); - return info; - } - case KeyType::FORWARDED_IP_ADDRESS: - { - static const auto info = make_info("FORWARDED_IP_ADDRESS"); - return info; - } - case KeyType::CLIENT_KEY: - { - static const auto info = make_info("CLIENT_KEY"); - return info; - } - case KeyType::CLIENT_KEY_OR_USER_NAME: - { - static const auto info = make_info("CLIENT_KEY_OR_USER_NAME"); - return info; - } - case KeyType::CLIENT_KEY_OR_IP_ADDRESS: - { - static const auto info = make_info("CLIENT_KEY_OR_IP_ADDRESS"); - return info; - } - case KeyType::MAX: break; - } - throw Exception("Unexpected quota key type: " + std::to_string(static_cast(type)), ErrorCodes::LOGICAL_ERROR); -} - using QuotaPtr = std::shared_ptr; } diff --git a/src/Access/QuotaCache.cpp b/src/Access/QuotaCache.cpp index e5fa9114a51..566c2409205 100644 --- a/src/Access/QuotaCache.cpp +++ b/src/Access/QuotaCache.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -44,26 +45,25 @@ void QuotaCache::QuotaInfo::setQuota(const QuotaPtr & quota_, const UUID & quota String QuotaCache::QuotaInfo::calculateKey(const EnabledQuota & enabled) const { const auto & params = enabled.params; - using KeyType = Quota::KeyType; switch (quota->key_type) { - case KeyType::NONE: + case QuotaKeyType::NONE: { return ""; } - case KeyType::USER_NAME: + case QuotaKeyType::USER_NAME: { return params.user_name; } - case KeyType::IP_ADDRESS: + case QuotaKeyType::IP_ADDRESS: { return params.client_address.toString(); } - case KeyType::FORWARDED_IP_ADDRESS: + case QuotaKeyType::FORWARDED_IP_ADDRESS: { return params.forwarded_address; } - case KeyType::CLIENT_KEY: + case QuotaKeyType::CLIENT_KEY: { if (!params.client_key.empty()) return params.client_key; @@ -71,19 +71,19 @@ String QuotaCache::QuotaInfo::calculateKey(const EnabledQuota & enabled) const "Quota " + quota->getName() + " (for user " + params.user_name + ") requires a client supplied key.", ErrorCodes::QUOTA_REQUIRES_CLIENT_KEY); } - case KeyType::CLIENT_KEY_OR_USER_NAME: + case QuotaKeyType::CLIENT_KEY_OR_USER_NAME: { if (!params.client_key.empty()) return params.client_key; return params.user_name; } - case KeyType::CLIENT_KEY_OR_IP_ADDRESS: + case QuotaKeyType::CLIENT_KEY_OR_IP_ADDRESS: { if (!params.client_key.empty()) return params.client_key; return params.client_address.toString(); } - case KeyType::MAX: break; + case QuotaKeyType::MAX: break; } throw Exception("Unexpected quota key type: " + std::to_string(static_cast(quota->key_type)), ErrorCodes::LOGICAL_ERROR); } @@ -113,7 +113,6 @@ boost::shared_ptr QuotaCache::QuotaInfo::rebuildI new_intervals->quota_key = key; auto & intervals = new_intervals->intervals; intervals.reserve(quota->all_limits.size()); - static constexpr auto MAX_RESOURCE_TYPE = Quota::MAX_RESOURCE_TYPE; for (const auto & limits : quota->all_limits) { intervals.emplace_back(); @@ -124,11 +123,12 @@ boost::shared_ptr QuotaCache::QuotaInfo::rebuildI if (limits.randomize_interval) end_of_interval += randomDuration(limits.duration); interval.end_of_interval = end_of_interval.time_since_epoch(); - for (auto 
resource_type : collections::range(MAX_RESOURCE_TYPE)) + for (auto quota_type : collections::range(QuotaType::MAX)) { - if (limits.max[resource_type]) - interval.max[resource_type] = *limits.max[resource_type]; - interval.used[resource_type] = 0; + auto quota_type_i = static_cast<size_t>(quota_type); + if (limits.max[quota_type_i]) + interval.max[quota_type_i] = *limits.max[quota_type_i]; + interval.used[quota_type_i] = 0; } } @@ -159,9 +159,10 @@ boost::shared_ptr<const EnabledQuota::Intervals> QuotaCache::QuotaInfo::rebuildI /// Found an interval with the same duration, we need to copy its usage information to `result`. const auto & current_interval = *lower_bound; - for (auto resource_type : collections::range(MAX_RESOURCE_TYPE)) + for (auto quota_type : collections::range(QuotaType::MAX)) { - new_interval.used[resource_type].store(current_interval.used[resource_type].load()); + auto quota_type_i = static_cast<size_t>(quota_type); + new_interval.used[quota_type_i].store(current_interval.used[quota_type_i].load()); new_interval.end_of_interval.store(current_interval.end_of_interval.load()); } } diff --git a/src/Access/QuotaCache.h b/src/Access/QuotaCache.h index 487c7a26487..77682230370 100644 --- a/src/Access/QuotaCache.h +++ b/src/Access/QuotaCache.h @@ -11,7 +11,9 @@ namespace DB { class AccessControl; - +struct Quota; +using QuotaPtr = std::shared_ptr<const Quota>; +struct RolesOrUsersSet; /// Stores information how much amount of resources have been consumed and how much are left. class QuotaCache diff --git a/src/Access/QuotaUsage.h b/src/Access/QuotaUsage.h index 9e53c0cf7d7..1d7e5258c47 100644 --- a/src/Access/QuotaUsage.h +++ b/src/Access/QuotaUsage.h @@ -1,7 +1,8 @@ #pragma once -#include +#include #include +#include namespace DB @@ -9,14 +10,10 @@ namespace DB /// The information about a quota consumption.
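// A possible implementation (assumed; not the project's actual randomDuration) of
// the interval randomization used above: shifting each quota window's end by a
// random amount keeps many users' intervals from expiring at the same moment.
#include <chrono>
#include <cstdint>
#include <random>

std::chrono::seconds randomDuration(std::chrono::seconds max)
{
    static thread_local std::mt19937_64 rng{std::random_device{}()};
    std::uniform_int_distribution<std::int64_t> dist(0, max.count());
    return std::chrono::seconds{dist(rng)};
}

std::chrono::system_clock::time_point intervalEnd(
    std::chrono::system_clock::time_point start, std::chrono::seconds duration, bool randomize)
{
    auto end = start + duration;
    if (randomize)
        end += randomDuration(duration);  // spread interval ends across users
    return end;
}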
struct QuotaUsage { - using ResourceType = Quota::ResourceType; - using ResourceAmount = Quota::ResourceAmount; - static constexpr auto MAX_RESOURCE_TYPE = Quota::MAX_RESOURCE_TYPE; - struct Interval { - ResourceAmount used[MAX_RESOURCE_TYPE]; - std::optional max[MAX_RESOURCE_TYPE]; + QuotaValue used[static_cast(QuotaType::MAX)]; + std::optional max[static_cast(QuotaType::MAX)]; std::chrono::seconds duration = std::chrono::seconds::zero(); bool randomize_interval = false; std::chrono::system_clock::time_point end_of_interval; diff --git a/src/Access/ReplicatedAccessStorage.cpp b/src/Access/ReplicatedAccessStorage.cpp index 7b29aab3a89..93b8a5c992a 100644 --- a/src/Access/ReplicatedAccessStorage.cpp +++ b/src/Access/ReplicatedAccessStorage.cpp @@ -94,7 +94,7 @@ static void retryOnZooKeeperUserError(size_t attempts, Func && function) UUID ReplicatedAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool replace_if_exists) { const UUID id = generateRandomID(); - const EntityTypeInfo type_info = EntityTypeInfo::get(new_entity->getType()); + const AccessEntityTypeInfo type_info = AccessEntityTypeInfo::get(new_entity->getType()); const String & name = new_entity->getName(); LOG_DEBUG(getLogger(), "Inserting entity of type {} named {} with id {}", type_info.name, name, toString(id)); @@ -113,8 +113,8 @@ void ReplicatedAccessStorage::insertZooKeeper( const zkutil::ZooKeeperPtr & zookeeper, const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists) { const String & name = new_entity->getName(); - const EntityType type = new_entity->getType(); - const EntityTypeInfo type_info = EntityTypeInfo::get(type); + const AccessEntityType type = new_entity->getType(); + const AccessEntityTypeInfo type_info = AccessEntityTypeInfo::get(type); const String entity_uuid = toString(id); /// The entity data will be stored here, this ensures all entities have unique ids @@ -143,7 +143,7 @@ void ReplicatedAccessStorage::insertZooKeeper( String existing_entity_definition = zookeeper->get(entity_path); AccessEntityPtr existing_entity = deserializeAccessEntity(existing_entity_definition, entity_path); - EntityType existing_type = existing_entity->getType(); + AccessEntityType existing_type = existing_entity->getType(); String existing_name = existing_entity->getName(); throwIDCollisionCannotInsert(id, type, name, existing_type, existing_name); } @@ -204,7 +204,7 @@ void ReplicatedAccessStorage::removeZooKeeper(const zkutil::ZooKeeperPtr & zooke throwNotFound(id); const AccessEntityPtr entity = deserializeAccessEntity(entity_definition, entity_path); - const EntityTypeInfo type_info = EntityTypeInfo::get(entity->getType()); + const AccessEntityTypeInfo type_info = AccessEntityTypeInfo::get(entity->getType()); const String & name = entity->getName(); const String entity_name_path = zookeeper_path + "/" + type_info.unique_char + "/" + escapeForFileName(name); @@ -248,7 +248,7 @@ void ReplicatedAccessStorage::updateZooKeeper(const zkutil::ZooKeeperPtr & zooke if (!new_entity->isTypeOf(old_entity->getType())) throwBadCast(id, new_entity->getType(), new_entity->getName(), old_entity->getType()); - const EntityTypeInfo type_info = EntityTypeInfo::get(new_entity->getType()); + const AccessEntityTypeInfo type_info = AccessEntityTypeInfo::get(new_entity->getType()); Coordination::Requests ops; const String new_entity_definition = serializeAccessEntity(*new_entity); @@ -309,7 +309,7 @@ void ReplicatedAccessStorage::resetAfterError() while (refresh_queue.tryPop(id)) {} std::lock_guard lock{mutex}; - for 
(const auto type : collections::range(EntityType::MAX)) + for (const auto type : collections::range(AccessEntityType::MAX)) entries_by_name_and_type[static_cast<size_t>(type)].clear(); entries_by_id.clear(); } @@ -334,10 +334,10 @@ void ReplicatedAccessStorage::createRootNodes(const zkutil::ZooKeeperPtr & zooke zookeeper->createAncestors(zookeeper_path); zookeeper->createIfNotExists(zookeeper_path, ""); zookeeper->createIfNotExists(zookeeper_path + "/uuid", ""); - for (const auto type : collections::range(EntityType::MAX)) + for (const auto type : collections::range(AccessEntityType::MAX)) { /// Create a znode for each type of AccessEntity - const auto type_info = EntityTypeInfo::get(type); + const auto type_info = AccessEntityTypeInfo::get(type); zookeeper->createIfNotExists(zookeeper_path + "/" + type_info.unique_char, ""); } } @@ -440,7 +440,7 @@ void ReplicatedAccessStorage::refreshEntityNoLock(const zkutil::ZooKeeperPtr & z void ReplicatedAccessStorage::setEntityNoLock(const UUID & id, const AccessEntityPtr & entity, Notifications & notifications) { LOG_DEBUG(getLogger(), "Setting id {} to entity named {}", toString(id), entity->getName()); - const EntityType type = entity->getType(); + const AccessEntityType type = entity->getType(); const String & name = entity->getName(); /// If the type+name already exists and is a different entity, remove old entity @@ -454,7 +454,7 @@ void ReplicatedAccessStorage::setEntityNoLock(const UUID & id, const AccessEntit if (auto it = entries_by_id.find(id); it != entries_by_id.end()) { const AccessEntityPtr & existing_entity = it->second.entity; - const EntityType existing_type = existing_entity->getType(); + const AccessEntityType existing_type = existing_entity->getType(); const String & existing_name = existing_entity->getName(); if (existing_type != type || existing_name != name) { @@ -482,7 +482,7 @@ void ReplicatedAccessStorage::removeEntityNoLock(const UUID & id, Notifications } const Entry & entry = it->second; - const EntityType type = entry.entity->getType(); + const AccessEntityType type = entry.entity->getType(); const String & name = entry.entity->getName(); prepareNotifications(entry, true, notifications); @@ -500,7 +500,7 @@ void ReplicatedAccessStorage::removeEntityNoLock(const UUID & id, Notifications } -std::optional<UUID> ReplicatedAccessStorage::findImpl(EntityType type, const String & name) const +std::optional<UUID> ReplicatedAccessStorage::findImpl(AccessEntityType type, const String & name) const { std::lock_guard lock{mutex}; const auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)]; @@ -513,7 +513,7 @@ std::optional<UUID> ReplicatedAccessStorage::findImpl(EntityType type, const Str } -std::vector<UUID> ReplicatedAccessStorage::findAllImpl(EntityType type) const +std::vector<UUID> ReplicatedAccessStorage::findAllImpl(AccessEntityType type) const { std::lock_guard lock{mutex}; std::vector<UUID> result; @@ -560,7 +560,7 @@ void ReplicatedAccessStorage::prepareNotifications(const Entry & entry, bool rem } -scope_guard ReplicatedAccessStorage::subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const +scope_guard ReplicatedAccessStorage::subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const { std::lock_guard lock{mutex}; auto & handlers = handlers_by_type[static_cast<size_t>(type)]; @@ -611,7 +611,7 @@ bool ReplicatedAccessStorage::hasSubscriptionImpl(const UUID & id) const } -bool
ReplicatedAccessStorage::hasSubscriptionImpl(AccessEntityType type) const { std::lock_guard lock{mutex}; const auto & handlers = handlers_by_type[static_cast<size_t>(type)]; diff --git a/src/Access/ReplicatedAccessStorage.h b/src/Access/ReplicatedAccessStorage.h index 458bc0d614b..54dbfbf5b7d 100644 --- a/src/Access/ReplicatedAccessStorage.h +++ b/src/Access/ReplicatedAccessStorage.h @@ -69,8 +69,8 @@ private: mutable std::list<OnChangedHandler> handlers_by_id; }; - std::optional<UUID> findImpl(EntityType type, const String & name) const override; - std::vector<UUID> findAllImpl(EntityType type) const override; + std::optional<UUID> findImpl(AccessEntityType type, const String & name) const override; + std::vector<UUID> findAllImpl(AccessEntityType type) const override; bool existsImpl(const UUID & id) const override; AccessEntityPtr readImpl(const UUID & id) const override; String readNameImpl(const UUID & id) const override; @@ -78,13 +78,13 @@ private: void prepareNotifications(const Entry & entry, bool remove, Notifications & notifications) const; scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override; - scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const override; + scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const override; bool hasSubscriptionImpl(const UUID & id) const override; - bool hasSubscriptionImpl(EntityType type) const override; + bool hasSubscriptionImpl(AccessEntityType type) const override; mutable std::mutex mutex; std::unordered_map<UUID, Entry> entries_by_id; - std::unordered_map<String, Entry *> entries_by_name_and_type[static_cast<size_t>(EntityType::MAX)]; - mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(EntityType::MAX)]; + std::unordered_map<String, Entry *> entries_by_name_and_type[static_cast<size_t>(AccessEntityType::MAX)]; + mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(AccessEntityType::MAX)]; }; } diff --git a/src/Access/Role.h b/src/Access/Role.h index 131bbd69195..c7f1e107d24 100644 --- a/src/Access/Role.h +++ b/src/Access/Role.h @@ -17,8 +17,8 @@ struct Role : public IAccessEntity bool equal(const IAccessEntity & other) const override; std::shared_ptr<IAccessEntity> clone() const override { return cloneImpl<Role>(); } - static constexpr const Type TYPE = Type::ROLE; - Type getType() const override { return TYPE; } + static constexpr const auto TYPE = AccessEntityType::ROLE; + AccessEntityType getType() const override { return TYPE; } }; using RolePtr = std::shared_ptr<const Role>; diff --git a/src/Access/RowPolicy.cpp b/src/Access/RowPolicy.cpp index 7441f915a46..c09675e0e34 100644 --- a/src/Access/RowPolicy.cpp +++ b/src/Access/RowPolicy.cpp @@ -13,34 +13,34 @@ namespace ErrorCodes void RowPolicy::setDatabase(const String & database) { - name_parts.database = database; - IAccessEntity::setName(name_parts.getName()); + full_name.database = database; + IAccessEntity::setName(full_name.toString()); } void RowPolicy::setTableName(const String & table_name) { - name_parts.table_name = table_name; - IAccessEntity::setName(name_parts.getName()); + full_name.table_name = table_name; + IAccessEntity::setName(full_name.toString()); } void RowPolicy::setShortName(const String & short_name) { - name_parts.short_name = short_name; - IAccessEntity::setName(name_parts.getName()); + full_name.short_name = short_name; + IAccessEntity::setName(full_name.toString()); } -void RowPolicy::setNameParts(const String & short_name, const String & database, const String & table_name) +void RowPolicy::setFullName(const String & short_name, const String & database, const String &
table_name) { - name_parts.short_name = short_name; - name_parts.database = database; - name_parts.table_name = table_name; - IAccessEntity::setName(name_parts.getName()); + full_name.short_name = short_name; + full_name.database = database; + full_name.table_name = table_name; + IAccessEntity::setName(full_name.toString()); } -void RowPolicy::setNameParts(const NameParts & name_parts_) +void RowPolicy::setFullName(const RowPolicyName & full_name_) { - name_parts = name_parts_; - IAccessEntity::setName(name_parts.getName()); + full_name = full_name_; + IAccessEntity::setName(full_name.toString()); } void RowPolicy::setName(const String &) @@ -54,7 +54,7 @@ bool RowPolicy::equal(const IAccessEntity & other) const if (!IAccessEntity::equal(other)) return false; const auto & other_policy = typeid_cast(other); - return (name_parts == other_policy.name_parts) && boost::range::equal(conditions, other_policy.conditions) + return (full_name == other_policy.full_name) && boost::range::equal(filters, other_policy.filters) && restrictive == other_policy.restrictive && (to_roles == other_policy.to_roles); } diff --git a/src/Access/RowPolicy.h b/src/Access/RowPolicy.h index 723db545dbe..9c143aff725 100644 --- a/src/Access/RowPolicy.h +++ b/src/Access/RowPolicy.h @@ -2,77 +2,32 @@ #include #include +#include #include #include namespace DB { -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - /** Represents a row level security policy for a table. */ struct RowPolicy : public IAccessEntity { - struct NameParts - { - String short_name; - String database; - String table_name; - - bool empty() const { return short_name.empty(); } - String getName() const; - String toString() const { return getName(); } - auto toTuple() const { return std::tie(short_name, database, table_name); } - friend bool operator ==(const NameParts & left, const NameParts & right) { return left.toTuple() == right.toTuple(); } - friend bool operator !=(const NameParts & left, const NameParts & right) { return left.toTuple() != right.toTuple(); } - }; - void setShortName(const String & short_name); void setDatabase(const String & database); void setTableName(const String & table_name); - void setNameParts(const String & short_name, const String & database, const String & table_name); - void setNameParts(const NameParts & name_parts); + void setFullName(const String & short_name, const String & database, const String & table_name); + void setFullName(const RowPolicyName & full_name_); - const String & getDatabase() const { return name_parts.database; } - const String & getTableName() const { return name_parts.table_name; } - const String & getShortName() const { return name_parts.short_name; } - const NameParts & getNameParts() const { return name_parts; } + const String & getDatabase() const { return full_name.database; } + const String & getTableName() const { return full_name.table_name; } + const String & getShortName() const { return full_name.short_name; } + const RowPolicyName & getFullName() const { return full_name; } - /// Filter is a SQL conditional expression used to figure out which rows should be visible - /// for user or available for modification. If the expression returns NULL or false for some rows - /// those rows are silently suppressed. - /// Check is a SQL condition expression used to check whether a row can be written into - /// the table. If the expression returns NULL or false an exception is thrown. - /// If a conditional expression here is empty it means no filtering is applied. 
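// A sketch of how a row policy's display name is assembled (simplified
// backQuoteIfNeed; the real logic lives in the NameParts::getName() helper removed
// further below and in its RowPolicyName::toString() replacement): the result has
// the form `short_name ON database.table`.
#include <string>

std::string backQuoteIfNeed(const std::string & s) { return "`" + s + "`"; }  // simplified stand-in

std::string rowPolicyFullName(const std::string & short_name, const std::string & database, const std::string & table_name)
{
    std::string name = backQuoteIfNeed(short_name) + " ON ";
    if (!database.empty())
        name += backQuoteIfNeed(database) + ".";
    return name + backQuoteIfNeed(table_name);
}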
- enum ConditionType - { - SELECT_FILTER, - -#if 0 /// Row-level security for INSERT, UPDATE, DELETE is not implemented yet. - INSERT_CHECK, - UPDATE_FILTER, - UPDATE_CHECK, - DELETE_FILTER, -#endif - - MAX_CONDITION_TYPE - }; - - struct ConditionTypeInfo - { - const char * const raw_name; - const String name; /// Lowercased with underscores, e.g. "select_filter". - const String command; /// Uppercased without last word, e.g. "SELECT". - const bool is_check; /// E.g. false for SELECT_FILTER. - static const ConditionTypeInfo & get(ConditionType type); - }; - - std::array conditions; + /// A SQL conditional expression used to figure out which rows should be visible + /// for user or available for modification. + std::array(RowPolicyFilterType::MAX)> filters; /// Sets that the policy is permissive. /// A row is only accessible if at least one of the permissive policies passes, @@ -88,88 +43,19 @@ struct RowPolicy : public IAccessEntity bool equal(const IAccessEntity & other) const override; std::shared_ptr clone() const override { return cloneImpl(); } - static constexpr const Type TYPE = Type::ROW_POLICY; - Type getType() const override { return TYPE; } + static constexpr const auto TYPE = AccessEntityType::ROW_POLICY; + AccessEntityType getType() const override { return TYPE; } /// Which roles or users should use this row policy. RolesOrUsersSet to_roles; private: - void setName(const String & name_) override; + void setName(const String &) override; - NameParts name_parts; + RowPolicyName full_name; bool restrictive = false; }; using RowPolicyPtr = std::shared_ptr; - -inline const RowPolicy::ConditionTypeInfo & RowPolicy::ConditionTypeInfo::get(ConditionType type_) -{ - static constexpr auto make_info = [](const char * raw_name_) - { - String init_name = raw_name_; - boost::to_lower(init_name); - size_t underscore_pos = init_name.find('_'); - String init_command = init_name.substr(0, underscore_pos); - boost::to_upper(init_command); - bool init_is_check = (std::string_view{init_name}.substr(underscore_pos + 1) == "check"); - return ConditionTypeInfo{raw_name_, std::move(init_name), std::move(init_command), init_is_check}; - }; - - switch (type_) - { - case SELECT_FILTER: - { - static const ConditionTypeInfo info = make_info("SELECT_FILTER"); - return info; - } -#if 0 /// Row-level security for INSERT, UPDATE, DELETE is not implemented yet. 
- case INSERT_CHECK: - { - static const ConditionTypeInfo info = make_info("INSERT_CHECK"); - return info; - } - case UPDATE_FILTER: - { - static const ConditionTypeInfo info = make_info("UPDATE_FILTER"); - return info; - } - case UPDATE_CHECK: - { - static const ConditionTypeInfo info = make_info("UPDATE_CHECK"); - return info; - } - case DELETE_FILTER: - { - static const ConditionTypeInfo info = make_info("DELETE_FILTER"); - return info; - } -#endif - case MAX_CONDITION_TYPE: break; - } - throw Exception("Unknown type: " + std::to_string(static_cast(type_)), ErrorCodes::LOGICAL_ERROR); -} - -inline String toString(RowPolicy::ConditionType type) -{ - return RowPolicy::ConditionTypeInfo::get(type).raw_name; -} - - -inline String RowPolicy::NameParts::getName() const -{ - String name; - name.reserve(database.length() + table_name.length() + short_name.length() + 6); - name += backQuoteIfNeed(short_name); - name += " ON "; - if (!database.empty()) - { - name += backQuoteIfNeed(database); - name += '.'; - } - name += backQuoteIfNeed(table_name); - return name; -} - } diff --git a/src/Access/RowPolicyCache.cpp b/src/Access/RowPolicyCache.cpp index b5b6dd99438..55bec427158 100644 --- a/src/Access/RowPolicyCache.cpp +++ b/src/Access/RowPolicyCache.cpp @@ -1,6 +1,7 @@ #include -#include #include +#include +#include #include #include #include @@ -15,35 +16,31 @@ namespace DB { namespace { - using ConditionType = RowPolicy::ConditionType; - constexpr auto MAX_CONDITION_TYPE = RowPolicy::MAX_CONDITION_TYPE; - - - /// Accumulates conditions from multiple row policies and joins them using the AND logical operation. - class ConditionsMixer + /// Accumulates filters from multiple row policies and joins them using the AND logical operation. + class FiltersMixer { public: - void add(const ASTPtr & condition, bool is_restrictive) + void add(const ASTPtr & filter, bool is_restrictive) { if (is_restrictive) - restrictions.push_back(condition); + restrictions.push_back(filter); else - permissions.push_back(condition); + permissions.push_back(filter); } ASTPtr getResult() && { - /// Process permissive conditions. + /// Process permissive filters. restrictions.push_back(makeASTForLogicalOr(std::move(permissions))); - /// Process restrictive conditions. - auto condition = makeASTForLogicalAnd(std::move(restrictions)); + /// Process restrictive filters. + auto result = makeASTForLogicalAnd(std::move(restrictions)); bool value; - if (tryGetLiteralBool(condition.get(), value) && value) - condition = nullptr; /// The condition is always true, no need to check it. + if (tryGetLiteralBool(result.get(), value) && value) + result = nullptr; /// The condition is always true, no need to check it. 
- return condition; + return result; } private: @@ -59,33 +56,34 @@ void RowPolicyCache::PolicyInfo::setPolicy(const RowPolicyPtr & policy_) roles = &policy->to_roles; database_and_table_name = std::make_shared>(policy->getDatabase(), policy->getTableName()); - for (auto type : collections::range(0, MAX_CONDITION_TYPE)) + for (auto filter_type : collections::range(0, RowPolicyFilterType::MAX)) { - parsed_conditions[type] = nullptr; - const String & condition = policy->conditions[type]; - if (condition.empty()) + auto filter_type_i = static_cast(filter_type); + parsed_filters[filter_type_i] = nullptr; + const String & filter = policy->filters[filter_type_i]; + if (filter.empty()) continue; - auto previous_range = std::pair(std::begin(policy->conditions), std::begin(policy->conditions) + type); - const auto * previous_it = std::find(previous_range.first, previous_range.second, condition); + auto previous_range = std::pair(std::begin(policy->filters), std::begin(policy->filters) + filter_type_i); + const auto * previous_it = std::find(previous_range.first, previous_range.second, filter); if (previous_it != previous_range.second) { - /// The condition is already parsed before. - parsed_conditions[type] = parsed_conditions[previous_it - previous_range.first]; + /// The filter is already parsed before. + parsed_filters[filter_type_i] = parsed_filters[previous_it - previous_range.first]; continue; } - /// Try to parse the condition. + /// Try to parse the filter. try { ParserExpression parser; - parsed_conditions[type] = parseQuery(parser, condition, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + parsed_filters[filter_type_i] = parseQuery(parser, filter, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); } catch (...) { tryLogCurrentException( &Poco::Logger::get("RowPolicy"), - String("Could not parse the condition ") + toString(type) + " of row policy " + String("Could not parse the condition ") + toString(filter_type) + " of row policy " + backQuote(policy->getName())); } } @@ -119,7 +117,7 @@ std::shared_ptr RowPolicyCache::getEnabledRowPolicies( auto res = std::shared_ptr(new EnabledRowPolicies(params)); enabled_row_policies.emplace(std::move(params), res); - mixConditionsFor(*res); + mixFiltersFor(*res); return res; } @@ -165,7 +163,7 @@ void RowPolicyCache::rowPolicyAddedOrChanged(const UUID & policy_id, const RowPo auto & info = it->second; info.setPolicy(new_policy); - mixConditions(); + mixFilters(); } @@ -173,11 +171,11 @@ void RowPolicyCache::rowPolicyRemoved(const UUID & policy_id) { std::lock_guard lock{mutex}; all_policies.erase(policy_id); - mixConditions(); + mixFilters(); } -void RowPolicyCache::mixConditions() +void RowPolicyCache::mixFilters() { /// `mutex` is already locked. for (auto i = enabled_row_policies.begin(), e = enabled_row_policies.end(); i != e;) @@ -187,58 +185,59 @@ void RowPolicyCache::mixConditions() i = enabled_row_policies.erase(i); else { - mixConditionsFor(*elem); + mixFiltersFor(*elem); ++i; } } } -void RowPolicyCache::mixConditionsFor(EnabledRowPolicies & enabled) +void RowPolicyCache::mixFiltersFor(EnabledRowPolicies & enabled) { /// `mutex` is already locked. 
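// A sketch of the mixing rule implemented by FiltersMixer above, with string
// formulas standing in for ASTs: permissive filters are OR-ed together, and the
// result is AND-ed with every restrictive filter.
#include <string>
#include <vector>

std::string joinWith(const std::vector<std::string> & parts, const std::string & op)
{
    std::string res;
    for (const auto & p : parts)
        res += (res.empty() ? "" : " " + op + " ") + ("(" + p + ")");
    return res;
}

std::string mixFilters(const std::vector<std::string> & permissive, const std::vector<std::string> & restrictive)
{
    std::vector<std::string> all = restrictive;
    if (!permissive.empty())
        all.push_back(joinWith(permissive, "OR"));  // a row passes if any permissive policy passes...
    return joinWith(all, "AND");                    // ...and every restrictive policy passes
}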
- using MapOfMixedConditions = EnabledRowPolicies::MapOfMixedConditions; - using MixedConditionKey = EnabledRowPolicies::MixedConditionKey; + using MixedFiltersMap = EnabledRowPolicies::MixedFiltersMap; + using MixedFiltersKey = EnabledRowPolicies::MixedFiltersKey; using Hash = EnabledRowPolicies::Hash; struct MixerWithNames { - ConditionsMixer mixer; + FiltersMixer mixer; std::shared_ptr<const std::pair<String, String>> database_and_table_name; }; - std::unordered_map<MixedConditionKey, MixerWithNames, Hash> map_of_mixers; + std::unordered_map<MixedFiltersKey, MixerWithNames, Hash> mixers; for (const auto & [policy_id, info] : all_policies) { const auto & policy = *info.policy; bool match = info.roles->match(enabled.params.user_id, enabled.params.enabled_roles); - MixedConditionKey key; + MixedFiltersKey key; key.database = info.database_and_table_name->first; key.table_name = info.database_and_table_name->second; - for (auto type : collections::range(0, MAX_CONDITION_TYPE)) + for (auto filter_type : collections::range(0, RowPolicyFilterType::MAX)) { - if (info.parsed_conditions[type]) + auto filter_type_i = static_cast<size_t>(filter_type); + if (info.parsed_filters[filter_type_i]) { - key.condition_type = type; - auto & mixer = map_of_mixers[key]; + key.filter_type = filter_type; + auto & mixer = mixers[key]; mixer.database_and_table_name = info.database_and_table_name; if (match) - mixer.mixer.add(info.parsed_conditions[type], policy.isRestrictive()); + mixer.mixer.add(info.parsed_filters[filter_type_i], policy.isRestrictive()); } } } - auto map_of_mixed_conditions = boost::make_shared<MapOfMixedConditions>(); - for (auto & [key, mixer] : map_of_mixers) + auto mixed_filters = boost::make_shared<MixedFiltersMap>(); + for (auto & [key, mixer] : mixers) { - auto & mixed_condition = (*map_of_mixed_conditions)[key]; - mixed_condition.database_and_table_name = mixer.database_and_table_name; - mixed_condition.ast = std::move(mixer.mixer).getResult(); + auto & mixed_filter = (*mixed_filters)[key]; + mixed_filter.database_and_table_name = mixer.database_and_table_name; + mixed_filter.ast = std::move(mixer.mixer).getResult(); } - enabled.map_of_mixed_conditions.store(map_of_mixed_conditions); + enabled.mixed_filters.store(mixed_filters); } } diff --git a/src/Access/RowPolicyCache.h b/src/Access/RowPolicyCache.h index 6834def58b6..dc416fe59f0 100644 --- a/src/Access/RowPolicyCache.h +++ b/src/Access/RowPolicyCache.h @@ -10,6 +10,9 @@ namespace DB { class AccessControl; +struct RolesOrUsersSet; +struct RowPolicy; +using RowPolicyPtr = std::shared_ptr<const RowPolicy>; /// Stores read and parsed row policies.
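// A sketch (member and hash details assumed) of the cache keying visible above:
// mixed filters are stored per (database, table, filter type) triple, which is
// what MixedFiltersKey models.
#include <cstddef>
#include <functional>
#include <string>
#include <unordered_map>

enum class RowPolicyFilterType { SELECT_FILTER, MAX };

struct MixedFiltersKey
{
    std::string database;
    std::string table_name;
    RowPolicyFilterType filter_type = RowPolicyFilterType::SELECT_FILTER;

    bool operator==(const MixedFiltersKey & o) const
    {
        return database == o.database && table_name == o.table_name && filter_type == o.filter_type;
    }
};

struct MixedFiltersKeyHash
{
    std::size_t operator()(const MixedFiltersKey & k) const
    {
        std::size_t h = std::hash<std::string>{}(k.database);
        h = h * 31 + std::hash<std::string>{}(k.table_name);
        return h * 31 + static_cast<std::size_t>(k.filter_type);
    }
};

using MixedFiltersMap = std::unordered_map<MixedFiltersKey, std::string, MixedFiltersKeyHash>;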
class RowPolicyCache @@ -29,14 +32,14 @@ private: RowPolicyPtr policy; const RolesOrUsersSet * roles = nullptr; std::shared_ptr<const std::pair<String, String>> database_and_table_name; - ASTPtr parsed_conditions[RowPolicy::MAX_CONDITION_TYPE]; + ASTPtr parsed_filters[static_cast<size_t>(RowPolicyFilterType::MAX)]; }; void ensureAllRowPoliciesRead(); void rowPolicyAddedOrChanged(const UUID & policy_id, const RowPolicyPtr & new_policy); void rowPolicyRemoved(const UUID & policy_id); - void mixConditions(); - void mixConditionsFor(EnabledRowPolicies & enabled); + void mixFilters(); + void mixFiltersFor(EnabledRowPolicies & enabled); const AccessControl & access_control; std::unordered_map<UUID, PolicyInfo> all_policies; diff --git a/src/Access/SettingsProfile.h b/src/Access/SettingsProfile.h index 210aa47c358..e554924b45e 100644 --- a/src/Access/SettingsProfile.h +++ b/src/Access/SettingsProfile.h @@ -18,8 +18,8 @@ struct SettingsProfile : public IAccessEntity bool equal(const IAccessEntity & other) const override; std::shared_ptr<IAccessEntity> clone() const override { return cloneImpl<SettingsProfile>(); } - static constexpr const Type TYPE = Type::SETTINGS_PROFILE; - Type getType() const override { return TYPE; } + static constexpr const auto TYPE = AccessEntityType::SETTINGS_PROFILE; + AccessEntityType getType() const override { return TYPE; } }; using SettingsProfilePtr = std::shared_ptr<const SettingsProfile>; diff --git a/src/Access/User.h b/src/Access/User.h index 34badd5f847..b9167d68f15 100644 --- a/src/Access/User.h +++ b/src/Access/User.h @@ -26,8 +26,8 @@ struct User : public IAccessEntity bool equal(const IAccessEntity & other) const override; std::shared_ptr<IAccessEntity> clone() const override { return cloneImpl<User>(); } - static constexpr const Type TYPE = Type::USER; - Type getType() const override { return TYPE; } + static constexpr const auto TYPE = AccessEntityType::USER; + AccessEntityType getType() const override { return TYPE; } }; using UserPtr = std::shared_ptr<const User>; diff --git a/src/Access/UsersConfigAccessStorage.cpp b/src/Access/UsersConfigAccessStorage.cpp index 2d202c5094d..dbaf4e002b1 100644 --- a/src/Access/UsersConfigAccessStorage.cpp +++ b/src/Access/UsersConfigAccessStorage.cpp @@ -33,15 +33,12 @@ namespace ErrorCodes namespace { - using EntityType = IAccessStorage::EntityType; - using EntityTypeInfo = IAccessStorage::EntityTypeInfo; - - UUID generateID(EntityType type, const String & name) + UUID generateID(AccessEntityType type, const String & name) { Poco::MD5Engine md5; md5.update(name); char type_storage_chars[] = " USRSXML"; - type_storage_chars[0] = EntityTypeInfo::get(type).unique_char; + type_storage_chars[0] = AccessEntityTypeInfo::get(type).unique_char; md5.update(type_storage_chars, strlen(type_storage_chars)); UUID result; memcpy(&result, md5.digest().data(), md5.digestLength()); @@ -114,7 +111,7 @@ namespace { auto profile_name = config.getString(profile_name_config); SettingsProfileElement profile_element; - profile_element.parent_profile = generateID(EntityType::SETTINGS_PROFILE, profile_name); + profile_element.parent_profile = generateID(AccessEntityType::SETTINGS_PROFILE, profile_name); user->settings.push_back(std::move(profile_element)); } @@ -211,8 +208,19 @@ namespace std::vector<AccessEntityPtr> users; users.reserve(user_names.size()); + for (const auto & user_name : user_names) - users.push_back(parseUser(config, user_name)); + { + try + { + users.push_back(parseUser(config, user_name)); + } + catch (Exception & e) + { + e.addMessage(fmt::format("while parsing user '{}' in users configuration file", user_name)); + throw; + } + } return users; } @@ -223,16 +231,15 @@ namespace auto
quota = std::make_shared(); quota->setName(quota_name); - using KeyType = Quota::KeyType; String quota_config = "quotas." + quota_name; if (config.has(quota_config + ".keyed_by_ip")) - quota->key_type = KeyType::IP_ADDRESS; + quota->key_type = QuotaKeyType::IP_ADDRESS; else if (config.has(quota_config + ".keyed_by_forwarded_ip")) - quota->key_type = KeyType::FORWARDED_IP_ADDRESS; + quota->key_type = QuotaKeyType::FORWARDED_IP_ADDRESS; else if (config.has(quota_config + ".keyed")) - quota->key_type = KeyType::CLIENT_KEY_OR_USER_NAME; + quota->key_type = QuotaKeyType::CLIENT_KEY_OR_USER_NAME; else - quota->key_type = KeyType::USER_NAME; + quota->key_type = QuotaKeyType::USER_NAME; Poco::Util::AbstractConfiguration::Keys interval_keys; config.keys(quota_config, interval_keys); @@ -252,12 +259,12 @@ namespace limits.duration = duration; limits.randomize_interval = config.getBool(interval_config + ".randomize", false); - for (auto resource_type : collections::range(Quota::MAX_RESOURCE_TYPE)) + for (auto quota_type : collections::range(QuotaType::MAX)) { - const auto & type_info = Quota::ResourceTypeInfo::get(resource_type); + const auto & type_info = QuotaTypeInfo::get(quota_type); auto value = config.getString(interval_config + "." + type_info.name, "0"); if (value != "0") - limits.max[resource_type] = type_info.amountFromString(value); + limits.max[static_cast(quota_type)] = type_info.stringToValue(value); } } @@ -274,19 +281,30 @@ namespace for (const auto & user_name : user_names) { if (config.has("users." + user_name + ".quota")) - quota_to_user_ids[config.getString("users." + user_name + ".quota")].push_back(generateID(EntityType::USER, user_name)); + quota_to_user_ids[config.getString("users." + user_name + ".quota")].push_back(generateID(AccessEntityType::USER, user_name)); } Poco::Util::AbstractConfiguration::Keys quota_names; config.keys("quotas", quota_names); + std::vector quotas; quotas.reserve(quota_names.size()); + for (const auto & quota_name : quota_names) { - auto it = quota_to_user_ids.find(quota_name); - const std::vector & quota_users = (it != quota_to_user_ids.end()) ? std::move(it->second) : std::vector{}; - quotas.push_back(parseQuota(config, quota_name, quota_users)); + try + { + auto it = quota_to_user_ids.find(quota_name); + const std::vector & quota_users = (it != quota_to_user_ids.end()) ? std::move(it->second) : std::vector{}; + quotas.push_back(parseQuota(config, quota_name, quota_users)); + } + catch (Exception & e) + { + e.addMessage(fmt::format("while parsing quota '{}' in users configuration file", quota_name)); + throw; + } } + return quotas; } @@ -351,9 +369,9 @@ namespace String filter = (it != user_to_filters.end()) ? it->second : "1"; auto policy = std::make_shared(); - policy->setNameParts(user_name, database, table_name); - policy->conditions[RowPolicy::SELECT_FILTER] = filter; - policy->to_roles.add(generateID(EntityType::USER, user_name)); + policy->setFullName(user_name, database, table_name); + policy->filters[static_cast(RowPolicyFilterType::SELECT_FILTER)] = filter; + policy->to_roles.add(generateID(AccessEntityType::USER, user_name)); policies.push_back(policy); } } @@ -415,7 +433,7 @@ namespace { String parent_profile_name = config.getString(profile_config + "." 
+ key); SettingsProfileElement profile_element; - profile_element.parent_profile = generateID(EntityType::SETTINGS_PROFILE, parent_profile_name); + profile_element.parent_profile = generateID(AccessEntityType::SETTINGS_PROFILE, parent_profile_name); profile->elements.emplace_back(std::move(profile_element)); continue; } @@ -444,11 +462,24 @@ namespace const Poco::Util::AbstractConfiguration & config, Fn auto && check_setting_name_function) { - std::vector profiles; Poco::Util::AbstractConfiguration::Keys profile_names; config.keys("profiles", profile_names); + + std::vector profiles; + profiles.reserve(profile_names.size()); + for (const auto & profile_name : profile_names) - profiles.push_back(parseSettingsProfile(config, profile_name, check_setting_name_function)); + { + try + { + profiles.push_back(parseSettingsProfile(config, profile_name, check_setting_name_function)); + } + catch (Exception & e) + { + e.addMessage(fmt::format("while parsing profile '{}' in users configuration file", profile_name)); + throw; + } + } return profiles; } @@ -503,16 +534,24 @@ void UsersConfigAccessStorage::setConfig(const Poco::Util::AbstractConfiguration void UsersConfigAccessStorage::parseFromConfig(const Poco::Util::AbstractConfiguration & config) { - std::vector> all_entities; - for (const auto & entity : parseUsers(config)) - all_entities.emplace_back(generateID(*entity), entity); - for (const auto & entity : parseQuotas(config)) - all_entities.emplace_back(generateID(*entity), entity); - for (const auto & entity : parseRowPolicies(config)) - all_entities.emplace_back(generateID(*entity), entity); - for (const auto & entity : parseSettingsProfiles(config, check_setting_name_function)) - all_entities.emplace_back(generateID(*entity), entity); - memory_storage.setAll(all_entities); + try + { + std::vector> all_entities; + for (const auto & entity : parseUsers(config)) + all_entities.emplace_back(generateID(*entity), entity); + for (const auto & entity : parseQuotas(config)) + all_entities.emplace_back(generateID(*entity), entity); + for (const auto & entity : parseRowPolicies(config)) + all_entities.emplace_back(generateID(*entity), entity); + for (const auto & entity : parseSettingsProfiles(config, check_setting_name_function)) + all_entities.emplace_back(generateID(*entity), entity); + memory_storage.setAll(all_entities); + } + catch (Exception & e) + { + e.addMessage(fmt::format("while loading {}", path.empty() ? 
"configuration" : ("configuration file " + quoteString(path)))); + throw; + } } void UsersConfigAccessStorage::load( @@ -552,13 +591,13 @@ void UsersConfigAccessStorage::startPeriodicReloading() config_reloader->start(); } -std::optional UsersConfigAccessStorage::findImpl(EntityType type, const String & name) const +std::optional UsersConfigAccessStorage::findImpl(AccessEntityType type, const String & name) const { return memory_storage.find(type, name); } -std::vector UsersConfigAccessStorage::findAllImpl(EntityType type) const +std::vector UsersConfigAccessStorage::findAllImpl(AccessEntityType type) const { return memory_storage.findAll(type); } @@ -608,7 +647,7 @@ scope_guard UsersConfigAccessStorage::subscribeForChangesImpl(const UUID & id, c } -scope_guard UsersConfigAccessStorage::subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const +scope_guard UsersConfigAccessStorage::subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const { return memory_storage.subscribeForChanges(type, handler); } @@ -620,7 +659,7 @@ bool UsersConfigAccessStorage::hasSubscriptionImpl(const UUID & id) const } -bool UsersConfigAccessStorage::hasSubscriptionImpl(EntityType type) const +bool UsersConfigAccessStorage::hasSubscriptionImpl(AccessEntityType type) const { return memory_storage.hasSubscription(type); } diff --git a/src/Access/UsersConfigAccessStorage.h b/src/Access/UsersConfigAccessStorage.h index 97164fdb233..7fb08790f77 100644 --- a/src/Access/UsersConfigAccessStorage.h +++ b/src/Access/UsersConfigAccessStorage.h @@ -43,8 +43,8 @@ public: private: void parseFromConfig(const Poco::Util::AbstractConfiguration & config); - std::optional findImpl(EntityType type, const String & name) const override; - std::vector findAllImpl(EntityType type) const override; + std::optional findImpl(AccessEntityType type, const String & name) const override; + std::vector findAllImpl(AccessEntityType type) const override; bool existsImpl(const UUID & id) const override; AccessEntityPtr readImpl(const UUID & id) const override; String readNameImpl(const UUID & id) const override; @@ -53,9 +53,9 @@ private: void removeImpl(const UUID & id) override; void updateImpl(const UUID & id, const UpdateFunc & update_func) override; scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override; - scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const override; + scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const override; bool hasSubscriptionImpl(const UUID & id) const override; - bool hasSubscriptionImpl(EntityType type) const override; + bool hasSubscriptionImpl(AccessEntityType type) const override; MemoryAccessStorage memory_storage; CheckSettingNameFunction check_setting_name_function; diff --git a/src/Backups/BackupUtils.cpp b/src/Backups/BackupUtils.cpp index bd5a31e0bf7..5da87cfd6f7 100644 --- a/src/Backups/BackupUtils.cpp +++ b/src/Backups/BackupUtils.cpp @@ -312,11 +312,11 @@ namespace String getDataPathInBackup(const IAST & create_query) { const auto & create = create_query.as(); - if (create.table.empty()) + if (!create.table) return {}; if (create.temporary) - return getDataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.table}); - return getDataPathInBackup({create.database, create.table}); + return getDataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.getTable()}); + return getDataPathInBackup({create.getDatabase(), 
create.getTable()}); } String getMetadataPathInBackup(const DatabaseAndTableName & table_name) @@ -336,11 +336,11 @@ namespace String getMetadataPathInBackup(const IAST & create_query) { const auto & create = create_query.as(); - if (create.table.empty()) - return getMetadataPathInBackup(create.database); + if (!create.table) + return getMetadataPathInBackup(create.getDatabase()); if (create.temporary) - return getMetadataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.table}); - return getMetadataPathInBackup({create.database, create.table}); + return getMetadataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.getTable()}); + return getMetadataPathInBackup({create.getDatabase(), create.getTable()}); } void backupCreateQuery(const IAST & create_query, BackupEntries & backup_entries) @@ -419,7 +419,7 @@ namespace /// We create and execute `create` query for the database name. auto create_query = std::make_shared(); - create_query->database = database_name; + create_query->setDatabase(database_name); create_query->if_not_exists = true; InterpreterCreateQuery create_interpreter{create_query, context}; create_interpreter.execute(); @@ -460,7 +460,7 @@ namespace restore_tasks.emplace_back([table_name, new_create_query, partitions, context, backup]() -> RestoreDataTasks { - DatabaseAndTableName new_table_name{new_create_query->database, new_create_query->table}; + DatabaseAndTableName new_table_name{new_create_query->getDatabase(), new_create_query->getTable()}; if (new_create_query->temporary) new_table_name.first = DatabaseCatalog::TEMPORARY_DATABASE; @@ -536,7 +536,7 @@ namespace restore_tasks.emplace_back([database_name, new_create_query, except_list, context, backup, renaming_config]() -> RestoreDataTasks { - const String & new_database_name = new_create_query->database; + const String & new_database_name = new_create_query->getDatabase(); context->checkAccess(AccessType::SHOW_TABLES, new_database_name); if (!DatabaseCatalog::instance().isDatabaseExist(new_database_name)) diff --git a/src/Backups/renameInCreateQuery.cpp b/src/Backups/renameInCreateQuery.cpp index 4c78844d266..7a94a755f67 100644 --- a/src/Backups/renameInCreateQuery.cpp +++ b/src/Backups/renameInCreateQuery.cpp @@ -48,21 +48,23 @@ namespace { if (create.temporary) { - if (create.table.empty()) + if (!create.table) throw Exception(ErrorCodes::LOGICAL_ERROR, "Table name specified in the CREATE TEMPORARY TABLE query must not be empty"); - create.table = data.renaming_config->getNewTemporaryTableName(create.table); + create.setTable(data.renaming_config->getNewTemporaryTableName(create.getTable())); } - else if (create.table.empty()) + else if (!create.table) { - if (create.database.empty()) + if (!create.database) throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name specified in the CREATE DATABASE query must not be empty"); - create.database = data.renaming_config->getNewDatabaseName(create.database); + create.setDatabase(data.renaming_config->getNewDatabaseName(create.getDatabase())); } else { - if (create.database.empty()) + if (!create.database) throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name specified in the CREATE TABLE query must not be empty"); - std::tie(create.database, create.table) = data.renaming_config->getNewTableName({create.database, create.table}); + auto table_and_database_name = data.renaming_config->getNewTableName({create.getDatabase(), create.getTable()}); + create.setDatabase(table_and_database_name.first); + create.setTable(table_and_database_name.second); } create.uuid 
= UUIDHelpers::Nil; diff --git a/src/Bridge/LibraryBridgeHelper.cpp b/src/Bridge/LibraryBridgeHelper.cpp index bd0604ec8e0..f03f3e93fd5 100644 --- a/src/Bridge/LibraryBridgeHelper.cpp +++ b/src/Bridge/LibraryBridgeHelper.cpp @@ -75,7 +75,7 @@ bool LibraryBridgeHelper::bridgeHandShake() String result; try { - ReadWriteBufferFromHTTP buf(createRequestURI(PING), Poco::Net::HTTPRequest::HTTP_GET, {}, http_timeouts); + ReadWriteBufferFromHTTP buf(createRequestURI(PING), Poco::Net::HTTPRequest::HTTP_GET, {}, http_timeouts, credentials); readString(result, buf); } catch (...) @@ -240,7 +240,7 @@ bool LibraryBridgeHelper::executeRequest(const Poco::URI & uri, ReadWriteBufferF uri, Poco::Net::HTTPRequest::HTTP_POST, std::move(out_stream_callback), - http_timeouts); + http_timeouts, credentials); bool res; readBoolText(res, buf); @@ -255,8 +255,8 @@ Pipe LibraryBridgeHelper::loadBase(const Poco::URI & uri, ReadWriteBufferFromHTT Poco::Net::HTTPRequest::HTTP_POST, std::move(out_stream_callback), http_timeouts, + credentials, 0, - Poco::Net::HTTPBasicCredentials{}, DBMS_DEFAULT_BUFFER_SIZE, getContext()->getReadSettings(), ReadWriteBufferFromHTTP::HTTPHeaderEntries{}); diff --git a/src/Bridge/LibraryBridgeHelper.h b/src/Bridge/LibraryBridgeHelper.h index a209fff6ca0..393cadebfb5 100644 --- a/src/Bridge/LibraryBridgeHelper.h +++ b/src/Bridge/LibraryBridgeHelper.h @@ -101,6 +101,7 @@ private: size_t bridge_port; bool library_initialized = false; ConnectionTimeouts http_timeouts; + Poco::Net::HTTPBasicCredentials credentials{}; }; } diff --git a/src/Bridge/XDBCBridgeHelper.h b/src/Bridge/XDBCBridgeHelper.h index d321c1f23de..3cc73dc557f 100644 --- a/src/Bridge/XDBCBridgeHelper.h +++ b/src/Bridge/XDBCBridgeHelper.h @@ -76,7 +76,7 @@ protected: { try { - ReadWriteBufferFromHTTP buf(getPingURI(), Poco::Net::HTTPRequest::HTTP_GET, {}, ConnectionTimeouts::getHTTPTimeouts(getContext())); + ReadWriteBufferFromHTTP buf(getPingURI(), Poco::Net::HTTPRequest::HTTP_GET, {}, ConnectionTimeouts::getHTTPTimeouts(getContext()), credentials); return checkString(PING_OK_ANSWER, buf); } catch (...) 
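
The Backups and renameInCreateQuery hunks above all follow one migration: ASTCreateQuery stops exposing database/table as plain String members, so emptiness tests become presence tests (if (!create.table)) and all reads and writes go through getDatabase()/getTable()/setDatabase()/setTable(). A minimal sketch of that shape, assuming the names are now stored as optional child nodes (simplified stand-in types, not the real ASTCreateQuery):

    #include <memory>
    #include <string>

    /// Simplified stand-in for an AST identifier child node.
    struct IdentifierSketch { std::string name; };

    /// Hypothetical sketch of the accessor pattern the diff migrates to.
    struct CreateQuerySketch
    {
        std::shared_ptr<IdentifierSketch> database;  /// null when no database was written in the query
        std::shared_ptr<IdentifierSketch> table;     /// null for CREATE DATABASE queries

        std::string getDatabase() const { return database ? database->name : std::string{}; }
        std::string getTable() const { return table ? table->name : std::string{}; }
        void setDatabase(const std::string & name) { database = std::make_shared<IdentifierSketch>(IdentifierSketch{name}); }
        void setTable(const std::string & name) { table = std::make_shared<IdentifierSketch>(IdentifierSketch{name}); }
    };

With this layout a missing name and an empty name are distinguishable, which is why the renaming code above can branch on presence before calling the getters.
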
@@ -135,6 +135,8 @@ private: std::optional quote_style; std::optional is_schema_allowed; + Poco::Net::HTTPBasicCredentials credentials{}; + protected: using URLParams = std::vector>; @@ -166,7 +168,7 @@ protected: uri.setPath(SCHEMA_ALLOWED_HANDLER); uri.addQueryParameter("connection_string", getConnectionString()); - ReadWriteBufferFromHTTP buf(uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(getContext())); + ReadWriteBufferFromHTTP buf(uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(getContext()), credentials); bool res; readBoolText(res, buf); @@ -186,7 +188,7 @@ protected: uri.setPath(IDENTIFIER_QUOTE_HANDLER); uri.addQueryParameter("connection_string", getConnectionString()); - ReadWriteBufferFromHTTP buf(uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(getContext())); + ReadWriteBufferFromHTTP buf(uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(getContext()), credentials); std::string character; readStringBinary(character, buf); diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 85913b3925f..d274b673091 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -56,6 +56,8 @@ #include #include #include +#include + namespace fs = std::filesystem; using namespace std::literals; @@ -71,6 +73,14 @@ static const NameSet exit_strings "q", "й", "\\q", "\\Q", "\\й", "\\Й", ":q", "Жй" }; +static const std::initializer_list> backslash_aliases +{ + { "\\l", "SHOW DATABASES" }, + { "\\d", "SHOW TABLES" }, + { "\\c", "USE" }, +}; + + namespace ErrorCodes { extern const int BAD_ARGUMENTS; @@ -397,7 +407,7 @@ void ClientBase::initBlockOutputStream(const Block & block, ASTPtr parsed_query) output_format = global_context->getOutputFormat( current_format, out_file_buf ? *out_file_buf : *out_buf, block); - output_format->doWritePrefix(); + output_format->setAutoFlush(); } } @@ -482,7 +492,7 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa ReplaceQueryParameterVisitor visitor(query_parameters); visitor.visit(parsed_query); - /// Get new query after substitutions. Note that it cannot be done for INSERT query with embedded data. + /// Get new query after substitutions. query = serializeAST(*parsed_query); } @@ -677,7 +687,7 @@ void ClientBase::onEndOfStream() progress_indication.clearProgressOutput(); if (output_format) - output_format->doWriteSuffix(); + output_format->finalize(); resetOutput(); @@ -816,6 +826,17 @@ bool ClientBase::receiveSampleBlock(Block & out, ColumnsDescription & columns_de void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr parsed_query) { + auto query = query_to_execute; + if (!query_parameters.empty()) + { + /// Replace ASTQueryParameter with ASTLiteral for prepared statements. + ReplaceQueryParameterVisitor visitor(query_parameters); + visitor.visit(parsed_query); + + /// Get new query after substitutions. + query = serializeAST(*parsed_query); + } + /// Process the query that requires transferring data blocks to the server. 
const auto parsed_insert_query = parsed_query->as(); if ((!parsed_insert_query.data && !parsed_insert_query.infile) && (is_interactive || (!stdin_is_a_tty && std_in.eof()))) @@ -823,7 +844,7 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars connection->sendQuery( connection_parameters.timeouts, - query_to_execute, + query, global_context->getCurrentQueryId(), query_processing_stage, &global_context->getSettingsRef(), @@ -876,8 +897,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des /// Get name of this file (path to file) const auto & in_file_node = parsed_insert_query->infile->as(); const auto in_file = in_file_node.value.safeGet(); - /// Get name of table - const auto table_name = parsed_insert_query->table_id.getTableName(); + std::string compression_method; /// Compression method can be specified in query if (parsed_insert_query->compression) @@ -1313,6 +1333,12 @@ bool ClientBase::processQueryText(const String & text) } +String ClientBase::prompt() const +{ + return boost::replace_all_copy(prompt_by_server_display_name, "{database}", config().getString("database", "default")); +} + + void ClientBase::runInteractive() { if (config().has("query_id")) @@ -1414,6 +1440,24 @@ void ClientBase::runInteractive() has_vertical_output_suffix = true; } + for (const auto& [alias, command] : backslash_aliases) + { + auto it = std::search(input.begin(), input.end(), alias.begin(), alias.end()); + if (it != input.end() && std::all_of(input.begin(), it, isWhitespaceASCII)) + { + it += alias.size(); + if (it == input.end() || isWhitespaceASCII(*it)) + { + String new_input = command; + // append the rest of input to the command + // for parameters support, e.g. \c db_name -> USE db_name + new_input.append(it, input.end()); + input = std::move(new_input); + break; + } + } + } + try { if (!processQueryText(input)) @@ -1677,6 +1721,7 @@ void ClientBase::init(int argc, char ** argv) ("profile-events-delay-ms", po::value()->default_value(profile_events.delay_ms), "Delay between printing `ProfileEvents` packets (-1 - print only totals, 0 - print every single packet)") ("interactive", "Process queries-file or --query query and start interactive mode") + ("pager", po::value(), "Pipe all output into this command (less or similar)") ; addOptions(options_description); @@ -1748,6 +1793,8 @@ void ClientBase::init(int argc, char ** argv) config().setBool("verbose", true); if (options.count("interactive")) config().setBool("interactive", true); + if (options.count("pager")) + config().setString("pager", options["pager"].as()); if (options.count("log-level")) Poco::Logger::root().setLevel(options["log-level"].as()); diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 40fc6cacd31..bad1395e699 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -59,7 +59,6 @@ protected: virtual bool executeMultiQuery(const String & all_queries_text) = 0; virtual void connect() = 0; - virtual void prepareForInteractive() = 0; virtual void processError(const String & query) const = 0; virtual String getName() const = 0; @@ -129,10 +128,7 @@ private: void initBlockOutputStream(const Block & block, ASTPtr parsed_query); void initLogsOutputStream(); - inline String prompt() const - { - return boost::replace_all_copy(prompt_by_server_display_name, "{database}", config().getString("database", "default")); - } + String prompt() const; void resetOutput(); void outputQueryInfo(bool echo_query_); diff --git a/src/Client/Connection.cpp 
b/src/Client/Connection.cpp index d7d80606804..ca10160fa88 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -30,6 +30,7 @@ #include #include #include +#include #include #include diff --git a/src/Client/HedgedConnectionsFactory.cpp b/src/Client/HedgedConnectionsFactory.cpp index 5a42cc0d5c4..a1b816deecb 100644 --- a/src/Client/HedgedConnectionsFactory.cpp +++ b/src/Client/HedgedConnectionsFactory.cpp @@ -7,6 +7,8 @@ namespace ProfileEvents { extern const Event HedgedRequestsChangeReplica; + extern const Event DistributedConnectionFailTry; + extern const Event DistributedConnectionFailAtAll; } namespace DB diff --git a/src/Client/QueryFuzzer.cpp b/src/Client/QueryFuzzer.cpp index ec267e44a99..71c0a399c09 100644 --- a/src/Client/QueryFuzzer.cpp +++ b/src/Client/QueryFuzzer.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -447,6 +448,11 @@ void QueryFuzzer::fuzz(ASTPtr & ast) { fuzz(with_union->list_of_selects); } + else if (auto * with_intersect_except = typeid_cast(ast.get())) + { + auto selects = with_intersect_except->getListOfSelects(); + fuzz(selects); + } else if (auto * tables = typeid_cast(ast.get())) { fuzz(tables->children); diff --git a/src/Columns/ColumnDecimal.cpp b/src/Columns/ColumnDecimal.cpp index 48f890edaaf..70e2b4a6d96 100644 --- a/src/Columns/ColumnDecimal.cpp +++ b/src/Columns/ColumnDecimal.cpp @@ -247,7 +247,7 @@ ColumnPtr ColumnDecimal::filter(const IColumn::Filter & filt, ssize_t result_ while (filt_pos < filt_end_aligned) { - UInt64 mask = Bytes64MaskToBits64Mask(filt_pos); + UInt64 mask = bytes64MaskToBits64Mask(filt_pos); if (0xffffffffffffffff == mask) { diff --git a/src/Columns/ColumnFixedString.cpp b/src/Columns/ColumnFixedString.cpp index cc4e7b12a04..1eb2d4d5b1f 100644 --- a/src/Columns/ColumnFixedString.cpp +++ b/src/Columns/ColumnFixedString.cpp @@ -242,7 +242,7 @@ ColumnPtr ColumnFixedString::filter(const IColumn::Filter & filt, ssize_t result while (filt_pos < filt_end_aligned) { - uint64_t mask = Bytes64MaskToBits64Mask(filt_pos); + uint64_t mask = bytes64MaskToBits64Mask(filt_pos); if (0xffffffffffffffff == mask) { diff --git a/src/Columns/ColumnVector.cpp b/src/Columns/ColumnVector.cpp index b9079857aa4..13ba522b2ac 100644 --- a/src/Columns/ColumnVector.cpp +++ b/src/Columns/ColumnVector.cpp @@ -321,7 +321,7 @@ ColumnPtr ColumnVector::filter(const IColumn::Filter & filt, ssize_t result_s while (filt_pos < filt_end_aligned) { - UInt64 mask = Bytes64MaskToBits64Mask(filt_pos); + UInt64 mask = bytes64MaskToBits64Mask(filt_pos); if (0xffffffffffffffff == mask) { diff --git a/src/Columns/ColumnsCommon.cpp b/src/Columns/ColumnsCommon.cpp index 6d16790e2f8..701b888fb25 100644 --- a/src/Columns/ColumnsCommon.cpp +++ b/src/Columns/ColumnsCommon.cpp @@ -235,7 +235,7 @@ namespace while (filt_pos < filt_end_aligned) { - uint64_t mask = Bytes64MaskToBits64Mask(filt_pos); + uint64_t mask = bytes64MaskToBits64Mask(filt_pos); if (0xffffffffffffffff == mask) { diff --git a/src/Columns/ColumnsCommon.h b/src/Columns/ColumnsCommon.h index b84b1de7d67..0c307cac291 100644 --- a/src/Columns/ColumnsCommon.h +++ b/src/Columns/ColumnsCommon.h @@ -21,7 +21,7 @@ namespace ErrorCodes } /// Transform 64-byte mask to 64-bit mask -inline UInt64 Bytes64MaskToBits64Mask(const UInt8 * bytes64) +inline UInt64 bytes64MaskToBits64Mask(const UInt8 * bytes64) { #if defined(__AVX512F__) && defined(__AVX512BW__) static const __m512i zero64 = _mm512_setzero_epi32(); @@ -46,10 +46,8 @@ inline UInt64 Bytes64MaskToBits64Mask(const 
UInt8 * bytes64) _mm_loadu_si128(reinterpret_cast(bytes64 + 48)), zero16))) << 48) & 0xffff000000000000); #else UInt64 res = 0; - const UInt8 * pos = bytes64; - const UInt8 * end = pos + 64; - for (; pos < end; ++pos) - res |= ((*pos == 0)<<(pos-bytes64)); + for (size_t i = 0; i < 64; ++i) + res |= static_cast(0 == bytes64[i]) << i; #endif return ~res; } diff --git a/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp index 79ab7f82fe0..36d0c13b153 100644 --- a/src/Common/DNSResolver.cpp +++ b/src/Common/DNSResolver.cpp @@ -15,7 +15,7 @@ namespace ProfileEvents { - extern Event DNSError; + extern const Event DNSError; } namespace std diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 13d6632037a..2525204eab7 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -595,6 +595,9 @@ M(625, IO_SETUP_ERROR) \ M(626, CANNOT_SKIP_UNKNOWN_FIELD) \ M(627, BACKUP_ENGINE_NOT_FOUND) \ + M(628, OFFSET_FETCH_WITHOUT_ORDER_BY) \ + M(629, HTTP_RANGE_NOT_SATISFIABLE) \ + M(630, HAVE_DEPENDENT_OBJECTS) \ \ M(999, KEEPER_EXCEPTION) \ M(1000, POCO_EXCEPTION) \ diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index ee44b9eb927..3f580dcb370 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -30,6 +30,8 @@ M(UncompressedCacheWeightLost, "") \ M(MMappedFileCacheHits, "") \ M(MMappedFileCacheMisses, "") \ + M(OpenedFileCacheHits, "") \ + M(OpenedFileCacheMisses, "") \ M(AIOWrite, "Number of writes with Linux or FreeBSD AIO interface") \ M(AIOWriteBytes, "Number of bytes written with Linux or FreeBSD AIO interface") \ M(AIORead, "Number of reads with Linux or FreeBSD AIO interface") \ @@ -187,8 +189,6 @@ M(SystemTimeMicroseconds, "Total time spent in processing (queries and other tasks) threads executing CPU instructions in OS kernel space. This include time CPU pipeline was stalled due to cache misses, branch mispredictions, hyper-threading, etc.") \ M(SoftPageFaults, "") \ M(HardPageFaults, "") \ - M(VoluntaryContextSwitches, "") \ - M(InvoluntaryContextSwitches, "") \ \ M(OSIOWaitMicroseconds, "Total time a thread spent waiting for a result of IO operation, from the OS point of view. 
This is real IO that doesn't include page cache.") \ M(OSCPUWaitMicroseconds, "Total time a thread was ready for execution but waiting to be scheduled by OS, from the OS point of view.") \ diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp index 6c52d31c9a1..18e2e223744 100644 --- a/src/Common/StackTrace.cpp +++ b/src/Common/StackTrace.cpp @@ -190,6 +190,8 @@ static void * getCallerAddress(const ucontext_t & context) return reinterpret_cast(context.uc_mcontext.pc); #elif defined(__powerpc64__) return reinterpret_cast(context.uc_mcontext.gp_regs[PT_NIP]); +#elif defined(__riscv) + return reinterpret_cast(context.uc_mcontext.__gregs[REG_PC]); #else return nullptr; #endif diff --git a/src/Common/ThreadPool.cpp b/src/Common/ThreadPool.cpp index 8ad70d85643..9b01987c7cf 100644 --- a/src/Common/ThreadPool.cpp +++ b/src/Common/ThreadPool.cpp @@ -320,7 +320,7 @@ template class ThreadPoolImpl; std::unique_ptr GlobalThreadPool::the_instance; -void GlobalThreadPool::initialize(size_t max_threads) +void GlobalThreadPool::initialize(size_t max_threads, size_t max_free_threads, size_t queue_size) { if (the_instance) { @@ -328,9 +328,7 @@ void GlobalThreadPool::initialize(size_t max_threads) "The global thread pool is initialized twice"); } - the_instance.reset(new GlobalThreadPool(max_threads, - 1000 /*max_free_threads*/, 10000 /*max_queue_size*/, - false /*shutdown_on_exception*/)); + the_instance.reset(new GlobalThreadPool(max_threads, max_free_threads, queue_size, false /*shutdown_on_exception*/)); } GlobalThreadPool & GlobalThreadPool::instance() diff --git a/src/Common/ThreadPool.h b/src/Common/ThreadPool.h index 6e803d77601..c4421a5b374 100644 --- a/src/Common/ThreadPool.h +++ b/src/Common/ThreadPool.h @@ -147,7 +147,7 @@ class GlobalThreadPool : public FreeThreadPool, private boost::noncopyable {} public: - static void initialize(size_t max_threads = 10000); + static void initialize(size_t max_threads = 10000, size_t max_free_threads = 1000, size_t queue_size = 10000); static GlobalThreadPool & instance(); }; diff --git a/src/Common/ThreadProfileEvents.cpp b/src/Common/ThreadProfileEvents.cpp index 3e2bb2bdf6e..66cf5f49ab8 100644 --- a/src/Common/ThreadProfileEvents.cpp +++ b/src/Common/ThreadProfileEvents.cpp @@ -24,6 +24,45 @@ #include +namespace ProfileEvents +{ +#if defined(__linux__) + extern const Event OSIOWaitMicroseconds; + extern const Event OSCPUWaitMicroseconds; + extern const Event OSCPUVirtualTimeMicroseconds; + extern const Event OSReadChars; + extern const Event OSWriteChars; + extern const Event OSReadBytes; + extern const Event OSWriteBytes; + + extern const Event PerfCpuCycles; + extern const Event PerfInstructions; + extern const Event PerfCacheReferences; + extern const Event PerfCacheMisses; + extern const Event PerfBranchInstructions; + extern const Event PerfBranchMisses; + extern const Event PerfBusCycles; + extern const Event PerfStalledCyclesFrontend; + extern const Event PerfStalledCyclesBackend; + extern const Event PerfRefCpuCycles; + + extern const Event PerfCpuClock; + extern const Event PerfTaskClock; + extern const Event PerfContextSwitches; + extern const Event PerfCpuMigrations; + extern const Event PerfAlignmentFaults; + extern const Event PerfEmulationFaults; + extern const Event PerfMinEnabledTime; + extern const Event PerfMinEnabledRunningTime; + extern const Event PerfDataTLBReferences; + extern const Event PerfDataTLBMisses; + extern const Event PerfInstructionTLBReferences; + extern const Event PerfInstructionTLBMisses; + extern const 
Event PerfLocalMemoryReferences; + extern const Event PerfLocalMemoryMisses; +#endif +} + namespace DB { diff --git a/src/Common/ThreadProfileEvents.h b/src/Common/ThreadProfileEvents.h index 5d8f879eca7..c1ff0df84f0 100644 --- a/src/Common/ThreadProfileEvents.h +++ b/src/Common/ThreadProfileEvents.h @@ -24,44 +24,6 @@ namespace ProfileEvents extern const Event SystemTimeMicroseconds; extern const Event SoftPageFaults; extern const Event HardPageFaults; - extern const Event VoluntaryContextSwitches; - extern const Event InvoluntaryContextSwitches; - -#if defined(__linux__) - extern const Event OSIOWaitMicroseconds; - extern const Event OSCPUWaitMicroseconds; - extern const Event OSCPUVirtualTimeMicroseconds; - extern const Event OSReadChars; - extern const Event OSWriteChars; - extern const Event OSReadBytes; - extern const Event OSWriteBytes; - - extern const Event PerfCpuCycles; - extern const Event PerfInstructions; - extern const Event PerfCacheReferences; - extern const Event PerfCacheMisses; - extern const Event PerfBranchInstructions; - extern const Event PerfBranchMisses; - extern const Event PerfBusCycles; - extern const Event PerfStalledCyclesFrontend; - extern const Event PerfStalledCyclesBackend; - extern const Event PerfRefCpuCycles; - - extern const Event PerfCpuClock; - extern const Event PerfTaskClock; - extern const Event PerfContextSwitches; - extern const Event PerfCpuMigrations; - extern const Event PerfAlignmentFaults; - extern const Event PerfEmulationFaults; - extern const Event PerfMinEnabledTime; - extern const Event PerfMinEnabledRunningTime; - extern const Event PerfDataTLBReferences; - extern const Event PerfDataTLBMisses; - extern const Event PerfInstructionTLBReferences; - extern const Event PerfInstructionTLBMisses; - extern const Event PerfLocalMemoryReferences; - extern const Event PerfLocalMemoryMisses; -#endif } namespace DB diff --git a/src/Common/ZooKeeper/ZooKeeperCommon.cpp b/src/Common/ZooKeeper/ZooKeeperCommon.cpp index bc8fe0fe365..6a449cf0122 100644 --- a/src/Common/ZooKeeper/ZooKeeperCommon.cpp +++ b/src/Common/ZooKeeper/ZooKeeperCommon.cpp @@ -489,20 +489,20 @@ void ZooKeeperMultiResponse::writeImpl(WriteBuffer & out) const } } -ZooKeeperResponsePtr ZooKeeperHeartbeatRequest::makeResponse() const { return std::make_shared(); } -ZooKeeperResponsePtr ZooKeeperSyncRequest::makeResponse() const { return std::make_shared(); } -ZooKeeperResponsePtr ZooKeeperAuthRequest::makeResponse() const { return std::make_shared(); } -ZooKeeperResponsePtr ZooKeeperCreateRequest::makeResponse() const { return std::make_shared(); } -ZooKeeperResponsePtr ZooKeeperRemoveRequest::makeResponse() const { return std::make_shared(); } -ZooKeeperResponsePtr ZooKeeperExistsRequest::makeResponse() const { return std::make_shared(); } -ZooKeeperResponsePtr ZooKeeperGetRequest::makeResponse() const { return std::make_shared(); } -ZooKeeperResponsePtr ZooKeeperSetRequest::makeResponse() const { return std::make_shared(); } -ZooKeeperResponsePtr ZooKeeperListRequest::makeResponse() const { return std::make_shared(); } -ZooKeeperResponsePtr ZooKeeperCheckRequest::makeResponse() const { return std::make_shared(); } -ZooKeeperResponsePtr ZooKeeperMultiRequest::makeResponse() const { return std::make_shared(requests); } -ZooKeeperResponsePtr ZooKeeperCloseRequest::makeResponse() const { return std::make_shared(); } -ZooKeeperResponsePtr ZooKeeperSetACLRequest::makeResponse() const { return std::make_shared(); } -ZooKeeperResponsePtr ZooKeeperGetACLRequest::makeResponse() const { 
return std::make_shared(); } +ZooKeeperResponsePtr ZooKeeperHeartbeatRequest::makeResponse() const { return setTime(std::make_shared()); } +ZooKeeperResponsePtr ZooKeeperSyncRequest::makeResponse() const { return setTime(std::make_shared()); } +ZooKeeperResponsePtr ZooKeeperAuthRequest::makeResponse() const { return setTime(std::make_shared()); } +ZooKeeperResponsePtr ZooKeeperCreateRequest::makeResponse() const { return setTime(std::make_shared()); } +ZooKeeperResponsePtr ZooKeeperRemoveRequest::makeResponse() const { return setTime(std::make_shared()); } +ZooKeeperResponsePtr ZooKeeperExistsRequest::makeResponse() const { return setTime(std::make_shared()); } +ZooKeeperResponsePtr ZooKeeperGetRequest::makeResponse() const { return setTime(std::make_shared()); } +ZooKeeperResponsePtr ZooKeeperSetRequest::makeResponse() const { return setTime(std::make_shared()); } +ZooKeeperResponsePtr ZooKeeperListRequest::makeResponse() const { return setTime(std::make_shared()); } +ZooKeeperResponsePtr ZooKeeperCheckRequest::makeResponse() const { return setTime(std::make_shared()); } +ZooKeeperResponsePtr ZooKeeperMultiRequest::makeResponse() const { return setTime(std::make_shared(requests)); } +ZooKeeperResponsePtr ZooKeeperCloseRequest::makeResponse() const { return setTime(std::make_shared()); } +ZooKeeperResponsePtr ZooKeeperSetACLRequest::makeResponse() const { return setTime(std::make_shared()); } +ZooKeeperResponsePtr ZooKeeperGetACLRequest::makeResponse() const { return setTime(std::make_shared()); } void ZooKeeperSessionIDRequest::writeImpl(WriteBuffer & out) const { @@ -690,6 +690,40 @@ std::shared_ptr ZooKeeperRequest::read(ReadBuffer & in) return request; } +ZooKeeperRequest::~ZooKeeperRequest() +{ + if (!request_created_time_ns) + return; + UInt64 elapsed_ns = clock_gettime_ns() - request_created_time_ns; + constexpr UInt64 max_request_time_ns = 1000000000ULL; /// 1 sec + if (max_request_time_ns < elapsed_ns) + { + LOG_TEST(&Poco::Logger::get(__PRETTY_FUNCTION__), "Processing of request xid={} took {} ms", xid, elapsed_ns / 1000000UL); + } +} + +ZooKeeperResponsePtr ZooKeeperRequest::setTime(ZooKeeperResponsePtr response) const +{ + if (request_created_time_ns) + { + response->response_created_time_ns = clock_gettime_ns(); + } + return response; +} + +ZooKeeperResponse::~ZooKeeperResponse() +{ + if (!response_created_time_ns) + return; + UInt64 elapsed_ns = clock_gettime_ns() - response_created_time_ns; + constexpr UInt64 max_request_time_ns = 1000000000ULL; /// 1 sec + if (max_request_time_ns < elapsed_ns) + { + LOG_TEST(&Poco::Logger::get(__PRETTY_FUNCTION__), "Processing of response xid={} took {} ms", xid, elapsed_ns / 1000000UL); + } +} + + ZooKeeperRequestPtr ZooKeeperRequestFactory::get(OpNum op_num) const { auto it = op_num_to_request.find(op_num); @@ -708,7 +742,12 @@ ZooKeeperRequestFactory & ZooKeeperRequestFactory::instance() template void registerZooKeeperRequest(ZooKeeperRequestFactory & factory) { - factory.registerRequest(num, [] { return std::make_shared(); }); + factory.registerRequest(num, [] + { + auto res = std::make_shared(); + res->request_created_time_ns = clock_gettime_ns(); + return res; + }); } ZooKeeperRequestFactory::ZooKeeperRequestFactory() diff --git a/src/Common/ZooKeeper/ZooKeeperCommon.h b/src/Common/ZooKeeper/ZooKeeperCommon.h index a51ca9a487e..237c98d8497 100644 --- a/src/Common/ZooKeeper/ZooKeeperCommon.h +++ b/src/Common/ZooKeeper/ZooKeeperCommon.h @@ -30,9 +30,11 @@ struct ZooKeeperResponse : virtual Response XID xid = 0; int64_t zxid = 0; + 
UInt64 response_created_time_ns = 0; + ZooKeeperResponse() = default; ZooKeeperResponse(const ZooKeeperResponse &) = default; - virtual ~ZooKeeperResponse() override = default; + ~ZooKeeperResponse() override; virtual void readImpl(ReadBuffer &) = 0; virtual void writeImpl(WriteBuffer &) const = 0; virtual void write(WriteBuffer & out) const; @@ -54,9 +56,11 @@ struct ZooKeeperRequest : virtual Request bool restored_from_zookeeper_log = false; + UInt64 request_created_time_ns = 0; + ZooKeeperRequest() = default; ZooKeeperRequest(const ZooKeeperRequest &) = default; - virtual ~ZooKeeperRequest() override = default; + ~ZooKeeperRequest() override; virtual OpNum getOpNum() const = 0; @@ -69,6 +73,7 @@ struct ZooKeeperRequest : virtual Request static std::shared_ptr read(ReadBuffer & in); virtual ZooKeeperResponsePtr makeResponse() const = 0; + ZooKeeperResponsePtr setTime(ZooKeeperResponsePtr response) const; virtual bool isReadRequest() const = 0; virtual void createLogElements(LogElements & elems) const; diff --git a/src/Common/setThreadName.cpp b/src/Common/setThreadName.cpp index 958404b9ad1..727bf23b891 100644 --- a/src/Common/setThreadName.cpp +++ b/src/Common/setThreadName.cpp @@ -12,6 +12,8 @@ #include #include +#define THREAD_NAME_SIZE 16 + namespace DB { @@ -23,13 +25,13 @@ namespace ErrorCodes /// Cache thread_name to avoid prctl(PR_GET_NAME) for query_log/text_log -static thread_local std::string thread_name; +static thread_local char thread_name[THREAD_NAME_SIZE]{}; void setThreadName(const char * name) { #ifndef NDEBUG - if (strlen(name) > 15) + if (strlen(name) > THREAD_NAME_SIZE - 1) throw DB::Exception("Thread name cannot be longer than 15 bytes", DB::ErrorCodes::PTHREAD_ERROR); #endif @@ -45,28 +47,25 @@ void setThreadName(const char * name) #endif DB::throwFromErrno("Cannot set thread name with prctl(PR_SET_NAME, ...)", DB::ErrorCodes::PTHREAD_ERROR); - thread_name = name; + memcpy(thread_name, name, 1 + strlen(name)); } -const std::string & getThreadName() +const char * getThreadName() { - if (!thread_name.empty()) + if (thread_name[0]) return thread_name; - thread_name.resize(16); - #if defined(__APPLE__) || defined(OS_SUNOS) - if (pthread_getname_np(pthread_self(), thread_name.data(), thread_name.size())) + if (pthread_getname_np(pthread_self(), thread_name, THREAD_NAME_SIZE)) throw DB::Exception("Cannot get thread name with pthread_getname_np()", DB::ErrorCodes::PTHREAD_ERROR); #elif defined(__FreeBSD__) // TODO: make test. 
freebsd will have this function soon https://freshbsd.org/commit/freebsd/r337983 -// if (pthread_get_name_np(pthread_self(), thread_name.data(), thread_name.size())) +// if (pthread_get_name_np(pthread_self(), thread_name, THREAD_NAME_SIZE)) // throw DB::Exception("Cannot get thread name with pthread_get_name_np()", DB::ErrorCodes::PTHREAD_ERROR); #else - if (0 != prctl(PR_GET_NAME, thread_name.data(), 0, 0, 0)) + if (0 != prctl(PR_GET_NAME, thread_name, 0, 0, 0)) DB::throwFromErrno("Cannot get thread name with prctl(PR_GET_NAME)", DB::ErrorCodes::PTHREAD_ERROR); #endif - thread_name.resize(std::strlen(thread_name.data())); return thread_name; } diff --git a/src/Common/setThreadName.h b/src/Common/setThreadName.h index ea988885db2..1834ea9696f 100644 --- a/src/Common/setThreadName.h +++ b/src/Common/setThreadName.h @@ -7,4 +7,4 @@ */ void setThreadName(const char * name); -const std::string & getThreadName(); +const char * getThreadName(); diff --git a/src/Compression/LZ4_decompress_faster.cpp b/src/Compression/LZ4_decompress_faster.cpp index 85cd9fa3ab0..5018d392e3c 100644 --- a/src/Compression/LZ4_decompress_faster.cpp +++ b/src/Compression/LZ4_decompress_faster.cpp @@ -70,7 +70,7 @@ inline void copyOverlap8(UInt8 * op, const UInt8 *& match, size_t offset) } -#if defined(__x86_64__) || defined(__PPC__) +#if defined(__x86_64__) || defined(__PPC__) || defined(__riscv) /** We use 'xmm' (128bit SSE) registers here to shuffle 16 bytes. * @@ -261,7 +261,7 @@ inline void copyOverlap16(UInt8 * op, const UInt8 *& match, const size_t offset) } -#if defined(__x86_64__) || defined(__PPC__) +#if defined(__x86_64__) || defined(__PPC__) || defined (__riscv) inline void copyOverlap16Shuffle(UInt8 * op, const UInt8 *& match, const size_t offset) { diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index c29ea4fac45..e6fe1a9834a 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -206,7 +206,10 @@ void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKe /// Session was disconnected, just skip this response if (session_response_callback == session_to_response_callback.end()) + { + LOG_TEST(log, "Cannot write response xid={}, op={}, session {} disconnected", response->xid, response->getOpNum(), session_id); return; + } session_response_callback->second(response); diff --git a/src/Core/BaseSettings.h b/src/Core/BaseSettings.h index 7dca93212a2..f6c63804059 100644 --- a/src/Core/BaseSettings.h +++ b/src/Core/BaseSettings.h @@ -97,6 +97,7 @@ public: const char * getTypeName() const; const char * getDescription() const; bool isCustom() const; + bool isObsolete() const; bool operator==(const SettingFieldRef & other) const { return (getName() == other.getName()) && (getValue() == other.getValue()); } bool operator!=(const SettingFieldRef & other) const { return !(*this == other); } @@ -182,6 +183,7 @@ struct BaseSettingsHelpers { IMPORTANT = 0x01, CUSTOM = 0x02, + OBSOLETE = 0x04, }; static void writeFlags(Flags flags, WriteBuffer & out); static Flags readFlags(ReadBuffer & in); @@ -745,6 +747,17 @@ bool BaseSettings::SettingFieldRef::isCustom() const return false; } +template +bool BaseSettings::SettingFieldRef::isObsolete() const +{ + if constexpr (Traits::allow_custom_settings) + { + if (custom_setting) + return false; + } + return accessor->isObsolete(index); +} + #define DECLARE_SETTINGS_TRAITS(SETTINGS_TRAITS_NAME, LIST_OF_SETTINGS_MACRO) \ DECLARE_SETTINGS_TRAITS_COMMON(SETTINGS_TRAITS_NAME, 
LIST_OF_SETTINGS_MACRO, 0) @@ -769,6 +782,7 @@ bool BaseSettings::SettingFieldRef::isCustom() const const char * getTypeName(size_t index) const { return field_infos[index].type; } \ const char * getDescription(size_t index) const { return field_infos[index].description; } \ bool isImportant(size_t index) const { return field_infos[index].is_important; } \ + bool isObsolete(size_t index) const { return field_infos[index].is_obsolete; } \ Field castValueUtil(size_t index, const Field & value) const { return field_infos[index].cast_value_util_function(value); } \ String valueToStringUtil(size_t index, const Field & value) const { return field_infos[index].value_to_string_util_function(value); } \ Field stringToValueUtil(size_t index, const String & str) const { return field_infos[index].string_to_value_util_function(str); } \ @@ -789,6 +803,7 @@ bool BaseSettings::SettingFieldRef::isCustom() const const char * type; \ const char * description; \ bool is_important; \ + bool is_obsolete; \ Field (*cast_value_util_function)(const Field &); \ String (*value_to_string_util_function)(const Field &); \ Field (*string_to_value_util_function)(const String &); \ @@ -816,7 +831,7 @@ bool BaseSettings::SettingFieldRef::isCustom() const static const Accessor the_instance = [] \ { \ Accessor res; \ - constexpr int IMPORTANT = 1; \ + constexpr int IMPORTANT = 0x01; \ UNUSED(IMPORTANT); \ LIST_OF_SETTINGS_MACRO(IMPLEMENT_SETTINGS_TRAITS_) \ for (size_t i : collections::range(res.field_infos.size())) \ @@ -845,6 +860,7 @@ bool BaseSettings::SettingFieldRef::isCustom() const #define IMPLEMENT_SETTINGS_TRAITS_(TYPE, NAME, DEFAULT, DESCRIPTION, FLAGS) \ res.field_infos.emplace_back( \ FieldInfo{#NAME, #TYPE, DESCRIPTION, FLAGS & IMPORTANT, \ + static_cast(FLAGS & BaseSettingsHelpers::Flags::OBSOLETE), \ [](const Field & value) -> Field { return static_cast(SettingField##TYPE{value}); }, \ [](const Field & value) -> String { return SettingField##TYPE{value}.toString(); }, \ [](const String & str) -> Field { SettingField##TYPE temp; temp.parseFromString(str); return static_cast(temp); }, \ diff --git a/src/Core/Defines.h b/src/Core/Defines.h index 215bf6780d9..9665a20a397 100644 --- a/src/Core/Defines.h +++ b/src/Core/Defines.h @@ -44,7 +44,7 @@ /// The boundary on which the blocks for asynchronous file operations should be aligned. #define DEFAULT_AIO_FILE_BLOCK_SIZE 4096 -#define DEFAULT_HTTP_READ_BUFFER_TIMEOUT 1800 +#define DEFAULT_HTTP_READ_BUFFER_TIMEOUT 180 #define DEFAULT_HTTP_READ_BUFFER_CONNECTION_TIMEOUT 1 /// Maximum number of http-connections between two endpoints /// the number is unmotivated diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 3863046b511..05a54a34656 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -149,8 +149,6 @@ class IColumn; M(UInt64, merge_tree_coarse_index_granularity, 8, "If the index segment can contain the required keys, divide it into as many parts and recursively check them.", 0) \ M(UInt64, merge_tree_max_rows_to_use_cache, (128 * 8192), "The maximum number of rows per request, to use the cache of uncompressed data. If the request is large, the cache is not used. (For large queries not to flush out the cache.)", 0) \ M(UInt64, merge_tree_max_bytes_to_use_cache, (192 * 10 * 1024 * 1024), "The maximum number of bytes per request, to use the cache of uncompressed data. If the request is large, the cache is not used. 
(For large queries not to flush out the cache.)", 0) \ - M(UInt64, merge_tree_clear_old_temporary_directories_interval_seconds, 60, "The period of executing the clear old temporary directories operation in background.", 0) \ - M(UInt64, merge_tree_clear_old_parts_interval_seconds, 1, "The period of executing the clear old parts operation in background.", 0) \ M(Bool, do_not_merge_across_partitions_select_final, false, "Merge parts only in one partition in select final", 0) \ \ M(UInt64, mysql_max_rows_to_insert, 65536, "The maximum number of rows in MySQL batch insertion of the MySQL storage engine", 0) \ @@ -333,9 +331,8 @@ class IColumn; M(OverflowMode, join_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \ M(Bool, join_any_take_last_row, false, "When disabled (default) ANY JOIN will take the first found row for a key. When enabled, it will take the last row seen if there are multiple rows for the same key.", IMPORTANT) \ M(JoinAlgorithm, join_algorithm, JoinAlgorithm::HASH, "Specify join algorithm: 'auto', 'hash', 'partial_merge', 'prefer_partial_merge'. 'auto' tries to change HashJoin to MergeJoin on the fly to avoid out of memory.", 0) \ - M(Bool, partial_merge_join_optimizations, true, "Enable optimizations in partial merge join", 0) \ M(UInt64, default_max_bytes_in_join, 1000000000, "Maximum size of right-side table if limit is required but max_bytes_in_join is not set.", 0) \ - M(UInt64, partial_merge_join_left_table_buffer_bytes, 32000000, "If not 0 group left table blocks in bigger ones for left-side table in partial merge join. It uses up to 2x of specified memory per joining thread. In current version work only with 'partial_merge_join_optimizations = 1'.", 0) \ + M(UInt64, partial_merge_join_left_table_buffer_bytes, 0, "If not 0 group left table blocks in bigger ones for left-side table in partial merge join. It uses up to 2x of specified memory per joining thread.", 0) \ M(UInt64, partial_merge_join_rows_in_right_blocks, 65536, "Split right-hand joining data in blocks of specified size. It's a portion of data indexed by min-max values and possibly unloaded on disk.", 0) \ M(UInt64, join_on_disk_max_files_to_merge, 64, "For MergeJoin on disk set how much files it's allowed to sort simultaneously. Then this value bigger then more memory used and then less disk I/O needed. Minimum is 2.", 0) \ M(String, temporary_files_codec, "LZ4", "Set compression codec for temporary files (sort and join on disk). I.e. LZ4, NONE.", 0) \ @@ -426,6 +423,7 @@ class IColumn; M(Bool, optimize_move_functions_out_of_any, false, "Move functions out of aggregate functions 'any', 'anyLast'.", 0) \ M(Bool, optimize_normalize_count_variants, true, "Rewrite aggregate functions that semantically equals to count() as count().", 0) \ M(Bool, optimize_injective_functions_inside_uniq, true, "Delete injective functions of one argument inside uniq*() functions.", 0) \ + M(Bool, convert_query_to_cnf, false, "Convert SELECT query to CNF", 0) \ M(Bool, optimize_arithmetic_operations_in_aggregate_functions, true, "Move arithmetic operations out of aggregation functions", 0) \ M(Bool, optimize_duplicate_order_by_and_distinct, true, "Remove duplicate ORDER BY and DISTINCT if it's possible", 0) \ M(Bool, optimize_redundant_functions_in_order_by, true, "Remove functions from ORDER BY if its argument is also in ORDER BY", 0) \ @@ -433,6 +431,9 @@ class IColumn; M(Bool, optimize_if_transform_strings_to_enum, false, "Replaces string-type arguments in If and Transform to enum. 
Disabled by default cause it could make inconsistent change in distributed query that would lead to its fail.", 0) \ M(Bool, optimize_monotonous_functions_in_order_by, true, "Replace monotonous function with its argument in ORDER BY", 0) \ M(Bool, optimize_functions_to_subcolumns, false, "Transform functions to subcolumns, if possible, to reduce amount of read data. E.g. 'length(arr)' -> 'arr.size0', 'col IS NULL' -> 'col.null' ", 0) \ + M(Bool, optimize_using_constraints, false, "Use constraints for query optimization", 0) \ + M(Bool, optimize_substitute_columns, false, "Use constraints for column substitution", 0) \ + M(Bool, optimize_append_index, false, "Use constraints in order to append index condition (indexHint)", 0) \ M(Bool, normalize_function_names, true, "Normalize function names to their canonical names", 0) \ M(Bool, allow_experimental_alter_materialized_view_structure, false, "Allow atomic alter on Materialized views. Work in progress.", 0) \ M(Bool, enable_early_constant_folding, true, "Enable query optimization where we analyze function and subqueries results and rewrite query if there're constants there", 0) \ @@ -466,7 +467,6 @@ class IColumn; M(Bool, asterisk_include_alias_columns, false, "Include ALIAS columns for wildcard query", 0) \ M(Bool, optimize_skip_merged_partitions, false, "Skip partitions with one part with level > 0 in optimize final", 0) \ M(Bool, optimize_on_insert, true, "Do the same transformation for inserted block of data as if merge was done on this block.", 0) \ - M(Bool, allow_experimental_map_type, true, "Obsolete setting, does nothing.", 0) \ M(Bool, allow_experimental_projection_optimization, false, "Enable projection optimization when processing SELECT queries", 0) \ M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \ M(Bool, async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \ @@ -508,7 +508,7 @@ class IColumn; M(ShortCircuitFunctionEvaluation, short_circuit_function_evaluation, ShortCircuitFunctionEvaluation::ENABLE, "Setting for short-circuit function evaluation configuration. Possible values: 'enable' - use short-circuit function evaluation for functions that are suitable for it, 'disable' - disable short-circuit function evaluation, 'force_enable' - use short-circuit function evaluation for all functions.", 0) \ \ M(String, local_filesystem_read_method, "pread", "Method of reading data from local filesystem, one of: read, pread, mmap, pread_threadpool.", 0) \ - M(String, remote_filesystem_read_method, "read", "Method of reading data from remote filesystem, one of: read, read_threadpool.", 0) \ + M(String, remote_filesystem_read_method, "threadpool", "Method of reading data from remote filesystem, one of: read, threadpool.", 0) \ M(Bool, local_filesystem_read_prefetch, false, "Should use prefetching when reading data from local filesystem.", 0) \ M(Bool, remote_filesystem_read_prefetch, true, "Should use prefetching when reading data from remote filesystem.", 0) \ M(Int64, read_priority, 0, "Priority to read data from local filesystem. 
Only supported for 'pread_threadpool' method.", 0) \ @@ -526,29 +526,42 @@ class IColumn; M(Int64, remote_fs_read_max_backoff_ms, 10000, "Max wait time when trying to read data for remote disk", 0) \ M(Int64, remote_fs_read_backoff_max_tries, 5, "Max attempts to read with backoff", 0) \ \ + M(UInt64, http_max_tries, 1, "Max attempts to read via http.", 0) \ + M(UInt64, http_retry_initial_backoff_ms, 100, "Min milliseconds for backoff, when retrying read via http", 0) \ + M(UInt64, http_retry_max_backoff_ms, 10000, "Max milliseconds for backoff, when retrying read via http", 0) \ + \ M(Bool, force_remove_data_recursively_on_drop, false, "Recursively remove data on DROP query. Avoids 'Directory not empty' error, but may silently remove detached data", 0) \ + M(Bool, check_table_dependencies, true, "Check that DDL query (such as DROP TABLE or RENAME) will not break dependencies", 0) \ \ /** Experimental functions */ \ M(Bool, allow_experimental_funnel_functions, false, "Enable experimental functions for funnel analysis.", 0) \ M(Bool, allow_experimental_nlp_functions, false, "Enable experimental functions for natural language processing.", 0) \ - \ - \ - /** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \ - M(UInt64, max_memory_usage_for_all_queries, 0, "Obsolete setting, does nothing.", 0) \ - M(UInt64, multiple_joins_rewriter_version, 0, "Obsolete setting, does nothing.", 0) \ - M(Bool, enable_debug_queries, false, "Obsolete setting, does nothing.", 0) \ - M(Bool, allow_experimental_database_atomic, true, "Obsolete setting, does nothing.", 0) \ - M(Bool, allow_experimental_bigint_types, true, "Obsolete setting, does nothing.", 0) \ - M(Bool, allow_experimental_window_functions, true, "Obsolete setting, does nothing.", 0) \ - M(HandleKafkaErrorMode, handle_kafka_error_mode, HandleKafkaErrorMode::DEFAULT, "Obsolete setting, does nothing.", 0) \ - M(Bool, database_replicated_ddl_output, true, "Obsolete setting, does nothing.", 0) \ - M(UInt64, replication_alter_columns_timeout, 60, "Obsolete setting, does nothing.", 0) \ - M(UInt64, odbc_max_field_size, 0, "Obsolete setting, does nothing.", 0) \ - /** The section above is for obsolete settings. Do not add anything there. */ - // End of COMMON_SETTINGS -// Please add settings related to formats into the FORMAT_FACTORY_SETTINGS below. +// Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS. + +#define MAKE_OBSOLETE(M, TYPE, NAME, DEFAULT) \ + M(TYPE, NAME, DEFAULT, "Obsolete setting, does nothing.", BaseSettingsHelpers::Flags::OBSOLETE) + +#define OBSOLETE_SETTINGS(M) \ + /** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. 
*/ \ + MAKE_OBSOLETE(M, UInt64, max_memory_usage_for_all_queries, 0) \ + MAKE_OBSOLETE(M, UInt64, multiple_joins_rewriter_version, 0) \ + MAKE_OBSOLETE(M, Bool, enable_debug_queries, false) \ + MAKE_OBSOLETE(M, Bool, allow_experimental_database_atomic, true) \ + MAKE_OBSOLETE(M, Bool, allow_experimental_bigint_types, true) \ + MAKE_OBSOLETE(M, Bool, allow_experimental_window_functions, true) \ + MAKE_OBSOLETE(M, HandleKafkaErrorMode, handle_kafka_error_mode, HandleKafkaErrorMode::DEFAULT) \ + MAKE_OBSOLETE(M, Bool, database_replicated_ddl_output, true) \ + MAKE_OBSOLETE(M, UInt64, replication_alter_columns_timeout, 60) \ + MAKE_OBSOLETE(M, UInt64, odbc_max_field_size, 0) \ + MAKE_OBSOLETE(M, Bool, allow_experimental_map_type, true) \ + MAKE_OBSOLETE(M, UInt64, merge_tree_clear_old_temporary_directories_interval_seconds, 60) \ + MAKE_OBSOLETE(M, UInt64, merge_tree_clear_old_parts_interval_seconds, 1) \ + MAKE_OBSOLETE(M, UInt64, partial_merge_join_optimizations, 0) \ + + /** The section above is for obsolete settings. Do not add anything there. */ + #define FORMAT_FACTORY_SETTINGS(M) \ M(Char, format_csv_delimiter, ',', "The character to be considered as a delimiter in CSV data. If setting with a string, a string has to have a length of 1.", 0) \ @@ -609,7 +622,7 @@ class IColumn; M(String, format_template_row, "", "Path to file which contains format string for rows (for Template format)", 0) \ M(String, format_template_rows_between_delimiter, "\n", "Delimiter between rows (for Template format)", 0) \ \ - M(String, format_custom_escaping_rule, "Escaped", "Field escaping rule (for CustomSeparated format)", 0) \ + M(EscapingRule, format_custom_escaping_rule, "Escaped", "Field escaping rule (for CustomSeparated format)", 0) \ M(String, format_custom_field_delimiter, "\t", "Delimiter between fields (for CustomSeparated format)", 0) \ M(String, format_custom_row_before_delimiter, "", "Delimiter before field of the first column (for CustomSeparated format)", 0) \ M(String, format_custom_row_after_delimiter, "\n", "Delimiter after field of the last column (for CustomSeparated format)", 0) \ @@ -618,7 +631,7 @@ class IColumn; M(String, format_custom_result_after_delimiter, "", "Suffix after result set (for CustomSeparated format)", 0) \ \ M(String, format_regexp, "", "Regular expression (for Regexp format)", 0) \ - M(String, format_regexp_escaping_rule, "Raw", "Field escaping rule (for Regexp format)", 0) \ + M(EscapingRule, format_regexp_escaping_rule, "Raw", "Field escaping rule (for Regexp format)", 0) \ M(Bool, format_regexp_skip_unmatched, false, "Skip lines unmatched by regular expression (for Regexp format", 0) \ \ M(Bool, output_format_enable_streaming, false, "Enable streaming in output formats that support it.", 0) \ @@ -636,6 +649,7 @@ class IColumn; #define LIST_OF_SETTINGS(M) \ COMMON_SETTINGS(M) \ + OBSOLETE_SETTINGS(M) \ FORMAT_FACTORY_SETTINGS(M) DECLARE_SETTINGS_TRAITS_ALLOW_CUSTOM_SETTINGS(SettingsTraits, LIST_OF_SETTINGS) diff --git a/src/Core/SettingsEnums.cpp b/src/Core/SettingsEnums.cpp index f5497588891..b62575c9730 100644 --- a/src/Core/SettingsEnums.cpp +++ b/src/Core/SettingsEnums.cpp @@ -121,4 +121,13 @@ IMPLEMENT_SETTING_ENUM(EnumComparingMode, ErrorCodes::BAD_ARGUMENTS, {{"by_names", FormatSettings::EnumComparingMode::BY_NAMES}, {"by_values", FormatSettings::EnumComparingMode::BY_VALUES}, {"by_names_case_insensitive", FormatSettings::EnumComparingMode::BY_NAMES_CASE_INSENSITIVE}}) + +IMPLEMENT_SETTING_ENUM(EscapingRule, ErrorCodes::BAD_ARGUMENTS, + {{"None", 
FormatSettings::EscapingRule::None}, + {"Escaped", FormatSettings::EscapingRule::Escaped}, + {"Quoted", FormatSettings::EscapingRule::Quoted}, + {"CSV", FormatSettings::EscapingRule::CSV}, + {"JSON", FormatSettings::EscapingRule::JSON}, + {"XML", FormatSettings::EscapingRule::XML}, + {"Raw", FormatSettings::EscapingRule::Raw}}) } diff --git a/src/Core/SettingsEnums.h b/src/Core/SettingsEnums.h index 4bdb3c83ea5..106589f5d24 100644 --- a/src/Core/SettingsEnums.h +++ b/src/Core/SettingsEnums.h @@ -170,4 +170,6 @@ DECLARE_SETTING_ENUM(ShortCircuitFunctionEvaluation) DECLARE_SETTING_ENUM_WITH_RENAME(EnumComparingMode, FormatSettings::EnumComparingMode) +DECLARE_SETTING_ENUM_WITH_RENAME(EscapingRule, FormatSettings::EscapingRule) + } diff --git a/src/DataTypes/DataTypeTuple.cpp b/src/DataTypes/DataTypeTuple.cpp index 4e1a5a05d45..0660f371258 100644 --- a/src/DataTypes/DataTypeTuple.cpp +++ b/src/DataTypes/DataTypeTuple.cpp @@ -29,6 +29,7 @@ namespace ErrorCodes extern const int NOT_FOUND_COLUMN_IN_BLOCK; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int SIZES_OF_COLUMNS_IN_TUPLE_DOESNT_MATCH; + extern const int ILLEGAL_INDEX; } @@ -193,6 +194,14 @@ size_t DataTypeTuple::getPositionByName(const String & name) const throw Exception("Tuple doesn't have element with name '" + name + "'", ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK); } +String DataTypeTuple::getNameByPosition(size_t i) const +{ + if (i == 0 || i > names.size()) + throw Exception(ErrorCodes::ILLEGAL_INDEX, "Index of tuple element ({}) is out of range ([1, {}])", i, names.size()); + + return names[i - 1]; +} + bool DataTypeTuple::textCanContainOnlyValidUTF8() const { diff --git a/src/DataTypes/DataTypeTuple.h b/src/DataTypes/DataTypeTuple.h index 8dae8b7765b..d168d73efbf 100644 --- a/src/DataTypes/DataTypeTuple.h +++ b/src/DataTypes/DataTypeTuple.h @@ -61,6 +61,7 @@ public: const Strings & getElementNames() const { return names; } size_t getPositionByName(const String & name) const; + String getNameByPosition(size_t i) const; bool haveExplicitNames() const { return have_explicit_names; } bool serializeNames() const { return serialize_names; } diff --git a/src/DataTypes/convertMySQLDataType.cpp b/src/DataTypes/convertMySQLDataType.cpp index 5ad8c43d0cf..d11a2a2211a 100644 --- a/src/DataTypes/convertMySQLDataType.cpp +++ b/src/DataTypes/convertMySQLDataType.cpp @@ -20,15 +20,6 @@ namespace DB { -ASTPtr dataTypeConvertToQuery(const DataTypePtr & data_type) -{ - WhichDataType which(data_type); - - if (!which.isNullable()) - return std::make_shared(data_type->getName()); - - return makeASTFunction("Nullable", dataTypeConvertToQuery(typeid_cast(data_type.get())->getNestedType())); -} DataTypePtr convertMySQLDataType(MultiEnum type_support, const std::string & mysql_data_type, diff --git a/src/DataTypes/convertMySQLDataType.h b/src/DataTypes/convertMySQLDataType.h index f1c4a73d6f7..543119bc60e 100644 --- a/src/DataTypes/convertMySQLDataType.h +++ b/src/DataTypes/convertMySQLDataType.h @@ -9,11 +9,6 @@ namespace DB { enum class MySQLDataTypesSupport; -/// Convert data type to query. for example -/// DataTypeUInt8 -> ASTIdentifier(UInt8) -/// DataTypeNullable(DataTypeUInt8) -> ASTFunction(ASTIdentifier(UInt8)) -ASTPtr dataTypeConvertToQuery(const DataTypePtr & data_type); - /// Convert MySQL type to ClickHouse data type.
DataTypePtr convertMySQLDataType(MultiEnum type_support, const std::string & mysql_data_type, bool is_nullable, bool is_unsigned, size_t length, size_t precision, size_t scale); diff --git a/src/Databases/DDLDependencyVisitor.cpp b/src/Databases/DDLDependencyVisitor.cpp index 98f697ef641..e439a58ecf2 100644 --- a/src/Databases/DDLDependencyVisitor.cpp +++ b/src/Databases/DDLDependencyVisitor.cpp @@ -4,11 +4,24 @@ #include #include #include +#include #include namespace DB { +TableNamesSet getDependenciesSetFromCreateQuery(ContextPtr global_context, const ASTPtr & ast) +{ + assert(global_context == global_context->getGlobalContext()); + TableLoadingDependenciesVisitor::Data data; + data.default_database = global_context->getCurrentDatabase(); + data.create_query = ast; + data.global_context = global_context; + TableLoadingDependenciesVisitor visitor{data}; + visitor.visit(ast); + return data.dependencies; +} + void DDLDependencyVisitor::visit(const ASTPtr & ast, Data & data) { /// Looking for functions in column default expressions and dictionary source definition @@ -100,8 +113,9 @@ void DDLDependencyVisitor::extractTableNameFromArgument(const ASTFunction & func qualified_name = std::move(*maybe_qualified_name); } - else if (const auto * identifier = arg->as()) + else if (const auto * identifier = dynamic_cast(arg)) { + /// ASTIdentifier or ASTTableIdentifier auto table_identifier = identifier->createTable(); /// Just return if table identified is invalid if (!table_identifier) diff --git a/src/Databases/DDLDependencyVisitor.h b/src/Databases/DDLDependencyVisitor.h index 5779aee7d33..b5ca976f665 100644 --- a/src/Databases/DDLDependencyVisitor.h +++ b/src/Databases/DDLDependencyVisitor.h @@ -10,6 +10,10 @@ class ASTFunction; class ASTFunctionWithKeyValueArguments; class ASTStorage; +using TableNamesSet = std::unordered_set; + +TableNamesSet getDependenciesSetFromCreateQuery(ContextPtr global_context, const ASTPtr & ast); + /// Visits ASTCreateQuery and extracts names of table (or dictionary) dependencies /// from column default expressions (joinGet, dictGet, etc) /// or dictionary source (for dictionaries from local ClickHouse table). 
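
getDependenciesSetFromCreateQuery above bundles the visitor setup (default database, create query, global context) into a single call returning a TableNamesSet, now an unordered set. A rough sketch of the computation it stands for, assuming the (database, table) references have already been pulled out of dictGet/joinGet-style arguments (all names below are illustrative, not the real visitor API):

    #include <cstddef>
    #include <string>
    #include <unordered_set>
    #include <utility>
    #include <vector>

    struct QualifiedNameSketch
    {
        std::string database;
        std::string table;
        bool operator==(const QualifiedNameSketch & other) const { return database == other.database && table == other.table; }
    };

    struct QualifiedNameSketchHash
    {
        size_t operator()(const QualifiedNameSketch & n) const
        {
            return std::hash<std::string>()(n.database) * 31 + std::hash<std::string>()(n.table);
        }
    };

    using TableNamesSetSketch = std::unordered_set<QualifiedNameSketch, QualifiedNameSketchHash>;

    /// Qualify bare table names with the default database and deduplicate.
    TableNamesSetSketch collectDependenciesSketch(
        const std::vector<std::pair<std::string, std::string>> & referenced,  /// (database, table) pairs
        const std::string & default_database)
    {
        TableNamesSetSketch dependencies;
        for (const auto & [database, table] : referenced)
            dependencies.insert({database.empty() ? default_database : database, table});
        return dependencies;
    }
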
@@ -19,7 +23,6 @@ class DDLDependencyVisitor public: struct Data { - using TableNamesSet = std::set; String default_database; TableNamesSet dependencies; ContextPtr global_context; diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index ae90f1a6900..91a83ebf35c 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -84,7 +84,7 @@ void DatabaseAtomic::drop(ContextPtr) fs::remove_all(getMetadataPath()); } -void DatabaseAtomic::attachTable(const String & name, const StoragePtr & table, const String & relative_table_path) +void DatabaseAtomic::attachTable(ContextPtr /* context_ */, const String & name, const StoragePtr & table, const String & relative_table_path) { assert(relative_table_path != data_path && !relative_table_path.empty()); DetachedTables not_in_use; @@ -96,7 +96,7 @@ void DatabaseAtomic::attachTable(const String & name, const StoragePtr & table, table_name_to_path.emplace(std::make_pair(name, relative_table_path)); } -StoragePtr DatabaseAtomic::detachTable(const String & name) +StoragePtr DatabaseAtomic::detachTable(ContextPtr /* context */, const String & name) { DetachedTables not_in_use; std::unique_lock lock(mutex); @@ -295,9 +295,9 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora try { std::unique_lock lock{mutex}; - if (query.database != database_name) + if (query.getDatabase() != database_name) throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed to `{}`, cannot create table in `{}`", - database_name, query.database); + database_name, query.getDatabase()); /// Do some checks before renaming file from .tmp to .sql not_in_use = cleanupDetachedTables(); assertDetachedTableNotInUse(query.uuid); @@ -314,8 +314,8 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora /// It throws if `table_metadata_path` already exists (it's possible if table was detached) renameNoReplace(table_metadata_tmp_path, table_metadata_path); /// Commit point (a sort of) - attachTableUnlocked(query.table, table, lock); /// Should never throw - table_name_to_path.emplace(query.table, table_data_path); + attachTableUnlocked(query.getTable(), table, lock); /// Should never throw + table_name_to_path.emplace(query.getTable(), table_data_path); } catch (...) 
{ @@ -325,7 +325,7 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora throw; } if (table->storesDataOnDisk()) - tryCreateSymlink(query.table, table_data_path); + tryCreateSymlink(query.getTable(), table_data_path); } void DatabaseAtomic::commitAlterTable(const StorageID & table_id, const String & table_metadata_tmp_path, const String & table_metadata_path, @@ -514,9 +514,17 @@ void DatabaseAtomic::tryCreateMetadataSymlink() } } -void DatabaseAtomic::renameDatabase(const String & new_name) +void DatabaseAtomic::renameDatabase(ContextPtr query_context, const String & new_name) { /// CREATE, ATTACH, DROP, DETACH and RENAME DATABASE must hold DDLGuard + + if (query_context->getSettingsRef().check_table_dependencies) + { + std::lock_guard lock(mutex); + for (auto & table : tables) + DatabaseCatalog::instance().checkTableCanBeRemovedOrRenamed({database_name, table.first}); + } + try { fs::remove(path_to_metadata_symlink); @@ -535,7 +543,13 @@ void DatabaseAtomic::renameDatabase(const String & new_name) { std::lock_guard lock(mutex); - DatabaseCatalog::instance().updateDatabaseName(database_name, new_name); + { + Strings table_names; + table_names.reserve(tables.size()); + for (auto & table : tables) + table_names.push_back(table.first); + DatabaseCatalog::instance().updateDatabaseName(database_name, new_name, table_names); + } database_name = new_name; for (auto & table : tables) diff --git a/src/Databases/DatabaseAtomic.h b/src/Databases/DatabaseAtomic.h index 1fe13f8b27f..eae700d28c5 100644 --- a/src/Databases/DatabaseAtomic.h +++ b/src/Databases/DatabaseAtomic.h @@ -25,7 +25,7 @@ public: String getEngineName() const override { return "Atomic"; } UUID getUUID() const override { return db_uuid; } - void renameDatabase(const String & new_name) override; + void renameDatabase(ContextPtr query_context, const String & new_name) override; void renameTable( ContextPtr context, @@ -37,8 +37,8 @@ public: void dropTable(ContextPtr context, const String & table_name, bool no_delay) override; - void attachTable(const String & name, const StoragePtr & table, const String & relative_table_path) override; - StoragePtr detachTable(const String & name) override; + void attachTable(ContextPtr context, const String & name, const StoragePtr & table, const String & relative_table_path) override; + StoragePtr detachTable(ContextPtr context, const String & name) override; String getTableDataPath(const String & table_name) const override; String getTableDataPath(const ASTCreateQuery & query) const override; diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 03d91324672..264807534b3 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include @@ -12,9 +13,9 @@ #include #include #include -#include +#include #include -#include +#include #include "config_core.h" @@ -55,6 +56,7 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; extern const int UNKNOWN_DATABASE_ENGINE; extern const int CANNOT_CREATE_DATABASE; + extern const int NOT_IMPLEMENTED; } DatabasePtr DatabaseFactory::get(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context) @@ -103,7 +105,7 @@ static inline ValueType safeGetLiteralValue(const ASTPtr &ast, const String &eng DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context) { auto * engine_define = create.storage; - const String & database_name = 
create.database;
+    const String & database_name = create.getDatabase();

     const String & engine_name = engine_define->engine->name;
     const UUID & uuid = create.uuid;
@@ -211,14 +213,22 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String
         if (engine_define->settings)
             materialize_mode_settings->loadFromQuery(*engine_define);

-        if (create.uuid == UUIDHelpers::Nil)
-            return std::make_shared<DatabaseMaterializedMySQL<DatabaseOrdinary>>(
-                context, database_name, metadata_path, uuid, configuration.database, std::move(mysql_pool),
-                std::move(client), std::move(materialize_mode_settings));
-        else
-            return std::make_shared<DatabaseMaterializedMySQL<DatabaseAtomic>>(
-                context, database_name, metadata_path, uuid, configuration.database, std::move(mysql_pool),
-                std::move(client), std::move(materialize_mode_settings));
+        if (uuid == UUIDHelpers::Nil)
+        {
+            auto print_create_ast = create.clone();
+            print_create_ast->as<ASTCreateQuery>()->attach = false;
+            throw Exception(
+                fmt::format(
+                    "The MaterializedMySQL database engine no longer supports Ordinary databases. To re-create the database, delete "
+                    "the old one by executing \"rm -rf {}{{,.sql}}\", then re-create the database with the following query: {}",
+                    metadata_path,
+                    queryToString(print_create_ast)),
+                ErrorCodes::NOT_IMPLEMENTED);
+        }
+
+        return std::make_shared<DatabaseMaterializedMySQL>(
+            context, database_name, metadata_path, uuid, configuration.database, std::move(mysql_pool),
+            std::move(client), std::move(materialize_mode_settings));
     }
     catch (...)
     {
diff --git a/src/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp
index f4336fc7ef9..4b3e06e318e 100644
--- a/src/Databases/DatabaseLazy.cpp
+++ b/src/Databases/DatabaseLazy.cpp
@@ -39,7 +39,7 @@ DatabaseLazy::DatabaseLazy(const String & name_, const String & metadata_path_,
 void DatabaseLazy::loadStoredObjects(
     ContextMutablePtr local_context, bool /* force_restore */, bool /*force_attach*/, bool /* skip_startup_tables */)
 {
-    iterateMetadataFiles(local_context, [this](const String & file_name)
+    iterateMetadataFiles(local_context, [this, &local_context](const String & file_name)
     {
         const std::string table_name = unescapeForFileName(file_name.substr(0, file_name.size() - 4));

@@ -50,7 +50,7 @@ void DatabaseLazy::loadStoredObjects(
             return;
         }

-        attachTable(table_name, nullptr, {});
+        attachTable(local_context, table_name, nullptr, {});
     });
 }

@@ -160,7 +160,7 @@ bool DatabaseLazy::empty() const
     return tables_cache.empty();
 }

-void DatabaseLazy::attachTable(const String & table_name, const StoragePtr & table, const String &)
+void DatabaseLazy::attachTable(ContextPtr /* context_ */, const String & table_name, const StoragePtr & table, const String &)
 {
     LOG_DEBUG(log, "Attach table {}.", backQuote(table_name));
     std::lock_guard lock(mutex);
@@ -175,7 +175,7 @@ void DatabaseLazy::attachTable(const String & table_name, const StoragePtr & tab
     it->second.expiration_iterator = cache_expiration_queue.emplace(cache_expiration_queue.end(), current_time, table_name);
 }

-StoragePtr DatabaseLazy::detachTable(const String & table_name)
+StoragePtr DatabaseLazy::detachTable(ContextPtr /* context */, const String & table_name)
 {
     StoragePtr res;
     {
diff --git a/src/Databases/DatabaseLazy.h b/src/Databases/DatabaseLazy.h
index 45c816c2e76..3a7d7b14be1 100644
--- a/src/Databases/DatabaseLazy.h
+++ b/src/Databases/DatabaseLazy.h
@@ -64,9 +64,9 @@ public:
     DatabaseTablesIteratorPtr getTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name) const override;

-    void attachTable(const String & table_name, const StoragePtr & table, const String & relative_table_path) override;
+    void attachTable(ContextPtr context, const String & table_name, const StoragePtr & table, const String & relative_table_path) override;

-    StoragePtr detachTable(const String & table_name) override;
+    StoragePtr detachTable(ContextPtr context, const String & table_name) override;

     void shutdown() override;
diff --git a/src/Databases/DatabaseMemory.cpp b/src/Databases/DatabaseMemory.cpp
index 8d1220cc1a1..8128bb8378e 100644
--- a/src/Databases/DatabaseMemory.cpp
+++ b/src/Databases/DatabaseMemory.cpp
@@ -2,6 +2,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -74,7 +75,7 @@ void DatabaseMemory::dropTable(
 ASTPtr DatabaseMemory::getCreateDatabaseQuery() const
 {
     auto create_query = std::make_shared<ASTCreateQuery>();
-    create_query->database = getDatabaseName();
+    create_query->setDatabase(getDatabaseName());
     create_query->set(create_query->storage, std::make_shared<ASTStorage>());
     create_query->storage->set(create_query->storage->engine, makeASTFunction(getEngineName()));

@@ -111,7 +112,7 @@ void DatabaseMemory::drop(ContextPtr local_context)
     std::filesystem::remove_all(local_context->getPath() + data_path);
 }

-void DatabaseMemory::alterTable(ContextPtr, const StorageID & table_id, const StorageInMemoryMetadata & metadata)
+void DatabaseMemory::alterTable(ContextPtr local_context, const StorageID & table_id, const StorageInMemoryMetadata & metadata)
 {
     std::lock_guard lock{mutex};
     auto it = create_queries.find(table_id.table_name);
@@ -119,6 +120,8 @@ void DatabaseMemory::alterTable(ContextPtr, const StorageID & table_id, const St
         throw Exception(ErrorCodes::UNKNOWN_TABLE, "Cannot alter: There is no metadata of table {}", table_id.getNameForLogs());
     applyMetadataChangesToCreateQuery(it->second, metadata);
+    TableNamesSet new_dependencies = getDependenciesSetFromCreateQuery(local_context->getGlobalContext(), it->second);
+    DatabaseCatalog::instance().updateLoadingDependencies(table_id, std::move(new_dependencies));
 }

 }
diff --git a/src/Databases/DatabaseMemory.h b/src/Databases/DatabaseMemory.h
index a661d1c49b0..b854d9be1f3 100644
--- a/src/Databases/DatabaseMemory.h
+++ b/src/Databases/DatabaseMemory.h
@@ -42,7 +42,7 @@ public:
     /// TODO May be it's better to use DiskMemory for such tables.
     /// To save data on disk it's possible to explicitly CREATE DATABASE db ENGINE=Ordinary in clickhouse-local.
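// Both DatabaseMemory::alterTable (above) and DatabaseOrdinary::alterTable (below)
// now re-derive the table's loading dependencies from the rewritten CREATE query and
// push them into the DatabaseCatalog, so dependency-aware DROP/RENAME checks stay
// correct after an ALTER. The flow, using the helpers this patch introduces:
//
//     TableNamesSet deps = getDependenciesSetFromCreateQuery(context->getGlobalContext(), create_ast);
//     DatabaseCatalog::instance().updateLoadingDependencies(table_id, std::move(deps));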
     String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; }
-    String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.table); }
+    String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); }

     UUID tryGetTableUUID(const String & table_name) const override;
diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp
index 55e5a3071bc..401d33984ad 100644
--- a/src/Databases/DatabaseOnDisk.cpp
+++ b/src/Databases/DatabaseOnDisk.cpp
@@ -51,7 +51,7 @@ std::pair<String, StoragePtr> createTableFromAST(
     bool force_restore)
 {
     ast_create_query.attach = true;
-    ast_create_query.database = database_name;
+    ast_create_query.setDatabase(database_name);

     if (ast_create_query.as_table_function)
     {
@@ -60,9 +60,9 @@ std::pair<String, StoragePtr> createTableFromAST(
         ColumnsDescription columns;
         if (ast_create_query.columns_list && ast_create_query.columns_list->columns)
             columns = InterpreterCreateQuery::getColumnsDescription(*ast_create_query.columns_list->columns, context, true);
-        StoragePtr storage = table_function->execute(ast_create_query.as_table_function, context, ast_create_query.table, std::move(columns));
+        StoragePtr storage = table_function->execute(ast_create_query.as_table_function, context, ast_create_query.getTable(), std::move(columns));
         storage->renameInMemory(ast_create_query);
-        return {ast_create_query.table, storage};
+        return {ast_create_query.getTable(), storage};
     }

     ColumnsDescription columns;
@@ -82,7 +82,7 @@ std::pair<String, StoragePtr> createTableFromAST(

     return
     {
-        ast_create_query.table,
+        ast_create_query.getTable(),
         StorageFactory::instance().get(
             ast_create_query,
             table_data_path_relative,
@@ -112,7 +112,7 @@ String getObjectDefinitionFromCreateQuery(const ASTPtr & query)
     /// We remove everything that is not needed for ATTACH from the query.
     assert(!create->temporary);
-    create->database.clear();
+    create->database.reset();
     create->as_database.clear();
     create->as_table.clear();
     create->if_not_exists = false;
@@ -129,7 +129,7 @@ String getObjectDefinitionFromCreateQuery(const ASTPtr & query)
     create->out_file = nullptr;

     if (create->uuid != UUIDHelpers::Nil)
-        create->table = TABLE_WITH_UUID_NAME_PLACEHOLDER;
+        create->setTable(TABLE_WITH_UUID_NAME_PLACEHOLDER);

     WriteBufferFromOwnString statement_buf;
     formatAST(*create, statement_buf, false);
@@ -161,7 +161,7 @@ void DatabaseOnDisk::createTable(
 {
     const auto & settings = local_context->getSettingsRef();
     const auto & create = query->as<ASTCreateQuery &>();
-    assert(table_name == create.table);
+    assert(table_name == create.getTable());

     /// Create a file with metadata if necessary - if the query is not ATTACH.
     /// Write the query of `ATTACH table` to it.
@@ -185,7 +185,7 @@ void DatabaseOnDisk::createTable(
     {
         /// Metadata already exists, table was detached
         removeDetachedPermanentlyFlag(local_context, table_name, table_metadata_path, true);
-        attachTable(table_name, table, getTableDataPath(create));
+        attachTable(local_context, table_name, table, getTableDataPath(create));
         return;
     }

@@ -246,12 +246,12 @@ void DatabaseOnDisk::removeDetachedPermanentlyFlag(ContextPtr, const String & ta
 void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const StoragePtr & table,
                                        const String & table_metadata_tmp_path, const String & table_metadata_path,
-                                       ContextPtr /*query_context*/)
+                                       ContextPtr query_context)
 {
     try
     {
         /// Add a table to the map of known tables.
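// attachTable()/detachTable() now receive the caller's query context instead of the
// database's own global context. Most engines ignore it (note the /* context */
// placeholders), but engines that restrict DDL, such as MaterializedMySQL and
// MaterializedPostgreSQL further down, inspect context->isInternalQuery() to tell
// their own sync-thread queries apart from user queries.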
- attachTable(query.table, table, getTableDataPath(query)); + attachTable(query_context, query.getTable(), table, getTableDataPath(query)); /// If it was ATTACH query and file with table metadata already exist /// (so, ATTACH is done after DETACH), then rename atomically replaces old file with new one. @@ -264,9 +264,9 @@ void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const Stora } } -void DatabaseOnDisk::detachTablePermanently(ContextPtr, const String & table_name) +void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const String & table_name) { - auto table = detachTable(table_name); + auto table = detachTable(query_context, table_name); fs::path detached_permanently_flag(getObjectMetadataPath(table_name) + detached_suffix); try @@ -288,7 +288,7 @@ void DatabaseOnDisk::dropTable(ContextPtr local_context, const String & table_na if (table_data_path_relative.empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Path is empty"); - StoragePtr table = detachTable(table_name); + StoragePtr table = detachTable(local_context, table_name); /// This is possible for Lazy database. if (!table) @@ -309,7 +309,7 @@ void DatabaseOnDisk::dropTable(ContextPtr local_context, const String & table_na catch (...) { LOG_WARNING(log, getCurrentExceptionMessage(__PRETTY_FUNCTION__)); - attachTable(table_name, table, table_data_path_relative); + attachTable(local_context, table_name, table, table_data_path_relative); if (renamed) fs::rename(table_metadata_path_drop, table_metadata_path); throw; @@ -371,8 +371,8 @@ void DatabaseOnDisk::renameTable( String table_metadata_path; ASTPtr attach_query; /// DatabaseLazy::detachTable may return nullptr even if table exists, so we need tryGetTable for this case. - StoragePtr table = tryGetTable(table_name, getContext()); - detachTable(table_name); + StoragePtr table = tryGetTable(table_name, local_context); + detachTable(local_context, table_name); UUID prev_uuid = UUIDHelpers::Nil; try { @@ -382,8 +382,8 @@ void DatabaseOnDisk::renameTable( table_metadata_path = getObjectMetadataPath(table_name); attach_query = parseQueryFromMetadata(log, local_context, table_metadata_path); auto & create = attach_query->as(); - create.database = to_database.getDatabaseName(); - create.table = to_table_name; + create.setDatabase(to_database.getDatabaseName()); + create.setTable(to_table_name); if (from_ordinary_to_atomic) create.uuid = UUIDHelpers::generateV4(); if (from_atomic_to_ordinary) @@ -397,12 +397,12 @@ void DatabaseOnDisk::renameTable( } catch (const Exception &) { - attachTable(table_name, table, table_data_relative_path); + attachTable(local_context, table_name, table, table_data_relative_path); throw; } catch (const Poco::Exception & e) { - attachTable(table_name, table, table_data_relative_path); + attachTable(local_context, table_name, table, table_data_relative_path); /// Better diagnostics. 
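// The detach/re-attach calls above are the rollback path of renameTable(): the table
// is detached up front, and every failure branch re-attaches it, now with the same
// query context, before rethrowing, so a failed RENAME leaves the database as it was.
// Roughly:
//
//     StoragePtr table = tryGetTable(table_name, local_context);
//     detachTable(local_context, table_name);
//     try { /* rewrite .sql metadata, move data, attach in target database */ }
//     catch (...) { attachTable(local_context, table_name, table, table_data_relative_path); throw; }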
         throw Exception{Exception::CreateFromPocoTag{}, e};
     }

@@ -430,7 +430,11 @@ void DatabaseOnDisk::renameTable(

 ASTPtr DatabaseOnDisk::getCreateTableQueryImpl(const String & table_name, ContextPtr, bool throw_on_error) const
 {
     ASTPtr ast;
-    bool has_table = tryGetTable(table_name, getContext()) != nullptr;
+    StoragePtr storage = tryGetTable(table_name, getContext());
+    bool has_table = storage != nullptr;
+    bool is_system_storage = false;
+    if (has_table)
+        is_system_storage = storage->isSystemStorage();
     auto table_metadata_path = getObjectMetadataPath(table_name);
     try
     {
@@ -441,6 +445,8 @@ ASTPtr DatabaseOnDisk::getCreateTableQueryImpl(const String & table_name, Contex
         if (!has_table && e.code() == ErrorCodes::FILE_DOESNT_EXIST && throw_on_error)
             throw Exception{"Table " + backQuote(table_name) + " doesn't exist", ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY};
+        else if (is_system_storage)
+            ast = getCreateQueryFromStorage(table_name, storage, throw_on_error);
         else if (throw_on_error)
             throw;
     }
@@ -458,7 +464,7 @@ ASTPtr DatabaseOnDisk::getCreateDatabaseQuery() const
         ast = parseQueryFromMetadata(log, getContext(), database_metadata_path, true);
         auto & ast_create_query = ast->as<ASTCreateQuery &>();
         ast_create_query.attach = false;
-        ast_create_query.database = database_name;
+        ast_create_query.setDatabase(database_name);
     }
     if (!ast)
     {
@@ -642,18 +648,18 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata(
         return nullptr;

     auto & create = ast->as<ASTCreateQuery &>();
-    if (!create.table.empty() && create.uuid != UUIDHelpers::Nil)
+    if (create.table && create.uuid != UUIDHelpers::Nil)
     {
         String table_name = unescapeForFileName(fs::path(metadata_file_path).stem());
-        if (create.table != TABLE_WITH_UUID_NAME_PLACEHOLDER && logger)
+        if (create.getTable() != TABLE_WITH_UUID_NAME_PLACEHOLDER && logger)
             LOG_WARNING(
                 logger,
                 "File {} contains both UUID and table name. Will use name `{}` instead of `{}`",
                 metadata_file_path,
                 table_name,
-                create.table);
-        create.table = table_name;
+                create.getTable());
+        create.setTable(table_name);
     }

     return ast;
 }
@@ -667,12 +673,38 @@ ASTPtr DatabaseOnDisk::getCreateQueryFromMetadata(const String & database_metada
     {
         auto & ast_create_query = ast->as<ASTCreateQuery &>();
         ast_create_query.attach = false;
-        ast_create_query.database = getDatabaseName();
+        ast_create_query.setDatabase(getDatabaseName());
     }

     return ast;
 }

+ASTPtr DatabaseOnDisk::getCreateQueryFromStorage(const String & table_name, const StoragePtr & storage, bool throw_on_error) const
+{
+    auto metadata_ptr = storage->getInMemoryMetadataPtr();
+    if (metadata_ptr == nullptr)
+    {
+        if (throw_on_error)
+            throw Exception(ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY, "Cannot get metadata of {}.{}", backQuote(getDatabaseName()), backQuote(table_name));
+        else
+            return nullptr;
+    }
+
+    /// setup create table query storage info.
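// The fallback just added to getCreateTableQueryImpl() means SHOW CREATE TABLE now
// works for system storages that have no .sql file on disk: the block below
// synthesizes an ASTStorage whose engine function is simply the storage's own name
// and delegates column reconstruction to the shared DB::getCreateQueryFromStorage()
// helper from DatabasesCommon.cpp, tagging the result with a comment so the synthetic
// origin of the query stays visible to the user.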
+    auto ast_engine = std::make_shared<ASTFunction>();
+    ast_engine->name = storage->getName();
+    auto ast_storage = std::make_shared<ASTStorage>();
+    ast_storage->set(ast_storage->engine, ast_engine);
+
+    auto create_table_query = DB::getCreateQueryFromStorage(storage, ast_storage, false,
+                                                            getContext()->getSettingsRef().max_parser_depth, throw_on_error);
+
+    create_table_query->set(create_table_query->as<ASTCreateQuery>()->comment,
+                            std::make_shared<ASTLiteral>("SYSTEM TABLE is built on the fly."));
+
+    return create_table_query;
+}
+
 void DatabaseOnDisk::modifySettingsMetadata(const SettingsChanges & settings_changes, ContextPtr query_context)
 {
     std::lock_guard lock(modify_settings_mutex);
diff --git a/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h
index 81d5bd5adcb..9bc8fa3bcef 100644
--- a/src/Databases/DatabaseOnDisk.h
+++ b/src/Databases/DatabaseOnDisk.h
@@ -63,7 +63,7 @@ public:
     String getDataPath() const override { return data_path; }
     String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; }
-    String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.table); }
+    String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); }
     String getMetadataPath() const override { return metadata_path; }

     static ASTPtr parseQueryFromMetadata(Poco::Logger * log, ContextPtr context, const String & metadata_file_path, bool throw_on_error = true, bool remove_empty = false);
@@ -89,6 +89,7 @@ protected:
         bool throw_on_error) const override;

     ASTPtr getCreateQueryFromMetadata(const String & metadata_path, bool throw_on_error) const;
+    ASTPtr getCreateQueryFromStorage(const String & table_name, const StoragePtr & storage, bool throw_on_error) const;

     virtual void commitCreateTable(const ASTCreateQuery & query, const StoragePtr & table,
                                    const String & table_metadata_tmp_path, const String & table_metadata_path, ContextPtr query_context);
diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp
index 22b5430a08a..b7a0aff24d6 100644
--- a/src/Databases/DatabaseOrdinary.cpp
+++ b/src/Databases/DatabaseOrdinary.cpp
@@ -50,12 +50,12 @@ namespace
             context,
             force_restore);

-        database.attachTable(table_name, table, database.getTableDataPath(query));
+        database.attachTable(context, table_name, table, database.getTableDataPath(query));
     }
     catch (Exception & e)
     {
         e.addMessage(
-            "Cannot attach table " + backQuote(database_name) + "." + backQuote(query.table) + " from metadata file " + metadata_path
+            "Cannot attach table " + backQuote(database_name) + "." + backQuote(query.getTable()) + " from metadata file " + metadata_path
             + " from query " + serializeAST(query));
         throw;
     }
@@ -168,7 +168,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
         if (ast)
         {
             auto * create_query = ast->as<ASTCreateQuery>();
-            create_query->database = database_name;
+            create_query->setDatabase(database_name);

             if (fs::exists(full_path.string() + detached_suffix))
             {
@@ -181,27 +181,21 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
                 return;
             }

-            TableLoadingDependenciesVisitor::Data data;
-            data.default_database = metadata.default_database;
-            data.create_query = ast;
-            data.global_context = getContext();
-            TableLoadingDependenciesVisitor visitor{data};
-            visitor.visit(ast);
-            QualifiedTableName qualified_name{database_name, create_query->table};
+            TableNamesSet loading_dependencies = getDependenciesSetFromCreateQuery(getContext(), ast);
+            QualifiedTableName qualified_name{database_name, create_query->getTable()};

             std::lock_guard lock{metadata.mutex};
             metadata.parsed_tables[qualified_name] = ParsedTableMetadata{full_path.string(), ast};
-            if (data.dependencies.empty())
+            if (loading_dependencies.empty())
             {
                 metadata.independent_database_objects.emplace_back(std::move(qualified_name));
             }
             else
             {
-                for (const auto & dependency : data.dependencies)
-                {
-                    metadata.dependencies_info[dependency].dependent_database_objects.push_back(qualified_name);
-                    ++metadata.dependencies_info[qualified_name].dependencies_count;
-                }
+                for (const auto & dependency : loading_dependencies)
+                    metadata.dependencies_info[dependency].dependent_database_objects.insert(qualified_name);
+                assert(metadata.dependencies_info[qualified_name].dependencies.empty());
+                metadata.dependencies_info[qualified_name].dependencies = std::move(loading_dependencies);
             }
             metadata.total_dictionaries += create_query->is_dictionary;
         }
@@ -303,6 +297,9 @@ void DatabaseOrdinary::alterTable(ContextPtr local_context, const StorageID & ta
         out.close();
     }

+    TableNamesSet new_dependencies = getDependenciesSetFromCreateQuery(local_context->getGlobalContext(), ast);
+    DatabaseCatalog::instance().updateLoadingDependencies(table_id, std::move(new_dependencies));
+
     commitAlterTable(table_id, table_metadata_tmp_path, table_metadata_path, statement, local_context);
 }

diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp
index 3f253ed0924..89d1f96857e 100644
--- a/src/Databases/DatabaseReplicated.cpp
+++ b/src/Databases/DatabaseReplicated.cpp
@@ -349,9 +349,9 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_
     /// Replicas will set correct name of current database in query context (database name can be different on replicas)
     if (auto * ddl_query = dynamic_cast<ASTQueryWithTableAndOutput *>(query.get()))
     {
-        if (ddl_query->database != getDatabaseName())
+        if (ddl_query->getDatabase() != getDatabaseName())
             throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed");
-        ddl_query->database.clear();
+        ddl_query->database.reset();

         if (auto * create = query->as<ASTCreateQuery>())
         {
@@ -391,7 +391,7 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_
                 /// NOTE: we cannot check here that substituted values will be actually different on shards and replicas.
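// Context for the check that follows: the ZooKeeper path of a ReplicatedMergeTree
// table inside a Replicated database must differ between shards (and the replica name
// between replicas). The code probes this by expanding the user-supplied path with
// Macros::MacroExpansionInfo and inspecting which macros actually fired, roughly:
//
//     Macros::MacroExpansionInfo info;
//     info.table_id = {getDatabaseName(), create->getTable(), create->uuid};
//     query_context->getMacros()->expand(maybe_path, info);
//     bool maybe_shard_macros = info.expanded_other; /// something beyond {uuid} expanded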
Macros::MacroExpansionInfo info; - info.table_id = {getDatabaseName(), create->table, create->uuid}; + info.table_id = {getDatabaseName(), create->getTable(), create->uuid}; query_context->getMacros()->expand(maybe_path, info); bool maybe_shard_macros = info.expanded_other; info.expanded_other = false; @@ -715,13 +715,13 @@ ASTPtr DatabaseReplicated::parseQueryFromMetadataInZooKeeper(const String & node auto ast = parseQuery(parser, query, description, 0, getContext()->getSettingsRef().max_parser_depth); auto & create = ast->as(); - if (create.uuid == UUIDHelpers::Nil || create.table != TABLE_WITH_UUID_NAME_PLACEHOLDER || !create.database.empty()) + if (create.uuid == UUIDHelpers::Nil || create.getTable() != TABLE_WITH_UUID_NAME_PLACEHOLDER || create.database) throw Exception(ErrorCodes::LOGICAL_ERROR, "Got unexpected query from {}: {}", node_name, query); bool is_materialized_view_with_inner_table = create.is_materialized_view && create.to_table_id.empty(); - create.database = getDatabaseName(); - create.table = unescapeForFileName(node_name); + create.setDatabase(getDatabaseName()); + create.setTable(unescapeForFileName(node_name)); create.attach = is_materialized_view_with_inner_table; return ast; @@ -811,7 +811,7 @@ void DatabaseReplicated::commitCreateTable(const ASTCreateQuery & query, const S assert(!ddl_worker->isCurrentlyActive() || txn); if (txn && txn->isInitialQuery()) { - String metadata_zk_path = zookeeper_path + "/metadata/" + escapeForFileName(query.table); + String metadata_zk_path = zookeeper_path + "/metadata/" + escapeForFileName(query.getTable()); String statement = getObjectDefinitionFromCreateQuery(query.clone()); /// zk::multi(...) will throw if `metadata_zk_path` exists txn->addOp(zkutil::makeCreateRequest(metadata_zk_path, statement, zkutil::CreateMode::Persistent)); diff --git a/src/Databases/DatabaseReplicated.h b/src/Databases/DatabaseReplicated.h index 34cfb7df151..cc25b2128fb 100644 --- a/src/Databases/DatabaseReplicated.h +++ b/src/Databases/DatabaseReplicated.h @@ -46,7 +46,9 @@ public: /// then it will be executed on all replicas. 
     BlockIO tryEnqueueReplicatedDDL(const ASTPtr & query, ContextPtr query_context);

-    void stopReplication();
+    bool hasReplicationThread() const override { return true; }
+
+    void stopReplication() override;

     String getFullReplicaName() const;
     static std::pair<String, String> parseFullReplicaName(const String & name);
diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp
index e5e54f287ec..523b657b065 100644
--- a/src/Databases/DatabasesCommon.cpp
+++ b/src/Databases/DatabasesCommon.cpp
@@ -20,6 +20,8 @@ namespace ErrorCodes
     extern const int UNKNOWN_TABLE;
     extern const int UNKNOWN_DATABASE;
     extern const int NOT_IMPLEMENTED;
+    extern const int LOGICAL_ERROR;
+    extern const int CANNOT_GET_CREATE_TABLE_QUERY;
 }

 void applyMetadataChangesToCreateQuery(const ASTPtr & query, const StorageInMemoryMetadata & metadata)
@@ -29,7 +31,7 @@ void applyMetadataChangesToCreateQuery(const ASTPtr & query, const StorageInMemo
     bool has_structure = ast_create_query.columns_list && ast_create_query.columns_list->columns;
     if (ast_create_query.as_table_function && !has_structure)
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot alter table {} because it was created AS table function"
-                                                     " and doesn't have structure in metadata", backQuote(ast_create_query.table));
+                                                     " and doesn't have structure in metadata", backQuote(ast_create_query.getTable()));

     assert(has_structure);
     ASTPtr new_columns = InterpreterCreateQuery::formatColumns(metadata.columns);
@@ -85,6 +87,66 @@ void applyMetadataChangesToCreateQuery(const ASTPtr & query, const StorageInMemo
 }

+ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & ast_storage, bool only_ordinary, uint32_t max_parser_depth, bool throw_on_error)
+{
+    auto table_id = storage->getStorageID();
+    auto metadata_ptr = storage->getInMemoryMetadataPtr();
+    if (metadata_ptr == nullptr)
+    {
+        if (throw_on_error)
+            throw Exception(ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY, "Cannot get metadata of {}.{}", backQuote(table_id.database_name), backQuote(table_id.table_name));
+        else
+            return nullptr;
+    }
+
+    auto create_table_query = std::make_shared<ASTCreateQuery>();
+    create_table_query->attach = false;
+    create_table_query->setTable(table_id.table_name);
+    create_table_query->setDatabase(table_id.database_name);
+    create_table_query->set(create_table_query->storage, ast_storage);
+
+    /// setup create table query columns info.
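// The column loop below round-trips each column's type through the parser: the
// DataTypePtr is rendered as a string (e.g. "Nullable(String)") and re-parsed with
// ParserDataType into an AST for the column declaration. This replaces the per-engine
// getColumnDeclaration()/dataTypeConvertToQuery() helpers deleted from DatabaseSQLite
// and DatabaseMySQL later in this patch. The parsing idiom, as used here:
//
//     auto type_name = column.type->getName();
//     Tokens tokens(type_name.c_str(), type_name.c_str() + type_name.length());
//     IParser::Pos pos(tokens, max_parser_depth);
//     ParserDataType parser;
//     Expected expected;
//     ASTPtr ast_type;
//     bool parsed_ok = parser.parse(pos, ast_type, expected);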
+    {
+        auto ast_columns_list = std::make_shared<ASTColumns>();
+        auto ast_expression_list = std::make_shared<ASTExpressionList>();
+        NamesAndTypesList columns;
+        if (only_ordinary)
+            columns = metadata_ptr->columns.getOrdinary();
+        else
+            columns = metadata_ptr->columns.getAll();
+        for (const auto & column_name_and_type: columns)
+        {
+            const auto & ast_column_declaration = std::make_shared<ASTColumnDeclaration>();
+            ast_column_declaration->name = column_name_and_type.name;
+            /// parse the type name back into an AST
+            {
+                ASTPtr ast_type;
+                auto type_name = column_name_and_type.type->getName();
+                const auto * string_end = type_name.c_str() + type_name.length();
+                Expected expected;
+                expected.max_parsed_pos = string_end;
+                Tokens tokens(type_name.c_str(), string_end);
+                IParser::Pos pos(tokens, max_parser_depth);
+                ParserDataType parser;
+                if (!parser.parse(pos, ast_type, expected))
+                {
+                    if (throw_on_error)
+                        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot parse metadata of {}.{}", backQuote(table_id.database_name), backQuote(table_id.table_name));
+                    else
+                        return nullptr;
+                }
+                ast_column_declaration->type = ast_type;
+            }
+            ast_expression_list->children.emplace_back(ast_column_declaration);
+        }
+
+        ast_columns_list->set(ast_columns_list->columns, ast_expression_list);
+        create_table_query->set(create_table_query->columns_list, ast_columns_list);
+    }
+    return create_table_query;
+}
+
+
 DatabaseWithOwnTablesBase::DatabaseWithOwnTablesBase(const String & name_, const String & logger, ContextPtr context_)
         : IDatabase(name_), WithContext(context_->getGlobalContext()), log(&Poco::Logger::get(logger))
 {
@@ -125,7 +187,7 @@ bool DatabaseWithOwnTablesBase::empty() const
     return tables.empty();
 }

-StoragePtr DatabaseWithOwnTablesBase::detachTable(const String & table_name)
+StoragePtr DatabaseWithOwnTablesBase::detachTable(ContextPtr /* context_ */, const String & table_name)
 {
     std::unique_lock lock(mutex);
     return detachTableUnlocked(table_name, lock);
@@ -152,7 +214,7 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n
     return res;
 }

-void DatabaseWithOwnTablesBase::attachTable(const String & table_name, const StoragePtr & table, const String &)
+void DatabaseWithOwnTablesBase::attachTable(ContextPtr /* context_ */, const String & table_name, const StoragePtr & table, const String &)
 {
     std::unique_lock lock(mutex);
     attachTableUnlocked(table_name, table, lock);
diff --git a/src/Databases/DatabasesCommon.h b/src/Databases/DatabasesCommon.h
index 59a2ddc3c41..fe9cf5198f0 100644
--- a/src/Databases/DatabasesCommon.h
+++ b/src/Databases/DatabasesCommon.h
@@ -14,6 +14,7 @@ namespace DB
 {

 void applyMetadataChangesToCreateQuery(const ASTPtr & query, const StorageInMemoryMetadata & metadata);
+ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & ast_storage, bool only_ordinary, uint32_t max_parser_depth, bool throw_on_error);

 class Context;

@@ -27,9 +28,9 @@ public:

     bool empty() const override;

-    void attachTable(const String & table_name, const StoragePtr & table, const String & relative_table_path) override;
+    void attachTable(ContextPtr context, const String & table_name, const StoragePtr & table, const String & relative_table_path) override;

-    StoragePtr detachTable(const String & table_name) override;
+    StoragePtr detachTable(ContextPtr context, const String & table_name) override;

     DatabaseTablesIteratorPtr getTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name) const override;
diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h
index 819a11c2785..b1aa4eb1aae 100644
---
a/src/Databases/IDatabase.h +++ b/src/Databases/IDatabase.h @@ -197,13 +197,13 @@ public: /// Add a table to the database, but do not add it to the metadata. The database may not support this method. /// /// Note: ATTACH TABLE statement actually uses createTable method. - virtual void attachTable(const String & /*name*/, const StoragePtr & /*table*/, [[maybe_unused]] const String & relative_table_path = {}) + virtual void attachTable(ContextPtr /* context */, const String & /*name*/, const StoragePtr & /*table*/, [[maybe_unused]] const String & relative_table_path = {}) { throw Exception("There is no ATTACH TABLE query for Database" + getEngineName(), ErrorCodes::NOT_IMPLEMENTED); } /// Forget about the table without deleting it, and return it. The database may not support this method. - virtual StoragePtr detachTable(const String & /*name*/) + virtual StoragePtr detachTable(ContextPtr /* context */, const String & /*name*/) { throw Exception("There is no DETACH TABLE query for Database" + getEngineName(), ErrorCodes::NOT_IMPLEMENTED); } @@ -279,7 +279,7 @@ public: /// Get UUID of database. virtual UUID getUUID() const { return UUIDHelpers::Nil; } - virtual void renameDatabase(const String & /*new_name*/) + virtual void renameDatabase(ContextPtr, const String & /*new_name*/) { throw Exception(getEngineName() + ": RENAME DATABASE is not supported", ErrorCodes::NOT_IMPLEMENTED); } @@ -323,6 +323,13 @@ public: getEngineName()); } + virtual bool hasReplicationThread() const { return false; } + + virtual void stopReplication() + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "Database engine {} does not run a replication thread!", getEngineName()); + } + virtual ~IDatabase() = default; protected: diff --git a/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp b/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp index a2e03b10d47..5da5c660fb3 100644 --- a/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp +++ b/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp @@ -5,8 +5,6 @@ # include # include -# include -# include # include # include # include @@ -23,32 +21,9 @@ namespace DB namespace ErrorCodes { extern const int NOT_IMPLEMENTED; - extern const int LOGICAL_ERROR; } -template <> -DatabaseMaterializedMySQL::DatabaseMaterializedMySQL( - ContextPtr context_, - const String & database_name_, - const String & metadata_path_, - UUID /*uuid*/, - const String & mysql_database_name_, - mysqlxx::Pool && pool_, - MySQLClient && client_, - std::unique_ptr settings_) - : DatabaseOrdinary( - database_name_, - metadata_path_, - "data/" + escapeForFileName(database_name_) + "/", - "DatabaseMaterializedMySQL (" + database_name_ + ")", - context_) - , settings(std::move(settings_)) - , materialize_thread(context_, database_name_, mysql_database_name_, std::move(pool_), std::move(client_), settings.get()) -{ -} - -template <> -DatabaseMaterializedMySQL::DatabaseMaterializedMySQL( +DatabaseMaterializedMySQL::DatabaseMaterializedMySQL( ContextPtr context_, const String & database_name_, const String & metadata_path_, @@ -57,16 +32,15 @@ DatabaseMaterializedMySQL::DatabaseMaterializedMySQL( mysqlxx::Pool && pool_, MySQLClient && client_, std::unique_ptr settings_) - : DatabaseAtomic(database_name_, metadata_path_, uuid, "DatabaseMaterializedMySQL (" + database_name_ + ")", context_) + : DatabaseAtomic(database_name_, metadata_path_, uuid, "DatabaseMaterializedMySQL(" + database_name_ + ")", context_) , settings(std::move(settings_)) , materialize_thread(context_, database_name_, mysql_database_name_, 
std::move(pool_), std::move(client_), settings.get()) { } -template -void DatabaseMaterializedMySQL::rethrowExceptionIfNeed() const +void DatabaseMaterializedMySQL::rethrowExceptionIfNeeded() const { - std::unique_lock lock(Base::mutex); + std::unique_lock lock(mutex); if (!settings->allows_query_when_mysql_lost && exception) { @@ -84,17 +58,15 @@ void DatabaseMaterializedMySQL::rethrowExceptionIfNeed() const } } -template -void DatabaseMaterializedMySQL::setException(const std::exception_ptr & exception_) +void DatabaseMaterializedMySQL::setException(const std::exception_ptr & exception_) { - std::unique_lock lock(Base::mutex); + std::unique_lock lock(mutex); exception = exception_; } -template -void DatabaseMaterializedMySQL::startupTables(ThreadPool & thread_pool, bool force_restore, bool force_attach) +void DatabaseMaterializedMySQL::startupTables(ThreadPool & thread_pool, bool force_restore, bool force_attach) { - Base::startupTables(thread_pool, force_restore, force_attach); + DatabaseAtomic::startupTables(thread_pool, force_restore, force_attach); if (!force_attach) materialize_thread.assertMySQLAvailable(); @@ -103,149 +75,92 @@ void DatabaseMaterializedMySQL::startupTables(ThreadPool & thread_pool, bo started_up = true; } -template -void DatabaseMaterializedMySQL::createTable(ContextPtr context_, const String & name, const StoragePtr & table, const ASTPtr & query) +void DatabaseMaterializedMySQL::createTable(ContextPtr context_, const String & name, const StoragePtr & table, const ASTPtr & query) { - assertCalledFromSyncThreadOrDrop("create table"); - Base::createTable(context_, name, table, query); + checkIsInternalQuery(context_, "CREATE TABLE"); + DatabaseAtomic::createTable(context_, name, table, query); } -template -void DatabaseMaterializedMySQL::dropTable(ContextPtr context_, const String & name, bool no_delay) +void DatabaseMaterializedMySQL::dropTable(ContextPtr context_, const String & name, bool no_delay) { - assertCalledFromSyncThreadOrDrop("drop table"); - Base::dropTable(context_, name, no_delay); + checkIsInternalQuery(context_, "DROP TABLE"); + DatabaseAtomic::dropTable(context_, name, no_delay); } -template -void DatabaseMaterializedMySQL::attachTable(const String & name, const StoragePtr & table, const String & relative_table_path) +void DatabaseMaterializedMySQL::attachTable(ContextPtr context_, const String & name, const StoragePtr & table, const String & relative_table_path) { - assertCalledFromSyncThreadOrDrop("attach table"); - Base::attachTable(name, table, relative_table_path); + checkIsInternalQuery(context_, "ATTACH TABLE"); + DatabaseAtomic::attachTable(context_, name, table, relative_table_path); } -template -StoragePtr DatabaseMaterializedMySQL::detachTable(const String & name) +StoragePtr DatabaseMaterializedMySQL::detachTable(ContextPtr context_, const String & name) { - assertCalledFromSyncThreadOrDrop("detach table"); - return Base::detachTable(name); + checkIsInternalQuery(context_, "DETACH TABLE"); + return DatabaseAtomic::detachTable(context_, name); } -template -void DatabaseMaterializedMySQL::renameTable(ContextPtr context_, const String & name, IDatabase & to_database, const String & to_name, bool exchange, bool dictionary) +void DatabaseMaterializedMySQL::renameTable(ContextPtr context_, const String & name, IDatabase & to_database, const String & to_name, bool exchange, bool dictionary) { - assertCalledFromSyncThreadOrDrop("rename table"); + checkIsInternalQuery(context_, "RENAME TABLE"); if (exchange) - throw Exception("MaterializedMySQL 
database not support exchange table.", ErrorCodes::NOT_IMPLEMENTED);
+        throw Exception("MaterializedMySQL database does not support EXCHANGE TABLE.", ErrorCodes::NOT_IMPLEMENTED);

     if (dictionary)
-        throw Exception("MaterializedMySQL database not support rename dictionary.", ErrorCodes::NOT_IMPLEMENTED);
+        throw Exception("MaterializedMySQL database does not support RENAME DICTIONARY.", ErrorCodes::NOT_IMPLEMENTED);

-    if (to_database.getDatabaseName() != Base::getDatabaseName())
+    if (to_database.getDatabaseName() != DatabaseAtomic::getDatabaseName())
         throw Exception("Cannot rename with other database for MaterializedMySQL database.", ErrorCodes::NOT_IMPLEMENTED);

-    Base::renameTable(context_, name, *this, to_name, exchange, dictionary);
+    DatabaseAtomic::renameTable(context_, name, *this, to_name, exchange, dictionary);
 }

-template <typename Base>
-void DatabaseMaterializedMySQL<Base>::alterTable(ContextPtr context_, const StorageID & table_id, const StorageInMemoryMetadata & metadata)
+void DatabaseMaterializedMySQL::alterTable(ContextPtr context_, const StorageID & table_id, const StorageInMemoryMetadata & metadata)
 {
-    assertCalledFromSyncThreadOrDrop("alter table");
-    Base::alterTable(context_, table_id, metadata);
+    checkIsInternalQuery(context_, "ALTER TABLE");
+    DatabaseAtomic::alterTable(context_, table_id, metadata);
 }

-template <typename Base>
-void DatabaseMaterializedMySQL<Base>::drop(ContextPtr context_)
+void DatabaseMaterializedMySQL::drop(ContextPtr context_)
 {
     /// Remove metadata info
-    fs::path metadata(Base::getMetadataPath() + "/.metadata");
+    fs::path metadata(getMetadataPath() + "/.metadata");

     if (fs::exists(metadata))
         fs::remove(metadata);

-    Base::drop(context_);
+    DatabaseAtomic::drop(context_);
 }

-template <typename Base>
-StoragePtr DatabaseMaterializedMySQL<Base>::tryGetTable(const String & name, ContextPtr context_) const
+StoragePtr DatabaseMaterializedMySQL::tryGetTable(const String & name, ContextPtr context_) const
 {
-    if (!MaterializedMySQLSyncThread::isMySQLSyncThread())
-    {
-        StoragePtr nested_storage = Base::tryGetTable(name, context_);
-
-        if (!nested_storage)
-            return {};
-
-        return std::make_shared<StorageMaterializedMySQL>(std::move(nested_storage), this);
-    }
-
-    return Base::tryGetTable(name, context_);
+    StoragePtr nested_storage = DatabaseAtomic::tryGetTable(name, context_);
+    if (context_->isInternalQuery())
+        return nested_storage;
+    return std::make_shared<StorageMaterializedMySQL>(std::move(nested_storage), this);
 }

-template <typename Base>
 DatabaseTablesIteratorPtr
-DatabaseMaterializedMySQL<Base>::getTablesIterator(ContextPtr context_, const DatabaseOnDisk::FilterByNameFunction & filter_by_table_name) const
+DatabaseMaterializedMySQL::getTablesIterator(ContextPtr context_, const DatabaseOnDisk::FilterByNameFunction & filter_by_table_name) const
 {
-    if (!MaterializedMySQLSyncThread::isMySQLSyncThread())
-    {
-        DatabaseTablesIteratorPtr iterator = Base::getTablesIterator(context_, filter_by_table_name);
-        return std::make_unique<DatabaseMaterializedTablesIterator>(std::move(iterator), this);
-    }
-
-    return Base::getTablesIterator(context_, filter_by_table_name);
+    DatabaseTablesIteratorPtr iterator = DatabaseAtomic::getTablesIterator(context_, filter_by_table_name);
+    if (context_->isInternalQuery())
+        return iterator;
+    return std::make_unique<DatabaseMaterializedTablesIterator>(std::move(iterator), this);
 }

-template <typename Base>
-void DatabaseMaterializedMySQL<Base>::assertCalledFromSyncThreadOrDrop(const char * method) const
+void DatabaseMaterializedMySQL::checkIsInternalQuery(ContextPtr context_, const char * method) const
 {
-    if (!MaterializedMySQLSyncThread::isMySQLSyncThread() && started_up)
-        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MaterializedMySQL database not support {}", method);
+    if (started_up && context_ && !context_->isInternalQuery())
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MaterializedMySQL database does not support {}", method);
 }

-template <typename Base>
-void DatabaseMaterializedMySQL<Base>::shutdownSynchronizationThread()
+void DatabaseMaterializedMySQL::stopReplication()
 {
     materialize_thread.stopSynchronization();
     started_up = false;
 }

-template <typename Database, template <class> class Helper, typename... Args>
-auto castToMaterializedMySQLAndCallHelper(Database * database, Args && ... args)
-{
-    using Ordinary = DatabaseMaterializedMySQL<DatabaseOrdinary>;
-    using Atomic = DatabaseMaterializedMySQL<DatabaseAtomic>;
-    using ToOrdinary = typename std::conditional_t<std::is_const_v<Database>, const Ordinary *, Ordinary *>;
-    using ToAtomic = typename std::conditional_t<std::is_const_v<Database>, const Atomic *, Atomic *>;
-    if (auto * database_materialize = typeid_cast<ToOrdinary>(database))
-        return (database_materialize->*Helper<Ordinary>::v)(std::forward<Args>(args)...);
-    if (auto * database_materialize = typeid_cast<ToAtomic>(database))
-        return (database_materialize->*Helper<Atomic>::v)(std::forward<Args>(args)...);
-
-    throw Exception("LOGICAL_ERROR: cannot cast to DatabaseMaterializedMySQL, it is a bug.", ErrorCodes::LOGICAL_ERROR);
-}
-
-template <typename T> struct HelperSetException { static constexpr auto v = &T::setException; };
-void setSynchronizationThreadException(const DatabasePtr & materialized_mysql_db, const std::exception_ptr & exception)
-{
-    castToMaterializedMySQLAndCallHelper(materialized_mysql_db.get(), exception);
-}
-
-template <typename T> struct HelperStopSync { static constexpr auto v = &T::shutdownSynchronizationThread; };
-void stopDatabaseSynchronization(const DatabasePtr & materialized_mysql_db)
-{
-    castToMaterializedMySQLAndCallHelper(materialized_mysql_db.get());
-}
-
-template <typename T> struct HelperRethrow { static constexpr auto v = &T::rethrowExceptionIfNeed; };
-void rethrowSyncExceptionIfNeed(const IDatabase * materialized_mysql_db)
-{
-    castToMaterializedMySQLAndCallHelper(materialized_mysql_db);
-}
-
-template class DatabaseMaterializedMySQL<DatabaseOrdinary>;
-template class DatabaseMaterializedMySQL<DatabaseAtomic>;
-
 }

 #endif
diff --git a/src/Databases/MySQL/DatabaseMaterializedMySQL.h b/src/Databases/MySQL/DatabaseMaterializedMySQL.h
index ac32607a22c..32686784f2a 100644
--- a/src/Databases/MySQL/DatabaseMaterializedMySQL.h
+++ b/src/Databases/MySQL/DatabaseMaterializedMySQL.h
@@ -6,7 +6,9 @@

 #include
 #include
+#include
 #include
+#include
 #include
 #include

@@ -17,17 +19,20 @@ namespace DB
 *
 * All table structure and data will be written to the local file system
 */
-template <typename Base>
-class DatabaseMaterializedMySQL : public Base
+class DatabaseMaterializedMySQL : public DatabaseAtomic
 {
 public:
-    DatabaseMaterializedMySQL(
-        ContextPtr context, const String & database_name_, const String & metadata_path_, UUID uuid,
-        const String & mysql_database_name_, mysqlxx::Pool && pool_,
-        MySQLClient && client_, std::unique_ptr<MaterializedMySQLSettings> settings_);
+        ContextPtr context,
+        const String & database_name_,
+        const String & metadata_path_,
+        UUID uuid,
+        const String & mysql_database_name_,
+        mysqlxx::Pool && pool_,
+        MySQLClient && client_,
+        std::unique_ptr<MaterializedMySQLSettings> settings_);

-    void rethrowExceptionIfNeed() const;
+    void rethrowExceptionIfNeeded() const;

     void setException(const std::exception_ptr & exception);
 protected:
@@ -49,9 +54,9 @@ public:

     void dropTable(ContextPtr context_, const String & name, bool no_delay) override;

-    void attachTable(const String & name, const StoragePtr & table, const String & relative_table_path) override;
+    void attachTable(ContextPtr context_, const String & name, const StoragePtr & table, const String & relative_table_path) override;
-    StoragePtr detachTable(const String & name) override;
+    StoragePtr detachTable(ContextPtr context_, const String & name) override;

     void renameTable(ContextPtr context_, const String & name, IDatabase & to_database, const String & to_name, bool exchange, bool dictionary) override;

@@ -63,18 +68,15 @@ public:

     DatabaseTablesIteratorPtr getTablesIterator(ContextPtr context_, const DatabaseOnDisk::FilterByNameFunction & filter_by_table_name) const override;

-    void assertCalledFromSyncThreadOrDrop(const char * method) const;
+    void checkIsInternalQuery(ContextPtr context_, const char * method) const;

-    void shutdownSynchronizationThread();
+    bool hasReplicationThread() const override { return true; }
+
+    void stopReplication() override;

     friend class DatabaseMaterializedTablesIterator;
 };
-
-void setSynchronizationThreadException(const DatabasePtr & materialized_mysql_db, const std::exception_ptr & exception);
-void stopDatabaseSynchronization(const DatabasePtr & materialized_mysql_db);
-void rethrowSyncExceptionIfNeed(const IDatabase * materialized_mysql_db);
-
 }

 #endif
diff --git a/src/Databases/MySQL/DatabaseMySQL.cpp b/src/Databases/MySQL/DatabaseMySQL.cpp
index feebf33f58a..f62e06aff8d 100644
--- a/src/Databases/MySQL/DatabaseMySQL.cpp
+++ b/src/Databases/MySQL/DatabaseMySQL.cpp
@@ -113,53 +113,6 @@ StoragePtr DatabaseMySQL::tryGetTable(const String & mysql_table_name, ContextPt
     return StoragePtr{};
 }

-static ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & database_engine_define)
-{
-    auto create_table_query = std::make_shared<ASTCreateQuery>();
-
-    auto table_storage_define = database_engine_define->clone();
-    create_table_query->set(create_table_query->storage, table_storage_define);
-
-    auto columns_declare_list = std::make_shared<ASTColumns>();
-    auto columns_expression_list = std::make_shared<ASTExpressionList>();
-
-    columns_declare_list->set(columns_declare_list->columns, columns_expression_list);
-    create_table_query->set(create_table_query->columns_list, columns_declare_list);
-
-    {
-        /// init create query.
-        auto table_id = storage->getStorageID();
-        create_table_query->table = table_id.table_name;
-        create_table_query->database = table_id.database_name;
-
-        auto metadata_snapshot = storage->getInMemoryMetadataPtr();
-        for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary())
-        {
-            const auto & column_declaration = std::make_shared<ASTColumnDeclaration>();
-            column_declaration->name = column_type_and_name.name;
-            column_declaration->type = dataTypeConvertToQuery(column_type_and_name.type);
-            columns_expression_list->children.emplace_back(column_declaration);
-        }
-
-        ASTStorage * ast_storage = table_storage_define->as<ASTStorage>();
-        ASTs storage_children = ast_storage->children;
-        auto storage_engine_arguments = ast_storage->engine->arguments;
-
-        /// Add table_name to engine arguments
-        auto mysql_table_name = std::make_shared<ASTLiteral>(table_id.table_name);
-        storage_engine_arguments->children.insert(storage_engine_arguments->children.begin() + 2, mysql_table_name);
-
-        /// Unset settings
-        storage_children.erase(
-            std::remove_if(storage_children.begin(), storage_children.end(),
-                [&](const ASTPtr & element) { return element.get() == ast_storage->settings; }),
-            storage_children.end());
-        ast_storage->settings = nullptr;
-    }
-
-    return create_table_query;
-}
-
 ASTPtr DatabaseMySQL::getCreateTableQueryImpl(const String & table_name, ContextPtr local_context, bool throw_on_error) const
 {
     std::lock_guard lock(mutex);
@@ -174,7 +127,27 @@ ASTPtr DatabaseMySQL::getCreateTableQueryImpl(const String & table_name, Context
         return nullptr;
     }

-    return getCreateQueryFromStorage(local_tables_cache[table_name].second, database_engine_define);
+    auto storage = local_tables_cache[table_name].second;
+    auto table_storage_define = database_engine_define->clone();
+    {
+        ASTStorage * ast_storage = table_storage_define->as<ASTStorage>();
+        ASTs storage_children = ast_storage->children;
+        auto storage_engine_arguments = ast_storage->engine->arguments;
+
+        /// Add table_name to engine arguments
+        auto mysql_table_name = std::make_shared<ASTLiteral>(table_name);
+        storage_engine_arguments->children.insert(storage_engine_arguments->children.begin() + 2, mysql_table_name);
+
+        /// Unset settings
+        storage_children.erase(
+            std::remove_if(storage_children.begin(), storage_children.end(),
+                [&](const ASTPtr & element) { return element.get() == ast_storage->settings; }),
+            storage_children.end());
+        ast_storage->settings = nullptr;
+    }
+    auto create_table_query = DB::getCreateQueryFromStorage(storage, table_storage_define, true,
+                                                            getContext()->getSettingsRef().max_parser_depth, throw_on_error);
+    return create_table_query;
 }

 time_t DatabaseMySQL::getObjectMetadataModificationTime(const String & table_name) const
@@ -192,7 +165,7 @@ time_t DatabaseMySQL::getObjectMetadataModificationTime(const String & table_nam

 ASTPtr DatabaseMySQL::getCreateDatabaseQuery() const
 {
     const auto & create_query = std::make_shared<ASTCreateQuery>();
-    create_query->database = getDatabaseName();
+    create_query->setDatabase(getDatabaseName());
     create_query->set(create_query->storage, database_engine_define);

     if (const auto comment_value = getDatabaseComment(); !comment_value.empty())
@@ -362,7 +335,7 @@ void DatabaseMySQL::cleanOutdatedTables()
     }
 }

-void DatabaseMySQL::attachTable(const String & table_name, const StoragePtr & storage, const String &)
+void DatabaseMySQL::attachTable(ContextPtr /* context_ */, const String & table_name, const StoragePtr & storage, const String &)
 {
     std::lock_guard lock{mutex};

@@ -385,7 +358,7 @@ void DatabaseMySQL::attachTable(const String & table_name, 
const StoragePtr & st fs::remove(remove_flag); } -StoragePtr DatabaseMySQL::detachTable(const String & table_name) +StoragePtr DatabaseMySQL::detachTable(ContextPtr /* context */, const String & table_name) { std::lock_guard lock{mutex}; @@ -482,7 +455,7 @@ DatabaseMySQL::~DatabaseMySQL() } } -void DatabaseMySQL::createTable(ContextPtr, const String & table_name, const StoragePtr & storage, const ASTPtr & create_query) +void DatabaseMySQL::createTable(ContextPtr local_context, const String & table_name, const StoragePtr & storage, const ASTPtr & create_query) { const auto & create = create_query->as(); @@ -500,7 +473,7 @@ void DatabaseMySQL::createTable(ContextPtr, const String & table_name, const Sto throw Exception("The MySQL database engine can only execute attach statements of type attach table database_name.table_name", ErrorCodes::UNEXPECTED_AST_STRUCTURE); - attachTable(table_name, storage, {}); + attachTable(local_context, table_name, storage, {}); } } diff --git a/src/Databases/MySQL/DatabaseMySQL.h b/src/Databases/MySQL/DatabaseMySQL.h index 363557fbacb..e57ac442db1 100644 --- a/src/Databases/MySQL/DatabaseMySQL.h +++ b/src/Databases/MySQL/DatabaseMySQL.h @@ -77,13 +77,13 @@ public: void loadStoredObjects(ContextMutablePtr, bool, bool force_attach, bool skip_startup_tables) override; - StoragePtr detachTable(const String & table_name) override; + StoragePtr detachTable(ContextPtr context, const String & table_name) override; void detachTablePermanently(ContextPtr context, const String & table_name) override; void dropTable(ContextPtr context, const String & table_name, bool no_delay) override; - void attachTable(const String & table_name, const StoragePtr & storage, const String & relative_table_path) override; + void attachTable(ContextPtr context, const String & table_name, const StoragePtr & storage, const String & relative_table_path) override; protected: ASTPtr getCreateTableQueryImpl(const String & name, ContextPtr context, bool throw_on_error) const override; diff --git a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp index d0c5d12e848..0fa45c2e282 100644 --- a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp +++ b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp @@ -3,26 +3,27 @@ #if USE_MYSQL #include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include namespace DB { @@ -52,7 +53,7 @@ static ContextMutablePtr createQueryContext(ContextPtr context) auto query_context = Context::createCopy(context); query_context->setSettings(new_query_settings); - CurrentThread::QueryScope query_scope(query_context); + query_context->setInternalQuery(true); query_context->getClientInfo().query_kind = ClientInfo::QueryKind::SECONDARY_QUERY; query_context->setCurrentQueryId(""); // generate random query_id @@ -273,6 +274,8 @@ static inline void cleanOutdatedTables(const String & database_name, ContextPtr for (auto iterator = clean_database->getTablesIterator(context); iterator->isValid(); iterator->next()) { auto query_context = createQueryContext(context); + CurrentThread::QueryScope query_scope(query_context); + String comment = "Materialize MySQL step 
1: execute MySQL DDL for dump data"; cleaning_table_name = backQuoteIfNeed(database_name) + "." + backQuoteIfNeed(iterator->name()); tryToExecuteQuery(" DROP TABLE " + cleaning_table_name, query_context, database_name, comment); @@ -324,6 +327,8 @@ static inline void dumpDataForTables( { const auto & table_name = iterator->first; auto query_context = createQueryContext(context); + CurrentThread::QueryScope query_scope(query_context); + String comment = "Materialize MySQL step 1: execute MySQL DDL for dump data"; tryToExecuteQuery(query_prefix + " " + iterator->second, query_context, database_name, comment); /// create table. @@ -742,6 +747,8 @@ void MaterializedMySQLSyncThread::executeDDLAtomic(const QueryEvent & query_even try { auto query_context = createQueryContext(getContext()); + CurrentThread::QueryScope query_scope(query_context); + String comment = "Materialize MySQL step 2: execute MySQL DDL for sync data"; String event_database = query_event.schema == mysql_database_name ? database_name : ""; tryToExecuteQuery(query_prefix + query_event.query, query_context, event_database, comment); @@ -759,15 +766,9 @@ void MaterializedMySQLSyncThread::executeDDLAtomic(const QueryEvent & query_even } } -bool MaterializedMySQLSyncThread::isMySQLSyncThread() -{ - return getThreadName() == MYSQL_BACKGROUND_THREAD_NAME; -} - void MaterializedMySQLSyncThread::setSynchronizationThreadException(const std::exception_ptr & exception) { - auto db = DatabaseCatalog::instance().getDatabase(database_name); - DB::setSynchronizationThreadException(db, exception); + assert_cast(DatabaseCatalog::instance().getDatabase(database_name).get())->setException(exception); } void MaterializedMySQLSyncThread::Buffers::add(size_t block_rows, size_t block_bytes, size_t written_rows, size_t written_bytes) @@ -791,6 +792,8 @@ void MaterializedMySQLSyncThread::Buffers::commit(ContextPtr context) for (auto & table_name_and_buffer : data) { auto query_context = createQueryContext(context); + CurrentThread::QueryScope query_scope(query_context); + auto input = std::make_shared(table_name_and_buffer.second->first); auto pipeline = getTableOutput(database, table_name_and_buffer.first, query_context, true); pipeline.complete(Pipe(std::move(input))); diff --git a/src/Databases/MySQL/MaterializedMySQLSyncThread.h b/src/Databases/MySQL/MaterializedMySQLSyncThread.h index 524426bada6..ba5022137bf 100644 --- a/src/Databases/MySQL/MaterializedMySQLSyncThread.h +++ b/src/Databases/MySQL/MaterializedMySQLSyncThread.h @@ -53,8 +53,6 @@ public: void assertMySQLAvailable(); - static bool isMySQLSyncThread(); - private: Poco::Logger * log; diff --git a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp index 2b0d73a02ae..5ab0eb47ea5 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp @@ -238,7 +238,7 @@ ASTPtr DatabaseMaterializedPostgreSQL::createAlterSettingsQuery(const SettingCha auto * alter = query->as(); alter->alter_object = ASTAlterQuery::AlterObjectType::DATABASE; - alter->database = database_name; + alter->setDatabase(database_name); alter->set(alter->command_list, command_list); return query; @@ -266,11 +266,11 @@ void DatabaseMaterializedPostgreSQL::createTable(ContextPtr local_context, const DatabaseAtomic::createTable(StorageMaterializedPostgreSQL::makeNestedTableContext(local_context), table_name, table, query_copy); /// Attach MaterializedPostgreSQL table. 
- attachTable(table_name, table, {}); + attachTable(local_context, table_name, table, {}); } -void DatabaseMaterializedPostgreSQL::attachTable(const String & table_name, const StoragePtr & table, const String & relative_table_path) +void DatabaseMaterializedPostgreSQL::attachTable(ContextPtr context_, const String & table_name, const StoragePtr & table, const String & relative_table_path) { /// If there is query context then we need to attach materialized storage. /// If there is no query context then we need to attach internal storage from atomic database. @@ -310,12 +310,12 @@ void DatabaseMaterializedPostgreSQL::attachTable(const String & table_name, cons } else { - DatabaseAtomic::attachTable(table_name, table, relative_table_path); + DatabaseAtomic::attachTable(context_, table_name, table, relative_table_path); } } -StoragePtr DatabaseMaterializedPostgreSQL::detachTable(const String & table_name) +StoragePtr DatabaseMaterializedPostgreSQL::detachTable(ContextPtr context_, const String & table_name) { /// If there is query context then we need to detach materialized storage. /// If there is no query context then we need to detach internal storage from atomic database. @@ -369,7 +369,7 @@ StoragePtr DatabaseMaterializedPostgreSQL::detachTable(const String & table_name } else { - return DatabaseAtomic::detachTable(table_name); + return DatabaseAtomic::detachTable(context_, table_name); } } diff --git a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.h b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.h index 25e32fb0312..3b7f0f9d29d 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.h @@ -49,15 +49,17 @@ public: void createTable(ContextPtr context, const String & table_name, const StoragePtr & table, const ASTPtr & query) override; - void attachTable(const String & table_name, const StoragePtr & table, const String & relative_table_path) override; + void attachTable(ContextPtr context, const String & table_name, const StoragePtr & table, const String & relative_table_path) override; - StoragePtr detachTable(const String & table_name) override; + StoragePtr detachTable(ContextPtr context, const String & table_name) override; void dropTable(ContextPtr local_context, const String & name, bool no_delay) override; void drop(ContextPtr local_context) override; - void stopReplication(); + bool hasReplicationThread() const override { return true; } + + void stopReplication() override; void applySettingsChanges(const SettingsChanges & settings_changes, ContextPtr query_context) override; diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index 5d3493d0c82..d333e476069 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -206,7 +206,7 @@ StoragePtr DatabasePostgreSQL::fetchTable(const String & table_name, ContextPtr, } -void DatabasePostgreSQL::attachTable(const String & table_name, const StoragePtr & storage, const String &) +void DatabasePostgreSQL::attachTable(ContextPtr /* context_ */, const String & table_name, const StoragePtr & storage, const String &) { std::lock_guard lock{mutex}; @@ -231,7 +231,7 @@ void DatabasePostgreSQL::attachTable(const String & table_name, const StoragePtr } -StoragePtr DatabasePostgreSQL::detachTable(const String & table_name) +StoragePtr DatabasePostgreSQL::detachTable(ContextPtr /* context_ */, const String & table_name) { std::lock_guard 
lock{mutex}; @@ -251,14 +251,14 @@ StoragePtr DatabasePostgreSQL::detachTable(const String & table_name) } -void DatabasePostgreSQL::createTable(ContextPtr, const String & table_name, const StoragePtr & storage, const ASTPtr & create_query) +void DatabasePostgreSQL::createTable(ContextPtr local_context, const String & table_name, const StoragePtr & storage, const ASTPtr & create_query) { const auto & create = create_query->as(); if (!create->attach) throw Exception("PostgreSQL database engine does not support create table", ErrorCodes::NOT_IMPLEMENTED); - attachTable(table_name, storage, {}); + attachTable(local_context, table_name, storage, {}); } @@ -355,7 +355,7 @@ void DatabasePostgreSQL::shutdown() ASTPtr DatabasePostgreSQL::getCreateDatabaseQuery() const { const auto & create_query = std::make_shared(); - create_query->database = getDatabaseName(); + create_query->setDatabase(getDatabaseName()); create_query->set(create_query->storage, database_engine_define); if (const auto comment_value = getDatabaseComment(); !comment_value.empty()) @@ -388,8 +388,8 @@ ASTPtr DatabasePostgreSQL::getCreateTableQueryImpl(const String & table_name, Co /// init create query. auto table_id = storage->getStorageID(); - create_table_query->table = table_id.table_name; - create_table_query->database = table_id.database_name; + create_table_query->setTable(table_id.table_name); + create_table_query->setDatabase(table_id.database_name); auto metadata_snapshot = storage->getInMemoryMetadataPtr(); for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary()) diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.h b/src/Databases/PostgreSQL/DatabasePostgreSQL.h index f801f9585d6..d41dbff1f54 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.h +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.h @@ -55,8 +55,8 @@ public: void createTable(ContextPtr, const String & table_name, const StoragePtr & storage, const ASTPtr & create_query) override; void dropTable(ContextPtr, const String & table_name, bool no_delay) override; - void attachTable(const String & table_name, const StoragePtr & storage, const String & relative_table_path) override; - StoragePtr detachTable(const String & table_name) override; + void attachTable(ContextPtr context, const String & table_name, const StoragePtr & storage, const String & relative_table_path) override; + StoragePtr detachTable(ContextPtr context, const String & table_name) override; void drop(ContextPtr /*context*/) override; void shutdown() override; diff --git a/src/Databases/SQLite/DatabaseSQLite.cpp b/src/Databases/SQLite/DatabaseSQLite.cpp index 05bc2f24834..ec15ca90b9a 100644 --- a/src/Databases/SQLite/DatabaseSQLite.cpp +++ b/src/Databases/SQLite/DatabaseSQLite.cpp @@ -160,7 +160,7 @@ StoragePtr DatabaseSQLite::fetchTable(const String & table_name, ContextPtr loca ASTPtr DatabaseSQLite::getCreateDatabaseQuery() const { const auto & create_query = std::make_shared(); - create_query->database = getDatabaseName(); + create_query->setDatabase(getDatabaseName()); create_query->set(create_query->storage, database_engine_define); if (const auto comment_value = getDatabaseComment(); !comment_value.empty()) @@ -180,52 +180,19 @@ ASTPtr DatabaseSQLite::getCreateTableQueryImpl(const String & table_name, Contex database_name, table_name); return nullptr; } - - auto create_table_query = std::make_shared(); auto table_storage_define = database_engine_define->clone(); - create_table_query->set(create_table_query->storage, table_storage_define); - - 
auto columns_declare_list = std::make_shared(); - auto columns_expression_list = std::make_shared(); - - columns_declare_list->set(columns_declare_list->columns, columns_expression_list); - create_table_query->set(create_table_query->columns_list, columns_declare_list); - - /// init create query. - auto table_id = storage->getStorageID(); - create_table_query->table = table_id.table_name; - create_table_query->database = table_id.database_name; - - auto metadata_snapshot = storage->getInMemoryMetadataPtr(); - for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary()) - { - const auto & column_declaration = std::make_shared(); - column_declaration->name = column_type_and_name.name; - column_declaration->type = getColumnDeclaration(column_type_and_name.type); - columns_expression_list->children.emplace_back(column_declaration); - } - ASTStorage * ast_storage = table_storage_define->as(); - ASTs storage_children = ast_storage->children; auto storage_engine_arguments = ast_storage->engine->arguments; - + auto table_id = storage->getStorageID(); /// Add table_name to engine arguments storage_engine_arguments->children.insert(storage_engine_arguments->children.begin() + 1, std::make_shared(table_id.table_name)); + auto create_table_query = DB::getCreateQueryFromStorage(storage, table_storage_define, true, + getContext()->getSettingsRef().max_parser_depth, throw_on_error); + return create_table_query; } - -ASTPtr DatabaseSQLite::getColumnDeclaration(const DataTypePtr & data_type) const -{ - WhichDataType which(data_type); - - if (which.isNullable()) - return makeASTFunction("Nullable", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); - - return std::make_shared(data_type->getName()); -} - } #endif diff --git a/src/Databases/SQLite/DatabaseSQLite.h b/src/Databases/SQLite/DatabaseSQLite.h index d0b8d582844..c8df79d0f6a 100644 --- a/src/Databases/SQLite/DatabaseSQLite.h +++ b/src/Databases/SQLite/DatabaseSQLite.h @@ -12,7 +12,7 @@ namespace DB { -class DatabaseSQLite final : public IDatabase, protected WithContext +class DatabaseSQLite final : public IDatabase, WithContext { public: using SQLitePtr = std::shared_ptr; @@ -58,7 +58,6 @@ private: StoragePtr fetchTable(const String & table_name, ContextPtr context, bool table_checked) const; - ASTPtr getColumnDeclaration(const DataTypePtr & data_type) const; }; } diff --git a/src/Databases/TablesLoader.cpp b/src/Databases/TablesLoader.cpp index cd0e0609c15..31581a97fcc 100644 --- a/src/Databases/TablesLoader.cpp +++ b/src/Databases/TablesLoader.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -20,6 +21,26 @@ namespace ErrorCodes static constexpr size_t PRINT_MESSAGE_EACH_N_OBJECTS = 256; static constexpr size_t PRINT_MESSAGE_EACH_N_SECONDS = 5; +void mergeDependenciesGraphs(DependenciesInfos & main_dependencies_info, const DependenciesInfos & additional_info) +{ + for (const auto & table_and_info : additional_info) + { + const QualifiedTableName & table = table_and_info.first; + const TableNamesSet & dependent_tables = table_and_info.second.dependent_database_objects; + const TableNamesSet & dependencies = table_and_info.second.dependencies; + + DependenciesInfo & maybe_existing_info = main_dependencies_info[table]; + maybe_existing_info.dependent_database_objects.insert(dependent_tables.begin(), dependent_tables.end()); + if (!dependencies.empty()) + { + if (maybe_existing_info.dependencies.empty()) + maybe_existing_info.dependencies = dependencies; + else if 
(maybe_existing_info.dependencies != dependencies)
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Have different dependencies for {}: {} and {}, it's a bug",
+ table, fmt::join(maybe_existing_info.dependencies, ", "), fmt::join(dependencies, ", "));
+ }
+ }
+}

 void logAboutProgress(Poco::Logger * log, size_t processed, size_t total, AtomicStopwatch & watch)
 {
@@ -71,8 +92,14 @@ void TablesLoader::loadTables()
 logDependencyGraph();

+ /// Remove tables that do not exist
+ removeUnresolvableDependencies(/* remove_loaded */ false);
+
+ /// Update existing info (it's important for ATTACH DATABASE)
+ DatabaseCatalog::instance().addLoadingDependencies(metadata.dependencies_info);
+
 /// Some tables were loaded by database with loadStoredObjects(...). Remove them from the graph if necessary.
- removeUnresolvableDependencies();
+ removeUnresolvableDependencies(/* remove_loaded */ true);

 loadTablesInTopologicalOrder(pool);
 }
@@ -85,20 +112,20 @@ void TablesLoader::startupTables()
 }

-void TablesLoader::removeUnresolvableDependencies()
+void TablesLoader::removeUnresolvableDependencies(bool remove_loaded)
 {
- auto need_exclude_dependency = [this](const QualifiedTableName & dependency_name, const DependenciesInfo & info)
+ auto need_exclude_dependency = [this, remove_loaded](const QualifiedTableName & dependency_name, const DependenciesInfo & info)
 {
 /// Table exists and will be loaded
 if (metadata.parsed_tables.contains(dependency_name))
 return false;
 /// Table exists and it's already loaded
 if (DatabaseCatalog::instance().isTableExist(StorageID(dependency_name.database, dependency_name.table), global_context))
- return true;
+ return remove_loaded;
 /// It's an XML dictionary. It was loaded before tables and DDL dictionaries.
 if (dependency_name.database == metadata.default_database && global_context->getExternalDictionariesLoader().has(dependency_name.table))
- return true;
+ return remove_loaded;

 /// Some tables depend on table "dependency_name", but there is no such table in DatabaseCatalog and we don't have its metadata.
 /// We will ignore it and try to load dependent tables without "dependency_name"
@@ -106,9 +133,9 @@ void TablesLoader::removeUnresolvableDependencies()
 LOG_WARNING(log, "Tables {} depend on {}, but it seems like it does not exist. Will ignore it and try to load existing tables",
 fmt::join(info.dependent_database_objects, ", "), dependency_name);

- if (info.dependencies_count)
+ if (!info.dependencies.empty())
 throw Exception(ErrorCodes::LOGICAL_ERROR, "Table {} does not exist, but we have seen its AST and found {} dependencies. "
- "It's a bug", dependency_name, info.dependencies_count);
+ "It's a bug", dependency_name, info.dependencies.size());
 if (info.dependent_database_objects.empty())
 throw Exception(ErrorCodes::LOGICAL_ERROR, "Table {} does not have dependencies and dependent tables as expected. "
 "It's a bug", dependency_name);
@@ -166,22 +193,25 @@ void TablesLoader::loadTablesInTopologicalOrder(ThreadPool & pool)

 DependenciesInfosIter TablesLoader::removeResolvedDependency(const DependenciesInfosIter & info_it, TableNames & independent_database_objects)
 {
- auto & info = info_it->second;
- if (info.dependencies_count)
+ const QualifiedTableName & table_name = info_it->first;
+ const DependenciesInfo & info = info_it->second;
+ if (!info.dependencies.empty())
 throw Exception(ErrorCodes::LOGICAL_ERROR, "Table {} is in the list of independent tables, but dependencies count is {}. "
- "It's a bug", info_it->first, info.dependencies_count); + "It's a bug", table_name, info.dependencies.size()); if (info.dependent_database_objects.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Table {} does not have dependent tables. It's a bug", info_it->first); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Table {} does not have dependent tables. It's a bug", table_name); /// Decrement number of dependencies for each dependent table - for (auto & dependent_table : info.dependent_database_objects) + for (const auto & dependent_table : info.dependent_database_objects) { auto & dependent_info = metadata.dependencies_info[dependent_table]; - auto & dependencies_count = dependent_info.dependencies_count; - if (dependencies_count == 0) + auto & dependencies_set = dependent_info.dependencies; + if (dependencies_set.empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to decrement 0 dependencies counter for {}. It's a bug", dependent_table); - --dependencies_count; - if (dependencies_count == 0) + if (!dependencies_set.erase(table_name)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot remove {} from dependencies set of {}, it contains only {}", + table_name, dependent_table, fmt::join(dependencies_set, ", ")); + if (dependencies_set.empty()) { independent_database_objects.push_back(dependent_table); if (dependent_info.dependent_database_objects.empty()) @@ -213,7 +243,7 @@ size_t TablesLoader::getNumberOfTablesWithDependencies() const { size_t number_of_tables_with_dependencies = 0; for (const auto & info : metadata.dependencies_info) - if (info.second.dependencies_count) + if (!info.second.dependencies.empty()) ++number_of_tables_with_dependencies; return number_of_tables_with_dependencies; } @@ -227,9 +257,9 @@ void TablesLoader::checkCyclicDependencies() const for (const auto & info : metadata.dependencies_info) { LOG_WARNING(log, "Cannot resolve dependencies: Table {} have {} dependencies and {} dependent tables. List of dependent tables: {}", - info.first, info.second.dependencies_count, + info.first, info.second.dependencies.size(), info.second.dependent_database_objects.size(), fmt::join(info.second.dependent_database_objects, ", ")); - assert(info.second.dependencies_count == 0); + assert(info.second.dependencies.empty()); } throw Exception(ErrorCodes::INFINITE_LOOP, "Cannot attach {} tables due to cyclic dependencies. " @@ -246,7 +276,7 @@ void TablesLoader::logDependencyGraph() const LOG_TEST(log, "Table {} have {} dependencies and {} dependent tables. 
 dependencies.first,
- dependencies.second.dependencies_count,
+ dependencies.second.dependencies.size(),
 dependencies.second.dependent_database_objects.size(),
 fmt::join(dependencies.second.dependent_database_objects, ", "));
 }
diff --git a/src/Databases/TablesLoader.h b/src/Databases/TablesLoader.h
index 12f6c2e86a5..a14a28c487a 100644
--- a/src/Databases/TablesLoader.h
+++ b/src/Databases/TablesLoader.h
@@ -34,18 +34,21 @@ struct ParsedTableMetadata
 using ParsedMetadata = std::map;
 using TableNames = std::vector;
+using TableNamesSet = std::unordered_set;

 struct DependenciesInfo
 {
- /// How many dependencies this table have
- size_t dependencies_count = 0;
- /// List of tables/dictionaries which depend on this table/dictionary
- TableNames dependent_database_objects;
+ /// Set of dependencies
+ TableNamesSet dependencies;
+ /// Set of tables/dictionaries which depend on this table/dictionary
+ TableNamesSet dependent_database_objects;
 };

 using DependenciesInfos = std::unordered_map;
 using DependenciesInfosIter = std::unordered_map::iterator;

+void mergeDependenciesGraphs(DependenciesInfos & main_dependencies_info, const DependenciesInfos & additional_info);
+
 struct ParsedTablesMetadata
 {
 String default_database;
@@ -59,11 +62,12 @@ struct ParsedTablesMetadata
 /// List of tables/dictionaries that do not have any dependencies and can be loaded
 TableNames independent_database_objects;

- /// Actually it contains two different maps (with, probably, intersecting keys):
- /// 1. table/dictionary name -> number of dependencies
+ /// Adjacency list of the dependency graph; contains two maps:
 /// 1. table/dictionary name -> dependent tables/dictionaries list (adjacency list of the dependencies graph).
+ /// 2. table/dictionary name -> dependencies of the table/dictionary (adjacency list of the inverted dependencies graph).
- /// If table A depends on table B, then there is an edge B --> A, i.e. dependencies_info[B].dependent_database_objects contains A.
- /// And dependencies_info[C].dependencies_count is a number of incoming edges for vertex C (how many tables we have to load before C).
+ /// If table A depends on table B, then there is an edge B --> A, i.e. dependencies_info[B].dependent_database_objects contains A
+ /// and dependencies_info[A].dependencies contains B.
+ /// We need the inverted graph to maintain it efficiently under DDL queries that can modify the graph.
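+ /// A concrete example (hypothetical tables, for illustration only): if table A depends on tables B and C, then
+ /// dependencies_info[A].dependencies == {B, C}, and both dependencies_info[B].dependent_database_objects and
+ /// dependencies_info[C].dependent_database_objects contain A. When B is loaded, removeResolvedDependency()
+ /// erases the edge via dependencies_info[A].dependencies.erase(B); once that set becomes empty, A itself is loadable.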
DependenciesInfos dependencies_info; }; @@ -94,7 +98,7 @@ private: ThreadPool pool; - void removeUnresolvableDependencies(); + void removeUnresolvableDependencies(bool remove_loaded); void loadTablesInTopologicalOrder(ThreadPool & pool); diff --git a/src/Dictionaries/HTTPDictionarySource.cpp b/src/Dictionaries/HTTPDictionarySource.cpp index 29f503fc160..aba6b40f206 100644 --- a/src/Dictionaries/HTTPDictionarySource.cpp +++ b/src/Dictionaries/HTTPDictionarySource.cpp @@ -98,11 +98,13 @@ Pipe HTTPDictionarySource::loadAll() Poco::Net::HTTPRequest::HTTP_GET, ReadWriteBufferFromHTTP::OutStreamCallback(), timeouts, - 0, credentials, + 0, DBMS_DEFAULT_BUFFER_SIZE, context->getReadSettings(), - configuration.header_entries); + configuration.header_entries, + ReadWriteBufferFromHTTP::Range{}, + RemoteHostFilter{}, false); return createWrappedBuffer(std::move(in_ptr)); } @@ -117,11 +119,13 @@ Pipe HTTPDictionarySource::loadUpdatedAll() Poco::Net::HTTPRequest::HTTP_GET, ReadWriteBufferFromHTTP::OutStreamCallback(), timeouts, - 0, credentials, + 0, DBMS_DEFAULT_BUFFER_SIZE, context->getReadSettings(), - configuration.header_entries); + configuration.header_entries, + ReadWriteBufferFromHTTP::Range{}, + RemoteHostFilter{}, false); return createWrappedBuffer(std::move(in_ptr)); } @@ -145,11 +149,13 @@ Pipe HTTPDictionarySource::loadIds(const std::vector & ids) Poco::Net::HTTPRequest::HTTP_POST, out_stream_callback, timeouts, - 0, credentials, + 0, DBMS_DEFAULT_BUFFER_SIZE, context->getReadSettings(), - configuration.header_entries); + configuration.header_entries, + ReadWriteBufferFromHTTP::Range{}, + RemoteHostFilter{}, false); return createWrappedBuffer(std::move(in_ptr)); } @@ -173,11 +179,13 @@ Pipe HTTPDictionarySource::loadKeys(const Columns & key_columns, const std::vect Poco::Net::HTTPRequest::HTTP_POST, out_stream_callback, timeouts, - 0, credentials, + 0, DBMS_DEFAULT_BUFFER_SIZE, context->getReadSettings(), - configuration.header_entries); + configuration.header_entries, + ReadWriteBufferFromHTTP::Range{}, + RemoteHostFilter{}, false); return createWrappedBuffer(std::move(in_ptr)); } diff --git a/src/Dictionaries/HashedDictionary.cpp b/src/Dictionaries/HashedDictionary.cpp index 728d8c1c352..1df152eec38 100644 --- a/src/Dictionaries/HashedDictionary.cpp +++ b/src/Dictionaries/HashedDictionary.cpp @@ -11,6 +11,7 @@ #include #include #include +#include namespace { diff --git a/src/Dictionaries/SSDCacheDictionaryStorage.h b/src/Dictionaries/SSDCacheDictionaryStorage.h index 7c53ecc2b2c..7f0ecdb5cb8 100644 --- a/src/Dictionaries/SSDCacheDictionaryStorage.h +++ b/src/Dictionaries/SSDCacheDictionaryStorage.h @@ -23,6 +23,11 @@ #include #include +namespace CurrentMetrics +{ + extern const Metric Write; +} + namespace ProfileEvents { extern const Event FileOpen; @@ -542,8 +547,13 @@ public: file_path, std::to_string(bytes_written)); + #if defined(OS_DARWIN) if (::fsync(file.fd) < 0) throwFromErrnoWithPath("Cannot fsync " + file_path, file_path, ErrorCodes::CANNOT_FSYNC); + #else + if (::fdatasync(file.fd) < 0) + throwFromErrnoWithPath("Cannot fdatasync " + file_path, file_path, ErrorCodes::CANNOT_FSYNC); + #endif current_block_index += buffer_size_in_blocks; diff --git a/src/Dictionaries/XDBCDictionarySource.cpp b/src/Dictionaries/XDBCDictionarySource.cpp index ba993ec5783..f827c0cd8d0 100644 --- a/src/Dictionaries/XDBCDictionarySource.cpp +++ b/src/Dictionaries/XDBCDictionarySource.cpp @@ -214,7 +214,8 @@ Pipe XDBCDictionarySource::loadFromQuery(const Poco::URI & url, const Block & re os << 
"query=" << escapeForFileName(query); }; - auto read_buf = std::make_unique(url, Poco::Net::HTTPRequest::HTTP_POST, write_body_callback, timeouts); + auto read_buf = std::make_unique( + url, Poco::Net::HTTPRequest::HTTP_POST, write_body_callback, timeouts, credentials); auto format = getContext()->getInputFormat(IXDBCBridgeHelper::DEFAULT_FORMAT, *read_buf, required_sample_block, max_block_size); format->addBuffer(std::move(read_buf)); diff --git a/src/Dictionaries/XDBCDictionarySource.h b/src/Dictionaries/XDBCDictionarySource.h index df31e8a87cf..52872fb2c5c 100644 --- a/src/Dictionaries/XDBCDictionarySource.h +++ b/src/Dictionaries/XDBCDictionarySource.h @@ -89,6 +89,7 @@ private: BridgeHelperPtr bridge_helper; Poco::URI bridge_url; ConnectionTimeouts timeouts; + Poco::Net::HTTPBasicCredentials credentials{}; }; } diff --git a/src/Dictionaries/getDictionaryConfigurationFromAST.cpp b/src/Dictionaries/getDictionaryConfigurationFromAST.cpp index 0c6944eeccc..f031e8ff038 100644 --- a/src/Dictionaries/getDictionaryConfigurationFromAST.cpp +++ b/src/Dictionaries/getDictionaryConfigurationFromAST.cpp @@ -536,12 +536,12 @@ getDictionaryConfigurationFromAST(const ASTCreateQuery & query, ContextPtr conte AutoPtr name_element(xml_document->createElement("name")); current_dictionary->appendChild(name_element); - AutoPtr name(xml_document->createTextNode(query.table)); + AutoPtr name(xml_document->createTextNode(query.getTable())); name_element->appendChild(name); AutoPtr database_element(xml_document->createElement("database")); current_dictionary->appendChild(database_element); - AutoPtr database(xml_document->createTextNode(!database_.empty() ? database_ : query.database)); + AutoPtr database(xml_document->createTextNode(!database_.empty() ? database_ : query.getDatabase())); database_element->appendChild(database); if (query.uuid != UUIDHelpers::Nil) diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp index a701a4e2cb1..0a0764d41b1 100644 --- a/src/Disks/DiskLocal.cpp +++ b/src/Disks/DiskLocal.cpp @@ -12,6 +12,11 @@ #include +namespace CurrentMetrics +{ + extern const Metric DiskSpaceReservedForMerge; +} + namespace DB { @@ -106,10 +111,11 @@ private: }; -class DiskLocalDirectoryIterator : public IDiskDirectoryIterator +class DiskLocalDirectoryIterator final : public IDiskDirectoryIterator { public: - explicit DiskLocalDirectoryIterator(const String & disk_path_, const String & dir_path_) + DiskLocalDirectoryIterator() = default; + DiskLocalDirectoryIterator(const String & disk_path_, const String & dir_path_) : dir_path(dir_path_), entry(fs::path(disk_path_) / dir_path_) { } @@ -244,7 +250,11 @@ void DiskLocal::moveDirectory(const String & from_path, const String & to_path) DiskDirectoryIteratorPtr DiskLocal::iterateDirectory(const String & path) { - return std::make_unique(disk_path, path); + fs::path meta_path = fs::path(disk_path) / path; + if (fs::exists(meta_path) && fs::is_directory(meta_path)) + return std::make_unique(disk_path, path); + else + return std::make_unique(); } void DiskLocal::moveFile(const String & from_path, const String & to_path) diff --git a/src/Disks/DiskWebServer.cpp b/src/Disks/DiskWebServer.cpp index 55ea91c40c9..63e1cc0e6c5 100644 --- a/src/Disks/DiskWebServer.cpp +++ b/src/Disks/DiskWebServer.cpp @@ -38,10 +38,12 @@ void DiskWebServer::initialize(const String & uri_path) const LOG_TRACE(log, "Loading metadata for directory: {}", uri_path); try { + Poco::Net::HTTPBasicCredentials credentials{}; ReadWriteBufferFromHTTP 
metadata_buf(Poco::URI(fs::path(uri_path) / ".index"), Poco::Net::HTTPRequest::HTTP_GET, ReadWriteBufferFromHTTP::OutStreamCallback(), - ConnectionTimeouts::getHTTPTimeouts(getContext())); + ConnectionTimeouts::getHTTPTimeouts(getContext()), + credentials); String file_name; FileData file_data{}; @@ -166,7 +168,7 @@ std::unique_ptr DiskWebServer::readFile(const String & p RemoteMetadata meta(path, remote_path); meta.remote_fs_objects.emplace_back(std::make_pair(remote_path, iter->second.size)); - bool threadpool_read = read_settings.remote_fs_method == RemoteFSReadMethod::read_threadpool; + bool threadpool_read = read_settings.remote_fs_method == RemoteFSReadMethod::threadpool; auto web_impl = std::make_unique(path, url, meta, getContext(), threadpool_read, read_settings); diff --git a/src/Disks/HDFS/DiskHDFS.cpp b/src/Disks/HDFS/DiskHDFS.cpp index bddb5ebefc6..5264e6413e7 100644 --- a/src/Disks/HDFS/DiskHDFS.cpp +++ b/src/Disks/HDFS/DiskHDFS.cpp @@ -1,4 +1,5 @@ #include +#include #include #include @@ -9,6 +10,8 @@ #include #include +#include + #include #include @@ -56,9 +59,9 @@ DiskHDFS::DiskHDFS( const String & disk_name_, const String & hdfs_root_path_, SettingsPtr settings_, - const String & metadata_path_, + DiskPtr metadata_disk_, const Poco::Util::AbstractConfiguration & config_) - : IDiskRemote(disk_name_, hdfs_root_path_, metadata_path_, "DiskHDFS", settings_->thread_pool_size) + : IDiskRemote(disk_name_, hdfs_root_path_, metadata_disk_, "DiskHDFS", settings_->thread_pool_size) , config(config_) , hdfs_builder(createHDFSBuilder(hdfs_root_path_, config)) , hdfs_fs(createHDFSFS(hdfs_builder.get())) @@ -73,20 +76,11 @@ std::unique_ptr DiskHDFS::readFile(const String & path, LOG_TRACE(log, "Read from file by path: {}. Existing HDFS objects: {}", - backQuote(metadata_path + path), metadata.remote_fs_objects.size()); + backQuote(metadata_disk->getPath() + path), metadata.remote_fs_objects.size()); auto hdfs_impl = std::make_unique(path, config, remote_fs_root_path, metadata, read_settings.remote_fs_buffer_size); - - if (read_settings.remote_fs_method == RemoteFSReadMethod::read_threadpool) - { - auto reader = getThreadPoolReader(); - return std::make_unique(reader, read_settings, std::move(hdfs_impl)); - } - else - { - auto buf = std::make_unique(std::move(hdfs_impl)); - return std::make_unique(std::move(buf), settings->min_bytes_for_seek); - } + auto buf = std::make_unique(std::move(hdfs_impl)); + return std::make_unique(std::move(buf), settings->min_bytes_for_seek); } @@ -99,7 +93,7 @@ std::unique_ptr DiskHDFS::writeFile(const String & path auto hdfs_path = remote_fs_root_path + file_name; LOG_TRACE(log, "{} to file by path: {}. HDFS path: {}", mode == WriteMode::Rewrite ? 
"Write" : "Append", - backQuote(metadata_path + path), hdfs_path); + backQuote(metadata_disk->getPath() + path), hdfs_path); /// Single O_WRONLY in libhdfs adds O_TRUNC auto hdfs_buffer = std::make_unique(hdfs_path, @@ -175,11 +169,12 @@ void registerDiskHDFS(DiskFactory & factory) throw Exception(ErrorCodes::BAD_ARGUMENTS, "HDFS path must ends with '/', but '{}' doesn't.", uri); String metadata_path = context_->getPath() + "disks/" + name + "/"; + auto metadata_disk = std::make_shared(name + "-metadata", metadata_path, 0); return std::make_shared( name, uri, getSettings(config, config_prefix), - metadata_path, config); + metadata_disk, config); }; factory.registerDiskType("hdfs", creator); diff --git a/src/Disks/HDFS/DiskHDFS.h b/src/Disks/HDFS/DiskHDFS.h index 5bb947c9f58..881d6e2937c 100644 --- a/src/Disks/HDFS/DiskHDFS.h +++ b/src/Disks/HDFS/DiskHDFS.h @@ -39,7 +39,7 @@ public: const String & disk_name_, const String & hdfs_root_path_, SettingsPtr settings_, - const String & metadata_path_, + DiskPtr metadata_disk_, const Poco::Util::AbstractConfiguration & config_); DiskType getType() const override { return DiskType::HDFS; } diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index 03ed92e7d5e..0a63421ae5c 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -28,11 +28,6 @@ namespace Poco } } -namespace CurrentMetrics -{ - extern const Metric DiskSpaceReservedForMerge; -} - namespace DB { diff --git a/src/Disks/IDiskRemote.cpp b/src/Disks/IDiskRemote.cpp index cf1baafce6c..e920e6fd5b9 100644 --- a/src/Disks/IDiskRemote.cpp +++ b/src/Disks/IDiskRemote.cpp @@ -31,11 +31,11 @@ namespace ErrorCodes /// Load metadata by path or create empty if `create` flag is set. IDiskRemote::Metadata::Metadata( const String & remote_fs_root_path_, - const String & disk_path_, + DiskPtr metadata_disk_, const String & metadata_file_path_, bool create) : RemoteMetadata(remote_fs_root_path_, metadata_file_path_) - , disk_path(disk_path_) + , metadata_disk(metadata_disk_) , total_size(0), ref_count(0) { if (create) @@ -43,53 +43,54 @@ IDiskRemote::Metadata::Metadata( try { - ReadBufferFromFile buf(disk_path + metadata_file_path, 1024); /* reasonable buffer size for small file */ + const ReadSettings read_settings; + auto buf = metadata_disk->readFile(metadata_file_path, read_settings, 1024); /* reasonable buffer size for small file */ UInt32 version; - readIntText(version, buf); + readIntText(version, *buf); if (version < VERSION_ABSOLUTE_PATHS || version > VERSION_READ_ONLY_FLAG) throw Exception( ErrorCodes::UNKNOWN_FORMAT, "Unknown metadata file version. Path: {}. Version: {}. 
Maximum expected version: {}", - disk_path + metadata_file_path, toString(version), toString(VERSION_READ_ONLY_FLAG)); + metadata_disk->getPath() + metadata_file_path, toString(version), toString(VERSION_READ_ONLY_FLAG)); - assertChar('\n', buf); + assertChar('\n', *buf); UInt32 remote_fs_objects_count; - readIntText(remote_fs_objects_count, buf); - assertChar('\t', buf); - readIntText(total_size, buf); - assertChar('\n', buf); + readIntText(remote_fs_objects_count, *buf); + assertChar('\t', *buf); + readIntText(total_size, *buf); + assertChar('\n', *buf); remote_fs_objects.resize(remote_fs_objects_count); for (size_t i = 0; i < remote_fs_objects_count; ++i) { String remote_fs_object_path; size_t remote_fs_object_size; - readIntText(remote_fs_object_size, buf); - assertChar('\t', buf); - readEscapedString(remote_fs_object_path, buf); + readIntText(remote_fs_object_size, *buf); + assertChar('\t', *buf); + readEscapedString(remote_fs_object_path, *buf); if (version == VERSION_ABSOLUTE_PATHS) { if (!remote_fs_object_path.starts_with(remote_fs_root_path)) throw Exception(ErrorCodes::UNKNOWN_FORMAT, "Path in metadata does not correspond to root path. Path: {}, root path: {}, disk path: {}", - remote_fs_object_path, remote_fs_root_path, disk_path_); + remote_fs_object_path, remote_fs_root_path, metadata_disk->getPath()); remote_fs_object_path = remote_fs_object_path.substr(remote_fs_root_path.size()); } - assertChar('\n', buf); + assertChar('\n', *buf); remote_fs_objects[i] = {remote_fs_object_path, remote_fs_object_size}; } - readIntText(ref_count, buf); - assertChar('\n', buf); + readIntText(ref_count, *buf); + assertChar('\n', *buf); if (version >= VERSION_READ_ONLY_FLAG) { - readBoolText(read_only, buf); - assertChar('\n', buf); + readBoolText(read_only, *buf); + assertChar('\n', *buf); } } catch (Exception & e) @@ -110,33 +111,33 @@ void IDiskRemote::Metadata::addObject(const String & path, size_t size) /// Fsync metadata file if 'sync' flag is set. 
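 /// For illustration (hypothetical object names; the layout follows the write sequence in save() below):
 /// a VERSION_RELATIVE_PATHS metadata file that tracks two remote blobs looks like
 ///     2
 ///     2<TAB>1048576
 ///     524288<TAB>blobs/part_a
 ///     524288<TAB>blobs/part_b
 ///     1
 ///     0
 /// i.e. format version, then "object count <TAB> total size", one "size <TAB> relative path" line
 /// per object, then ref_count and the read_only flag, exactly as read back by the constructor above.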
void IDiskRemote::Metadata::save(bool sync) { - WriteBufferFromFile buf(disk_path + metadata_file_path, 1024); + auto buf = metadata_disk->writeFile(metadata_file_path, 1024); - writeIntText(VERSION_RELATIVE_PATHS, buf); - writeChar('\n', buf); + writeIntText(VERSION_RELATIVE_PATHS, *buf); + writeChar('\n', *buf); - writeIntText(remote_fs_objects.size(), buf); - writeChar('\t', buf); - writeIntText(total_size, buf); - writeChar('\n', buf); + writeIntText(remote_fs_objects.size(), *buf); + writeChar('\t', *buf); + writeIntText(total_size, *buf); + writeChar('\n', *buf); for (const auto & [remote_fs_object_path, remote_fs_object_size] : remote_fs_objects) { - writeIntText(remote_fs_object_size, buf); - writeChar('\t', buf); - writeEscapedString(remote_fs_object_path, buf); - writeChar('\n', buf); + writeIntText(remote_fs_object_size, *buf); + writeChar('\t', *buf); + writeEscapedString(remote_fs_object_path, *buf); + writeChar('\n', *buf); } - writeIntText(ref_count, buf); - writeChar('\n', buf); + writeIntText(ref_count, *buf); + writeChar('\n', *buf); - writeBoolText(read_only, buf); - writeChar('\n', buf); + writeBoolText(read_only, *buf); + writeChar('\n', *buf); - buf.finalize(); + buf->finalize(); if (sync) - buf.sync(); + buf->sync(); } IDiskRemote::Metadata IDiskRemote::readOrCreateMetaForWriting(const String & path, WriteMode mode) @@ -164,23 +165,21 @@ IDiskRemote::Metadata IDiskRemote::readOrCreateMetaForWriting(const String & pat IDiskRemote::Metadata IDiskRemote::readMeta(const String & path) const { - return Metadata(remote_fs_root_path, metadata_path, path); + return Metadata(remote_fs_root_path, metadata_disk, path); } IDiskRemote::Metadata IDiskRemote::createMeta(const String & path) const { - return Metadata(remote_fs_root_path, metadata_path, path, true); + return Metadata(remote_fs_root_path, metadata_disk, path, true); } void IDiskRemote::removeMeta(const String & path, RemoteFSPathKeeperPtr fs_paths_keeper) { - LOG_DEBUG(log, "Remove file by path: {}", backQuote(metadata_path + path)); + LOG_DEBUG(log, "Remove file by path: {}", backQuote(metadata_disk->getPath() + path)); - fs::path file(metadata_path + path); - - if (!fs::is_regular_file(file)) + if (!metadata_disk->isFile(path)) throw Exception(ErrorCodes::CANNOT_DELETE_DIRECTORY, "Path '{}' is a directory", path); try @@ -190,7 +189,7 @@ void IDiskRemote::removeMeta(const String & path, RemoteFSPathKeeperPtr fs_paths /// If there is no references - delete content from remote FS. if (metadata.ref_count == 0) { - fs::remove(file); + metadata_disk->removeFile(path); for (const auto & [remote_fs_object_path, _] : metadata.remote_fs_objects) fs_paths_keeper->addPath(remote_fs_root_path + remote_fs_object_path); } @@ -198,7 +197,7 @@ void IDiskRemote::removeMeta(const String & path, RemoteFSPathKeeperPtr fs_paths { --metadata.ref_count; metadata.save(); - fs::remove(file); + metadata_disk->removeFile(path); } } catch (const Exception & e) @@ -209,7 +208,7 @@ void IDiskRemote::removeMeta(const String & path, RemoteFSPathKeeperPtr fs_paths LOG_WARNING(log, "Metadata file {} can't be read by reason: {}. Removing it forcibly.", backQuote(path), e.nested() ? e.nested()->message() : e.message()); - fs::remove(file); + metadata_disk->removeFile(path); } else throw; @@ -221,8 +220,7 @@ void IDiskRemote::removeMetaRecursive(const String & path, RemoteFSPathKeeperPtr { checkStackSize(); /// This is needed to prevent stack overflow in case of cyclic symlinks. 
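 /// Plain files are handed to removeMeta() above, which owns the ref-counting: the remote blobs are queued
 /// for deletion only when ref_count is zero; otherwise the counter is decremented and only the local
 /// metadata file is removed. Directories are removed after their contents, below.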
- fs::path file = fs::path(metadata_path) / path; - if (fs::is_regular_file(file)) + if (metadata_disk->isFile(path)) { removeMeta(path, fs_paths_keeper); } @@ -230,7 +228,7 @@ void IDiskRemote::removeMetaRecursive(const String & path, RemoteFSPathKeeperPtr { for (auto it{iterateDirectory(path)}; it->isValid(); it->next()) removeMetaRecursive(it->path(), fs_paths_keeper); - fs::remove(file); + metadata_disk->removeDirectory(path); } } @@ -281,27 +279,27 @@ DiskRemoteReservation::~DiskRemoteReservation() IDiskRemote::IDiskRemote( const String & name_, const String & remote_fs_root_path_, - const String & metadata_path_, + DiskPtr metadata_disk_, const String & log_name_, size_t thread_pool_size) : IDisk(std::make_unique(log_name_, thread_pool_size)) , log(&Poco::Logger::get(log_name_)) , name(name_) , remote_fs_root_path(remote_fs_root_path_) - , metadata_path(metadata_path_) + , metadata_disk(metadata_disk_) { } bool IDiskRemote::exists(const String & path) const { - return fs::exists(fs::path(metadata_path) / path); + return metadata_disk->exists(path); } bool IDiskRemote::isFile(const String & path) const { - return fs::is_regular_file(fs::path(metadata_path) / path); + return metadata_disk->isFile(path); } @@ -325,7 +323,7 @@ void IDiskRemote::moveFile(const String & from_path, const String & to_path) if (exists(to_path)) throw Exception("File already exists: " + to_path, ErrorCodes::FILE_ALREADY_EXISTS); - fs::rename(fs::path(metadata_path) / from_path, fs::path(metadata_path) / to_path); + metadata_disk->moveFile(from_path, to_path); } @@ -355,7 +353,7 @@ void IDiskRemote::removeSharedFile(const String & path, bool keep_in_remote_fs) void IDiskRemote::removeSharedFileIfExists(const String & path, bool keep_in_remote_fs) { RemoteFSPathKeeperPtr fs_paths_keeper = createFSPathKeeper(); - if (fs::exists(fs::path(metadata_path) / path)) + if (metadata_disk->exists(path)) { removeMeta(path, fs_paths_keeper); if (!keep_in_remote_fs) @@ -385,19 +383,19 @@ void IDiskRemote::setReadOnly(const String & path) bool IDiskRemote::isDirectory(const String & path) const { - return fs::is_directory(fs::path(metadata_path) / path); + return metadata_disk->isDirectory(path); } void IDiskRemote::createDirectory(const String & path) { - fs::create_directory(fs::path(metadata_path) / path); + metadata_disk->createDirectory(path); } void IDiskRemote::createDirectories(const String & path) { - fs::create_directories(fs::path(metadata_path) / path); + metadata_disk->createDirectories(path); } @@ -411,17 +409,13 @@ void IDiskRemote::clearDirectory(const String & path) void IDiskRemote::removeDirectory(const String & path) { - fs::remove(fs::path(metadata_path) / path); + metadata_disk->removeDirectory(path); } DiskDirectoryIteratorPtr IDiskRemote::iterateDirectory(const String & path) { - fs::path meta_path = fs::path(metadata_path) / path; - if (fs::exists(meta_path) && fs::is_directory(meta_path)) - return std::make_unique(meta_path, path); - else - return std::make_unique(); + return metadata_disk->iterateDirectory(path); } @@ -434,13 +428,13 @@ void IDiskRemote::listFiles(const String & path, std::vector & file_name void IDiskRemote::setLastModified(const String & path, const Poco::Timestamp & timestamp) { - FS::setModificationTime(fs::path(metadata_path) / path, timestamp.epochTime()); + metadata_disk->setLastModified(path, timestamp); } Poco::Timestamp IDiskRemote::getLastModified(const String & path) { - return FS::getModificationTimestamp(fs::path(metadata_path) / path); + return 
metadata_disk->getLastModified(path); } @@ -452,7 +446,7 @@ void IDiskRemote::createHardLink(const String & src_path, const String & dst_pat src.save(); /// Create FS hardlink to metadata file. - DB::createHardLink(metadata_path + src_path, metadata_path + dst_path); + metadata_disk->createHardLink(src_path, dst_path); } @@ -490,7 +484,7 @@ bool IDiskRemote::tryReserve(UInt64 bytes) String IDiskRemote::getUniqueId(const String & path) const { - Metadata metadata(remote_fs_root_path, metadata_path, path); + Metadata metadata(remote_fs_root_path, metadata_disk, path); String id; if (!metadata.remote_fs_objects.empty()) id = metadata.remote_fs_root_path + metadata.remote_fs_objects[0].first; diff --git a/src/Disks/IDiskRemote.h b/src/Disks/IDiskRemote.h index 50c8d73c048..c9b8fe81d9f 100644 --- a/src/Disks/IDiskRemote.h +++ b/src/Disks/IDiskRemote.h @@ -12,6 +12,11 @@ namespace fs = std::filesystem; +namespace CurrentMetrics +{ + extern const Metric DiskSpaceReservedForMerge; +} + namespace DB { @@ -47,7 +52,7 @@ public: IDiskRemote( const String & name_, const String & remote_fs_root_path_, - const String & metadata_path_, + DiskPtr metadata_disk_, const String & log_name_, size_t thread_pool_size); @@ -55,7 +60,7 @@ public: const String & getName() const final override { return name; } - const String & getPath() const final override { return metadata_path; } + const String & getPath() const final override { return metadata_disk->getPath(); } Metadata readMeta(const String & path) const; @@ -136,7 +141,7 @@ protected: const String name; const String remote_fs_root_path; - const String metadata_path; + DiskPtr metadata_disk; private: void removeMeta(const String & path, RemoteFSPathKeeperPtr fs_paths_keeper); @@ -182,8 +187,7 @@ struct IDiskRemote::Metadata : RemoteMetadata static constexpr UInt32 VERSION_RELATIVE_PATHS = 2; static constexpr UInt32 VERSION_READ_ONLY_FLAG = 3; - /// Disk path. - const String & disk_path; + DiskPtr metadata_disk; /// Total size of all remote FS (S3, HDFS) objects. size_t total_size = 0; @@ -196,7 +200,7 @@ struct IDiskRemote::Metadata : RemoteMetadata /// Load metadata by path or create empty if `create` flag is set. 
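 /// (Intended usage, as seen in readMeta() and createMeta() above: create = false loads an existing
 /// metadata file, throwing UNKNOWN_FORMAT on bad contents; create = true starts from an empty object list.)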
Metadata(const String & remote_fs_root_path_, - const String & disk_path_, + DiskPtr metadata_disk_, const String & metadata_file_path_, bool create = false); @@ -207,33 +211,6 @@ struct IDiskRemote::Metadata : RemoteMetadata }; - -class RemoteDiskDirectoryIterator final : public IDiskDirectoryIterator -{ -public: - RemoteDiskDirectoryIterator() {} - RemoteDiskDirectoryIterator(const String & full_path, const String & folder_path_) : iter(full_path), folder_path(folder_path_) {} - - void next() override { ++iter; } - - bool isValid() const override { return iter != fs::directory_iterator(); } - - String path() const override - { - if (fs::is_directory(iter->path())) - return folder_path / iter->path().filename().string() / ""; - else - return folder_path / iter->path().filename().string(); - } - - String name() const override { return iter->path().filename(); } - -private: - fs::directory_iterator iter; - fs::path folder_path; -}; - - class DiskRemoteReservation final : public IReservation { public: diff --git a/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.h b/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.h index 2e37f448fe1..c9b6532e76c 100644 --- a/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.h +++ b/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.h @@ -76,7 +76,7 @@ private: size_t bytes_to_ignore = 0; - std::optional read_until_position = 0; + std::optional read_until_position; bool must_read_until_position; }; diff --git a/src/Disks/IO/ReadBufferFromWebServer.cpp b/src/Disks/IO/ReadBufferFromWebServer.cpp index bda20f78e79..c7da6c2051b 100644 --- a/src/Disks/IO/ReadBufferFromWebServer.cpp +++ b/src/Disks/IO/ReadBufferFromWebServer.cpp @@ -21,15 +21,12 @@ namespace ErrorCodes } -static constexpr size_t HTTP_MAX_TRIES = 10; -static constexpr size_t WAIT_INIT = 100; - ReadBufferFromWebServer::ReadBufferFromWebServer( const String & url_, ContextPtr context_, const ReadSettings & settings_, bool use_external_buffer_, - size_t last_offset_) + size_t read_until_position_) : SeekableReadBuffer(nullptr, 0) , log(&Poco::Logger::get("ReadBufferFromWebServer")) , context(context_) @@ -37,7 +34,7 @@ ReadBufferFromWebServer::ReadBufferFromWebServer( , buf_size(settings_.remote_fs_buffer_size) , read_settings(settings_) , use_external_buffer(use_external_buffer_) - , last_offset(last_offset_) + , read_until_position(read_until_position_) { } @@ -45,20 +42,18 @@ ReadBufferFromWebServer::ReadBufferFromWebServer( std::unique_ptr ReadBufferFromWebServer::initialize() { Poco::URI uri(url); - - ReadWriteBufferFromHTTP::HTTPHeaderEntries headers; - - if (last_offset) + ReadWriteBufferFromHTTP::Range range; + if (read_until_position) { - if (last_offset < offset) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to read beyond right offset ({} > {})", offset, last_offset - 1); + if (read_until_position < offset) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to read beyond right offset ({} > {})", offset, read_until_position - 1); - headers.emplace_back(std::make_pair("Range", fmt::format("bytes={}-{}", offset, last_offset - 1))); - LOG_DEBUG(log, "Reading with range: {}-{}", offset, last_offset); + range = { .begin = static_cast(offset), .end = read_until_position - 1 }; + LOG_DEBUG(log, "Reading with range: {}-{}", offset, read_until_position); } else { - headers.emplace_back(std::make_pair("Range", fmt::format("bytes={}-", offset))); + range = { .begin = static_cast(offset), .end = std::nullopt }; LOG_DEBUG(log, "Reading from offset: {}", offset); } @@ 
-75,78 +70,32 @@ std::unique_ptr ReadBufferFromWebServer::initialize() std::max(Poco::Timespan(settings.http_receive_timeout.totalSeconds(), 0), Poco::Timespan(20, 0)), settings.tcp_keep_alive_timeout, http_keep_alive_timeout), + credentials, 0, - Poco::Net::HTTPBasicCredentials{}, buf_size, read_settings, - headers, + ReadWriteBufferFromHTTP::HTTPHeaderEntries{}, + range, context->getRemoteHostFilter(), + /* delay_initialization */true, use_external_buffer); } -void ReadBufferFromWebServer::initializeWithRetry() -{ - /// Initialize impl with retry. - size_t milliseconds_to_wait = WAIT_INIT; - for (size_t i = 0; i < HTTP_MAX_TRIES; ++i) - { - try - { - impl = initialize(); - - if (use_external_buffer) - { - /** - * See comment 30 lines lower. - */ - impl->set(internal_buffer.begin(), internal_buffer.size()); - assert(working_buffer.begin() != nullptr); - assert(!internal_buffer.empty()); - } - - break; - } - catch (Poco::Exception & e) - { - if (i == HTTP_MAX_TRIES - 1) - throw; - - LOG_ERROR(&Poco::Logger::get("ReadBufferFromWeb"), "Error: {}, code: {}", e.what(), e.code()); - sleepForMilliseconds(milliseconds_to_wait); - milliseconds_to_wait *= 2; - } - } -} - - bool ReadBufferFromWebServer::nextImpl() { - if (last_offset) + if (read_until_position) { - if (last_offset == offset) + if (read_until_position == offset) return false; - if (last_offset < offset) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to read beyond right offset ({} > {})", offset, last_offset - 1); + if (read_until_position < offset) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to read beyond right offset ({} > {})", offset, read_until_position - 1); } if (impl) { - if (use_external_buffer) - { - /** - * use_external_buffer -- means we read into the buffer which - * was passed to us from somewhere else. We do not check whether - * previously returned buffer was read or not, because this branch - * means we are prefetching data, each nextImpl() call we can fill - * a different buffer. - */ - impl->set(internal_buffer.begin(), internal_buffer.size()); - assert(working_buffer.begin() != nullptr); - assert(!internal_buffer.empty()); - } - else + if (!use_external_buffer) { /** * impl was initialized before, pass position() to it to make @@ -159,7 +108,21 @@ bool ReadBufferFromWebServer::nextImpl() } else { - initializeWithRetry(); + impl = initialize(); + } + + if (use_external_buffer) + { + /** + * use_external_buffer -- means we read into the buffer which + * was passed to us from somewhere else. We do not check whether + * previously returned buffer was read or not, because this branch + * means we are prefetching data, each nextImpl() call we can fill + * a different buffer. 
+ */ + impl->set(internal_buffer.begin(), internal_buffer.size()); + assert(working_buffer.begin() != nullptr); + assert(!internal_buffer.empty()); } auto result = impl->next(); diff --git a/src/Disks/IO/ReadBufferFromWebServer.h b/src/Disks/IO/ReadBufferFromWebServer.h index 1ffb8589392..7285a94b0d8 100644 --- a/src/Disks/IO/ReadBufferFromWebServer.h +++ b/src/Disks/IO/ReadBufferFromWebServer.h @@ -4,6 +4,7 @@ #include #include #include +#include namespace DB @@ -21,7 +22,7 @@ public: const String & url_, ContextPtr context_, const ReadSettings & settings_ = {}, bool use_external_buffer_ = false, - size_t last_offset = 0); + size_t read_until_position = 0); bool nextImpl() override; @@ -32,8 +33,6 @@ public: private: std::unique_ptr initialize(); - void initializeWithRetry(); - Poco::Logger * log; ContextPtr context; @@ -42,13 +41,14 @@ private: std::unique_ptr impl; - off_t offset = 0; - ReadSettings read_settings; + Poco::Net::HTTPBasicCredentials credentials{}; + bool use_external_buffer; - off_t last_offset = 0; + off_t offset = 0; + off_t read_until_position = 0; }; } diff --git a/src/Disks/LocalDirectorySyncGuard.cpp b/src/Disks/LocalDirectorySyncGuard.cpp index ad66cdab682..2610cd7c37f 100644 --- a/src/Disks/LocalDirectorySyncGuard.cpp +++ b/src/Disks/LocalDirectorySyncGuard.cpp @@ -34,10 +34,10 @@ LocalDirectorySyncGuard::~LocalDirectorySyncGuard() #if defined(OS_DARWIN) if (fcntl(fd, F_FULLFSYNC, 0)) throwFromErrno("Cannot fcntl(F_FULLFSYNC)", ErrorCodes::CANNOT_FSYNC); +#else + if (-1 == ::fdatasync(fd)) + throw Exception("Cannot fdatasync", ErrorCodes::CANNOT_FSYNC); #endif - if (-1 == ::fsync(fd)) - throw Exception("Cannot fsync", ErrorCodes::CANNOT_FSYNC); - if (-1 == ::close(fd)) throw Exception("Cannot close file", ErrorCodes::CANNOT_CLOSE_FILE); } diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp index 3e99ca1a886..92d673687c6 100644 --- a/src/Disks/S3/DiskS3.cpp +++ b/src/Disks/S3/DiskS3.cpp @@ -158,11 +158,11 @@ DiskS3::DiskS3( String name_, String bucket_, String s3_root_path_, - String metadata_path_, + DiskPtr metadata_disk_, ContextPtr context_, SettingsPtr settings_, GetDiskSettings settings_getter_) - : IDiskRemote(name_, s3_root_path_, metadata_path_, "DiskS3", settings_->thread_pool_size) + : IDiskRemote(name_, s3_root_path_, metadata_disk_, "DiskS3", settings_->thread_pool_size) , bucket(std::move(bucket_)) , current_settings(std::move(settings_)) , settings_getter(settings_getter_) @@ -218,8 +218,7 @@ void DiskS3::moveFile(const String & from_path, const String & to_path, bool sen }; createFileOperationObject("rename", revision, object_metadata); } - - fs::rename(fs::path(metadata_path) / from_path, fs::path(metadata_path) / to_path); + metadata_disk->moveFile(from_path, to_path); } std::unique_ptr DiskS3::readFile(const String & path, const ReadSettings & read_settings, std::optional) const @@ -228,9 +227,9 @@ std::unique_ptr DiskS3::readFile(const String & path, co auto metadata = readMeta(path); LOG_TRACE(log, "Read from file by path: {}. 
Existing S3 objects: {}", - backQuote(metadata_path + path), metadata.remote_fs_objects.size()); + backQuote(metadata_disk->getPath() + path), metadata.remote_fs_objects.size()); - bool threadpool_read = read_settings.remote_fs_method == RemoteFSReadMethod::read_threadpool; + bool threadpool_read = read_settings.remote_fs_method == RemoteFSReadMethod::threadpool; auto s3_impl = std::make_unique( path, @@ -268,7 +267,7 @@ std::unique_ptr DiskS3::writeFile(const String & path, } LOG_TRACE(log, "{} to file by path: {}. S3 path: {}", - mode == WriteMode::Rewrite ? "Write" : "Append", backQuote(metadata_path + path), remote_fs_root_path + s3_path); + mode == WriteMode::Rewrite ? "Write" : "Append", backQuote(metadata_disk->getPath() + path), remote_fs_root_path + s3_path); auto s3_buffer = std::make_unique( settings->client, @@ -307,7 +306,7 @@ void DiskS3::createHardLink(const String & src_path, const String & dst_path, bo src.save(); /// Create FS hardlink to metadata file. - DB::createHardLink(metadata_path + src_path, metadata_path + dst_path); + metadata_disk->createHardLink(src_path, dst_path); } void DiskS3::shutdown() @@ -421,7 +420,7 @@ void DiskS3::updateObjectMetadata(const String & key, const ObjectMetadata & met void DiskS3::migrateFileToRestorableSchema(const String & path) { - LOG_TRACE(log, "Migrate file {} to restorable schema", metadata_path + path); + LOG_TRACE(log, "Migrate file {} to restorable schema", metadata_disk->getPath() + path); auto meta = readMeta(path); @@ -438,7 +437,7 @@ void DiskS3::migrateToRestorableSchemaRecursive(const String & path, Futures & r { checkStackSize(); /// This is needed to prevent stack overflow in case of cyclic symlinks. - LOG_TRACE(log, "Migrate directory {} to restorable schema", metadata_path + path); + LOG_TRACE(log, "Migrate directory {} to restorable schema", metadata_disk->getPath() + path); bool dir_contains_only_files = true; for (auto it = iterateDirectory(path); it->isValid(); it->next()) @@ -702,18 +701,19 @@ struct DiskS3::RestoreInformation void DiskS3::readRestoreInformation(DiskS3::RestoreInformation & restore_information) { - ReadBufferFromFile buffer(metadata_path + RESTORE_FILE_NAME, 512); - buffer.next(); + const ReadSettings read_settings; + auto buffer = metadata_disk->readFile(RESTORE_FILE_NAME, read_settings, 512); + buffer->next(); try { std::map properties; - while (buffer.hasPendingData()) + while (buffer->hasPendingData()) { String property; - readText(property, buffer); - assertChar('\n', buffer); + readText(property, *buffer); + assertChar('\n', *buffer); auto pos = property.find('='); if (pos == String::npos || pos == 0 || pos == property.length()) @@ -797,8 +797,7 @@ void DiskS3::restore() restoreFiles(information); restoreFileOperations(information); - fs::path restore_file = fs::path(metadata_path) / RESTORE_FILE_NAME; - fs::remove(restore_file); + metadata_disk->removeFile(RESTORE_FILE_NAME); saveSchemaVersion(RESTORABLE_SCHEMA_VERSION); @@ -996,15 +995,18 @@ void DiskS3::restoreFileOperations(const RestoreInformation & restore_informatio LOG_TRACE(log, "Move directory to 'detached' {} -> {}", path, detached_path); - fs::path from_path = fs::path(metadata_path) / path; - fs::path to_path = fs::path(metadata_path) / detached_path; + fs::path from_path = fs::path(path); + fs::path to_path = fs::path(detached_path); if (path.ends_with('/')) to_path /= from_path.parent_path().filename(); else to_path /= from_path.filename(); - fs::create_directories(to_path); - fs::copy(from_path, to_path, 
fs::copy_options::recursive | fs::copy_options::overwrite_existing);
- fs::remove_all(from_path);
+
+ /// to_path may exist and be non-empty if, for example, there was an abrupt restart, so remove it before the rename
+ if (metadata_disk->exists(to_path))
+ metadata_disk->removeRecursive(to_path);
+
+ metadata_disk->moveDirectory(from_path, to_path);
 }
 }
@@ -1044,9 +1046,9 @@ String DiskS3::pathToDetached(const String & source_path)
 void DiskS3::onFreeze(const String & path)
 {
 createDirectories(path);
- WriteBufferFromFile revision_file_buf(metadata_path + path + "revision.txt", 32);
- writeIntText(revision_counter.load(), revision_file_buf);
- revision_file_buf.finalize();
+ auto revision_file_buf = metadata_disk->writeFile(path + "revision.txt", 32);
+ writeIntText(revision_counter.load(), *revision_file_buf);
+ revision_file_buf->finalize();
 }
 void DiskS3::applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context_, const String &, const DisksMap &)
diff --git a/src/Disks/S3/DiskS3.h b/src/Disks/S3/DiskS3.h
index 19bcb925cb4..d355d785cea 100644
--- a/src/Disks/S3/DiskS3.h
+++ b/src/Disks/S3/DiskS3.h
@@ -68,7 +68,7 @@ public:
 String name_,
 String bucket_,
 String s3_root_path_,
- String metadata_path_,
+ DiskPtr metadata_disk_,
 ContextPtr context_,
 SettingsPtr settings_,
 GetDiskSettings settings_getter_);
diff --git a/src/Disks/S3/registerDiskS3.cpp b/src/Disks/S3/registerDiskS3.cpp
index 8174ccea330..f7c1d7537c4 100644
--- a/src/Disks/S3/registerDiskS3.cpp
+++ b/src/Disks/S3/registerDiskS3.cpp
@@ -17,7 +17,7 @@
 #include "ProxyListConfiguration.h"
 #include "ProxyResolverConfiguration.h"
 #include "Disks/DiskRestartProxy.h"
-
+#include "Disks/DiskLocal.h"
 namespace DB
 {
@@ -178,12 +178,13 @@ void registerDiskS3(DiskFactory & factory)
 String metadata_path = config.getString(config_prefix + ".metadata_path", context->getPath() + "disks/" + name + "/");
 fs::create_directories(metadata_path);
+ auto metadata_disk = std::make_shared(name + "-metadata", metadata_path, 0);
 std::shared_ptr s3disk = std::make_shared(
 name,
 uri.bucket,
 uri.key,
- metadata_path,
+ metadata_disk,
 context,
 getSettings(config, config_prefix, context),
 getSettings);
diff --git a/src/Formats/EscapingRuleUtils.cpp b/src/Formats/EscapingRuleUtils.cpp
new file mode 100644
index 00000000000..2c2662a6a67
--- /dev/null
+++ b/src/Formats/EscapingRuleUtils.cpp
@@ -0,0 +1,225 @@
+#include
+#include
+#include
+#include
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+ extern const int BAD_ARGUMENTS;
+}
+
+FormatSettings::EscapingRule stringToEscapingRule(const String & escaping_rule)
+{
+ if (escaping_rule.empty())
+ return FormatSettings::EscapingRule::None;
+ else if (escaping_rule == "None")
+ return FormatSettings::EscapingRule::None;
+ else if (escaping_rule == "Escaped")
+ return FormatSettings::EscapingRule::Escaped;
+ else if (escaping_rule == "Quoted")
+ return FormatSettings::EscapingRule::Quoted;
+ else if (escaping_rule == "CSV")
+ return FormatSettings::EscapingRule::CSV;
+ else if (escaping_rule == "JSON")
+ return FormatSettings::EscapingRule::JSON;
+ else if (escaping_rule == "XML")
+ return FormatSettings::EscapingRule::XML;
+ else if (escaping_rule == "Raw")
+ return FormatSettings::EscapingRule::Raw;
+ else
+ throw Exception("Unknown escaping rule \"" + escaping_rule + "\"", ErrorCodes::BAD_ARGUMENTS);
+}
+
+String escapingRuleToString(FormatSettings::EscapingRule escaping_rule)
+{
+ switch (escaping_rule)
+ {
+ case FormatSettings::EscapingRule::None:
+ return "None";
+ case FormatSettings::EscapingRule::Escaped:
+ return "Escaped";
+ case FormatSettings::EscapingRule::Quoted:
+ return "Quoted";
+ case FormatSettings::EscapingRule::CSV:
+ return "CSV";
+ case FormatSettings::EscapingRule::JSON:
+ return "JSON";
+ case FormatSettings::EscapingRule::XML:
+ return "XML";
+ case FormatSettings::EscapingRule::Raw:
+ return "Raw";
+ }
+ __builtin_unreachable();
+}
+
+void skipFieldByEscapingRule(ReadBuffer & buf, FormatSettings::EscapingRule escaping_rule, const FormatSettings & format_settings)
+{
+ String tmp;
+ constexpr const char * field_name = "<SKIPPED COLUMN>";
+ constexpr size_t field_name_len = 16;
+ switch (escaping_rule)
+ {
+ case FormatSettings::EscapingRule::None:
+ /// Empty field, just skip spaces
+ break;
+ case FormatSettings::EscapingRule::Escaped:
+ readEscapedString(tmp, buf);
+ break;
+ case FormatSettings::EscapingRule::Quoted:
+ /// FIXME: it skips only strings, not numbers, arrays or tuples.
+ /// We should read until the delimiter and skip all data between
+ /// single quotes.
+ readQuotedString(tmp, buf);
+ break;
+ case FormatSettings::EscapingRule::CSV:
+ readCSVString(tmp, buf, format_settings.csv);
+ break;
+ case FormatSettings::EscapingRule::JSON:
+ skipJSONField(buf, StringRef(field_name, field_name_len));
+ break;
+ case FormatSettings::EscapingRule::Raw:
+ readString(tmp, buf);
+ break;
+ default:
+ __builtin_unreachable();
+ }
+}
+
+bool deserializeFieldByEscapingRule(
+ const DataTypePtr & type,
+ const SerializationPtr & serialization,
+ IColumn & column,
+ ReadBuffer & buf,
+ FormatSettings::EscapingRule escaping_rule,
+ const FormatSettings & format_settings)
+{
+ bool read = true;
+ bool parse_as_nullable = format_settings.null_as_default && !type->isNullable() && !type->isLowCardinalityNullable();
+ switch (escaping_rule)
+ {
+ case FormatSettings::EscapingRule::Escaped:
+ if (parse_as_nullable)
+ read = SerializationNullable::deserializeTextEscapedImpl(column, buf, format_settings, serialization);
+ else
+ serialization->deserializeTextEscaped(column, buf, format_settings);
+ break;
+ case FormatSettings::EscapingRule::Quoted:
+ if (parse_as_nullable)
+ read = SerializationNullable::deserializeTextQuotedImpl(column, buf, format_settings, serialization);
+ else
+ serialization->deserializeTextQuoted(column, buf, format_settings);
+ break;
+ case FormatSettings::EscapingRule::CSV:
+ if (parse_as_nullable)
+ read = SerializationNullable::deserializeTextCSVImpl(column, buf, format_settings, serialization);
+ else
+ serialization->deserializeTextCSV(column, buf, format_settings);
+ break;
+ case FormatSettings::EscapingRule::JSON:
+ if (parse_as_nullable)
+ read = SerializationNullable::deserializeTextJSONImpl(column, buf, format_settings, serialization);
+ else
+ serialization->deserializeTextJSON(column, buf, format_settings);
+ break;
+ case FormatSettings::EscapingRule::Raw:
+ if (parse_as_nullable)
+ read = SerializationNullable::deserializeTextRawImpl(column, buf, format_settings, serialization);
+ else
+ serialization->deserializeTextRaw(column, buf, format_settings);
+ break;
+ default:
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "Escaping rule {} is not suitable for deserialization", escapingRuleToString(escaping_rule));
+ }
+ return read;
+}
+
+void serializeFieldByEscapingRule(
+ const IColumn & column,
+ const ISerialization & serialization,
+ WriteBuffer & out,
+ size_t row_num,
+ FormatSettings::EscapingRule escaping_rule,
+ const FormatSettings & format_settings)
+{
+ switch (escaping_rule)
+ {
+ case
FormatSettings::EscapingRule::Escaped: + serialization.serializeTextEscaped(column, row_num, out, format_settings); + break; + case FormatSettings::EscapingRule::Quoted: + serialization.serializeTextQuoted(column, row_num, out, format_settings); + break; + case FormatSettings::EscapingRule::CSV: + serialization.serializeTextCSV(column, row_num, out, format_settings); + break; + case FormatSettings::EscapingRule::JSON: + serialization.serializeTextJSON(column, row_num, out, format_settings); + break; + case FormatSettings::EscapingRule::XML: + serialization.serializeTextXML(column, row_num, out, format_settings); + break; + case FormatSettings::EscapingRule::Raw: + serialization.serializeTextRaw(column, row_num, out, format_settings); + break; + case FormatSettings::EscapingRule::None: + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot serialize field with None escaping rule"); + } +} + +void writeStringByEscapingRule(const String & value, WriteBuffer & out, FormatSettings::EscapingRule escaping_rule, const FormatSettings & format_settings) +{ + switch (escaping_rule) + { + case FormatSettings::EscapingRule::Quoted: + writeQuotedString(value, out); + break; + case FormatSettings::EscapingRule::JSON: + writeJSONString(value, out, format_settings); + break; + case FormatSettings::EscapingRule::Raw: + writeString(value, out); + break; + case FormatSettings::EscapingRule::CSV: + writeCSVString(value, out); + break; + case FormatSettings::EscapingRule::Escaped: + writeEscapedString(value, out); + break; + case FormatSettings::EscapingRule::XML: + writeXMLStringForTextElement(value, out); + break; + case FormatSettings::EscapingRule::None: + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot serialize string with None escaping rule"); + } +} + +String readStringByEscapingRule(ReadBuffer & buf, FormatSettings::EscapingRule escaping_rule, const FormatSettings & format_settings) +{ + String result; + switch (escaping_rule) + { + case FormatSettings::EscapingRule::Quoted: + readQuotedString(result, buf); + break; + case FormatSettings::EscapingRule::JSON: + readJSONString(result, buf); + break; + case FormatSettings::EscapingRule::Raw: + readString(result, buf); + break; + case FormatSettings::EscapingRule::CSV: + readCSVString(result, buf, format_settings.csv); + break; + case FormatSettings::EscapingRule::Escaped: + readEscapedString(result, buf); + break; + default: + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot read string with {} escaping rule", escapingRuleToString(escaping_rule)); + } + return result; +} + +} diff --git a/src/Formats/EscapingRuleUtils.h b/src/Formats/EscapingRuleUtils.h new file mode 100644 index 00000000000..02f027db74d --- /dev/null +++ b/src/Formats/EscapingRuleUtils.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include +#include +#include + +namespace DB +{ + +FormatSettings::EscapingRule stringToEscapingRule(const String & escaping_rule); + +String escapingRuleToString(FormatSettings::EscapingRule escaping_rule); + +void skipFieldByEscapingRule(ReadBuffer & buf, FormatSettings::EscapingRule escaping_rule, const FormatSettings & format_settings); + +bool deserializeFieldByEscapingRule( + const DataTypePtr & type, + const SerializationPtr & serialization, + IColumn & column, + ReadBuffer & buf, + FormatSettings::EscapingRule escaping_rule, + const FormatSettings & format_settings); + +void serializeFieldByEscapingRule( + const IColumn & column, + const ISerialization & serialization, + WriteBuffer & out, + size_t row_num, + FormatSettings::EscapingRule 
escaping_rule, + const FormatSettings & format_settings); + +void writeStringByEscapingRule(const String & value, WriteBuffer & out, FormatSettings::EscapingRule escaping_rule, const FormatSettings & format_settings); + +String readStringByEscapingRule(ReadBuffer & buf, FormatSettings::EscapingRule escaping_rule, const FormatSettings & format_settings); + +} diff --git a/src/Formats/FormatFactory.h b/src/Formats/FormatFactory.h index 4e10aa4141a..77ecd2c167f 100644 --- a/src/Formats/FormatFactory.h +++ b/src/Formats/FormatFactory.h @@ -68,13 +68,11 @@ public: size_t row)>; private: - using InputCreatorFunc = InputFormatPtr( - ReadBuffer & buf, - const Block & header, - const RowInputFormatParams & params, - const FormatSettings & settings); - - using InputCreator = std::function; + using InputCreator = std::function; using OutputCreator = std::function #include +#include #include #include #include -#include #include namespace DB @@ -11,7 +11,6 @@ namespace DB namespace ErrorCodes { - extern const int BAD_ARGUMENTS; extern const int INVALID_TEMPLATE_FORMAT; } @@ -83,7 +82,7 @@ void ParsedTemplateFormatString::parse(const String & format_string, const Colum state = Format; else if (*pos == '}') { - formats.push_back(ColumnFormat::None); + escaping_rules.push_back(EscapingRule::None); delimiters.emplace_back(); state = Delimiter; } @@ -108,7 +107,7 @@ void ParsedTemplateFormatString::parse(const String & format_string, const Colum case Format: if (*pos == '}') { - formats.push_back(stringToFormat(String(token_begin, pos - token_begin))); + escaping_rules.push_back(stringToEscapingRule(String(token_begin, pos - token_begin))); token_begin = pos + 1; delimiters.emplace_back(); state = Delimiter; @@ -120,56 +119,11 @@ void ParsedTemplateFormatString::parse(const String & format_string, const Colum delimiters.back().append(token_begin, pos - token_begin); } - -ParsedTemplateFormatString::ColumnFormat ParsedTemplateFormatString::stringToFormat(const String & col_format) -{ - if (col_format.empty()) - return ColumnFormat::None; - else if (col_format == "None") - return ColumnFormat::None; - else if (col_format == "Escaped") - return ColumnFormat::Escaped; - else if (col_format == "Quoted") - return ColumnFormat::Quoted; - else if (col_format == "CSV") - return ColumnFormat::Csv; - else if (col_format == "JSON") - return ColumnFormat::Json; - else if (col_format == "XML") - return ColumnFormat::Xml; - else if (col_format == "Raw") - return ColumnFormat::Raw; - else - throw Exception("Unknown field format \"" + col_format + "\"", ErrorCodes::BAD_ARGUMENTS); -} - size_t ParsedTemplateFormatString::columnsCount() const { return format_idx_to_column_idx.size(); } -String ParsedTemplateFormatString::formatToString(ParsedTemplateFormatString::ColumnFormat format) -{ - switch (format) - { - case ColumnFormat::None: - return "None"; - case ColumnFormat::Escaped: - return "Escaped"; - case ColumnFormat::Quoted: - return "Quoted"; - case ColumnFormat::Csv: - return "CSV"; - case ColumnFormat::Json: - return "Json"; - case ColumnFormat::Xml: - return "Xml"; - case ColumnFormat::Raw: - return "Raw"; - } - __builtin_unreachable(); -} - const char * ParsedTemplateFormatString::readMayBeQuotedColumnNameInto(const char * pos, size_t size, String & s) { s.clear(); @@ -197,7 +151,7 @@ String ParsedTemplateFormatString::dump() const res << "\nDelimiter " << 0 << ": "; verbosePrintString(delimiters.front().c_str(), delimiters.front().c_str() + delimiters.front().size(), res); - size_t num_columns = 
std::max(formats.size(), format_idx_to_column_idx.size()); + size_t num_columns = std::max(escaping_rules.size(), format_idx_to_column_idx.size()); for (size_t i = 0; i < num_columns; ++i) { res << "\nColumn " << i << ": \""; @@ -216,7 +170,7 @@ String ParsedTemplateFormatString::dump() const else res << *format_idx_to_column_idx[i]; - res << "), Format " << (i < formats.size() ? formatToString(formats[i]) : ""); + res << "), Format " << (i < escaping_rules.size() ? escapingRuleToString(escaping_rules[i]) : ""); res << "\nDelimiter " << i + 1 << ": "; if (delimiters.size() <= i + 1) @@ -235,34 +189,4 @@ void ParsedTemplateFormatString::throwInvalidFormat(const String & message, size ErrorCodes::INVALID_TEMPLATE_FORMAT); } -ParsedTemplateFormatString ParsedTemplateFormatString::setupCustomSeparatedResultsetFormat(const FormatSettings::Custom & settings) -{ - /// Set resultset format to "result_before_delimiter ${data} result_after_delimiter" - ParsedTemplateFormatString resultset_format; - resultset_format.delimiters.emplace_back(settings.result_before_delimiter); - resultset_format.delimiters.emplace_back(settings.result_after_delimiter); - resultset_format.formats.emplace_back(ParsedTemplateFormatString::ColumnFormat::None); - resultset_format.format_idx_to_column_idx.emplace_back(0); - resultset_format.column_names.emplace_back("data"); - return resultset_format; -} - -ParsedTemplateFormatString ParsedTemplateFormatString::setupCustomSeparatedRowFormat(const FormatSettings::Custom & settings, const Block & sample) -{ - /// Set row format to - /// "row_before_delimiter ${Col0:escaping} field_delimiter ${Col1:escaping} field_delimiter ... ${ColN:escaping} row_after_delimiter" - ParsedTemplateFormatString::ColumnFormat escaping = ParsedTemplateFormatString::stringToFormat(settings.escaping_rule); - ParsedTemplateFormatString row_format; - row_format.delimiters.emplace_back(settings.row_before_delimiter); - for (size_t i = 0; i < sample.columns(); ++i) - { - row_format.formats.emplace_back(escaping); - row_format.format_idx_to_column_idx.emplace_back(i); - row_format.column_names.emplace_back(sample.getByPosition(i).name); - bool last_column = i == sample.columns() - 1; - row_format.delimiters.emplace_back(last_column ? settings.row_after_delimiter : settings.field_delimiter); - } - return row_format; -} - } diff --git a/src/Formats/ParsedTemplateFormatString.h b/src/Formats/ParsedTemplateFormatString.h index 4021b71656f..ba0ebdf5aa8 100644 --- a/src/Formats/ParsedTemplateFormatString.h +++ b/src/Formats/ParsedTemplateFormatString.h @@ -15,23 +15,14 @@ using Strings = std::vector; struct ParsedTemplateFormatString { - enum class ColumnFormat - { - None, - Escaped, - Quoted, - Csv, - Json, - Xml, - Raw - }; + using EscapingRule = FormatSettings::EscapingRule; /// Format string has syntax: "Delimiter0 ${ColumnName0:Format0} Delimiter1 ${ColumnName1:Format1} Delimiter2" /// The following vectors is filled with corresponding values, delimiters.size() - 1 = formats.size() = format_idx_to_column_idx.size() /// If format_idx_to_column_idx[i] has no value, then TemplateRowInputFormat will skip i-th column. 
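    /// Example: the format string "Price: ${price:CSV}, qty: ${qty:Escaped}" parses into
    /// delimiters {"Price: ", ", qty: ", ""}, escaping_rules {CSV, Escaped} and column_names {"price", "qty"}.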
     std::vector<String> delimiters;
-    std::vector<ColumnFormat> formats;
+    std::vector<EscapingRule> escaping_rules;
     std::vector<std::optional<size_t>> format_idx_to_column_idx;
 
     /// For diagnostic info
@@ -44,16 +35,11 @@ struct ParsedTemplateFormatString
 
     void parse(const String & format_string, const ColumnIdxGetter & idx_by_name);
 
-    static ColumnFormat stringToFormat(const String & format);
-    static String formatToString(ColumnFormat format);
     static const char * readMayBeQuotedColumnNameInto(const char * pos, size_t size, String & s);
     size_t columnsCount() const;
 
     String dump() const;
     [[noreturn]] void throwInvalidFormat(const String & message, size_t column) const;
-
-    static ParsedTemplateFormatString setupCustomSeparatedResultsetFormat(const FormatSettings::Custom & settings);
-    static ParsedTemplateFormatString setupCustomSeparatedRowFormat(const FormatSettings::Custom & settings, const Block & sample);
 };
 
 }
diff --git a/src/Formats/registerFormats.cpp b/src/Formats/registerFormats.cpp
index 1aedff5fceb..7425c6898de 100644
--- a/src/Formats/registerFormats.cpp
+++ b/src/Formats/registerFormats.cpp
@@ -50,6 +50,8 @@ void registerInputFormatAvro(FormatFactory & factory);
 void registerOutputFormatAvro(FormatFactory & factory);
 void registerInputFormatRawBLOB(FormatFactory & factory);
 void registerOutputFormatRawBLOB(FormatFactory & factory);
+void registerInputFormatCustomSeparated(FormatFactory & factory);
+void registerOutputFormatCustomSeparated(FormatFactory & factory);
 
 /// Output only (presentational) formats.
@@ -115,6 +117,8 @@ void registerFormats()
     registerOutputFormatMsgPack(factory);
     registerInputFormatRawBLOB(factory);
     registerOutputFormatRawBLOB(factory);
+    registerInputFormatCustomSeparated(factory);
+    registerOutputFormatCustomSeparated(factory);
 
     registerInputFormatORC(factory);
     registerOutputFormatORC(factory);
diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h
index aaed980c62a..d860da62b9d 100644
--- a/src/Functions/FunctionSQLJSON.h
+++ b/src/Functions/FunctionSQLJSON.h
@@ -18,6 +18,8 @@
 #include
 #include
 #include
+#include <IO/ReadBufferFromString.h>
+#include <IO/ReadHelpers.h>
 #include
 #include "config_functions.h"
@@ -260,15 +262,26 @@ public:
         }
 
         if (status == VisitorStatus::Exhausted)
-        {
             return false;
-        }
 
         std::stringstream out; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
         out << current_element.getElement();
         auto output_str = out.str();
         ColumnString & col_str = assert_cast<ColumnString &>(dest);
-        col_str.insertData(output_str.data(), output_str.size());
+        ColumnString::Chars & data = col_str.getChars();
+        ColumnString::Offsets & offsets = col_str.getOffsets();
+
+        if (current_element.isString())
+        {
+            ReadBufferFromString buf(output_str);
+            readJSONStringInto(data, buf);
+            data.push_back(0);
+            offsets.push_back(data.size());
+        }
+        else
+        {
+            col_str.insertData(output_str.data(), output_str.size());
+        }
         return true;
     }
 };
diff --git a/src/Functions/formatRow.cpp b/src/Functions/formatRow.cpp
index 3f9d3e782d7..ccd013123d7 100644
--- a/src/Functions/formatRow.cpp
+++ b/src/Functions/formatRow.cpp
@@ -77,6 +77,8 @@ public:
         if (!dynamic_cast<IRowOutputFormat *>(out.get()))
             throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot turn rows into a {} format strings. {} function supports only row output formats", format_name, getName());
 
+        /// Don't write prefix if any.
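+        /// (Each formatRow() call produces one independent row string, so a resultset-level
+        /// prefix such as a format header would otherwise be duplicated in every value.)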
+ out->doNotWritePrefix(); out->write(arg_columns); return col_str; } diff --git a/src/IO/OpenedFileCache.h b/src/IO/OpenedFileCache.h index 5a96a6240e1..844e5b31d11 100644 --- a/src/IO/OpenedFileCache.h +++ b/src/IO/OpenedFileCache.h @@ -46,8 +46,14 @@ public: auto [it, inserted] = files.emplace(key, OpenedFilePtr{}); if (!inserted) + { if (auto res = it->second.lock()) + { + ProfileEvents::increment(ProfileEvents::OpenedFileCacheHits); return res; + } + } + ProfileEvents::increment(ProfileEvents::OpenedFileCacheMisses); OpenedFilePtr res { diff --git a/src/IO/ReadBufferFromEmptyFile.h b/src/IO/ReadBufferFromEmptyFile.h new file mode 100644 index 00000000000..311aee1559b --- /dev/null +++ b/src/IO/ReadBufferFromEmptyFile.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +namespace DB +{ + +/// In case of empty file it does not make any sense to read it. +/// +/// Plus regular readers from file has an assert that buffer is not empty, that will fail: +/// - ReadBufferFromFileDescriptor +/// - SynchronousReader +/// - ThreadPoolReader +class ReadBufferFromEmptyFile : public ReadBufferFromFileBase +{ +private: + bool nextImpl() override { return false; } + std::string getFileName() const override { return ""; } + off_t seek(off_t /*off*/, int /*whence*/) override { return 0; } + off_t getPosition() override { return 0; } +}; + +} diff --git a/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp index a710dfe33fb..ed8eba62f04 100644 --- a/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/src/IO/ReadBufferFromFileDescriptor.cpp @@ -51,6 +51,9 @@ std::string ReadBufferFromFileDescriptor::getFileName() const bool ReadBufferFromFileDescriptor::nextImpl() { + /// If internal_buffer size is empty, then read() cannot be distinguished from EOF + assert(!internal_buffer.empty()); + size_t bytes_read = 0; while (!bytes_read) { diff --git a/src/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp index 78d296be60e..bf1e45c659a 100644 --- a/src/IO/ReadBufferFromS3.cpp +++ b/src/IO/ReadBufferFromS3.cpp @@ -170,7 +170,7 @@ std::unique_ptr ReadBufferFromS3::initialize() req.SetKey(key); /** - * If remote_filesystem_read_method = 'read_threadpool', then for MergeTree family tables + * If remote_filesystem_read_method = 'threadpool', then for MergeTree family tables * exact byte ranges to read are always passed here. 
*/ if (read_until_position) diff --git a/src/IO/ReadHelpers.cpp b/src/IO/ReadHelpers.cpp index 41dcd9fde6c..675adc43ce6 100644 --- a/src/IO/ReadHelpers.cpp +++ b/src/IO/ReadHelpers.cpp @@ -1166,4 +1166,50 @@ bool loadAtPosition(ReadBuffer & in, Memory<> & memory, char * & current) return loaded_more; } +/// Searches for delimiter in input stream and sets buffer position after delimiter (if found) or EOF (if not) +static void findAndSkipNextDelimiter(PeekableReadBuffer & buf, const String & delimiter) +{ + if (delimiter.empty()) + return; + + while (!buf.eof()) + { + void * pos = memchr(buf.position(), delimiter[0], buf.available()); + if (!pos) + { + buf.position() += buf.available(); + continue; + } + + buf.position() = static_cast(pos); + + PeekableReadBufferCheckpoint checkpoint{buf}; + if (checkString(delimiter, buf)) + return; + + buf.rollbackToCheckpoint(); + ++buf.position(); + } +} + +void skipToNextRowOrEof(PeekableReadBuffer & buf, const String & row_after_delimiter, const String & row_between_delimiter, bool skip_spaces) +{ + if (row_after_delimiter.empty()) + { + findAndSkipNextDelimiter(buf, row_between_delimiter); + return; + } + + while (true) + { + findAndSkipNextDelimiter(buf, row_after_delimiter); + + if (skip_spaces) + skipWhitespaceIfAny(buf); + + if (checkString(row_between_delimiter, buf)) + break; + } +} + } diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 81973bcd8a4..aa9f0fe6c07 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -1324,6 +1325,9 @@ void saveUpToPosition(ReadBuffer & in, Memory> & memory, char * */ bool loadAtPosition(ReadBuffer & in, Memory> & memory, char * & current); +/// Skip data until start of the next row or eof (the end of row is determined by two delimiters: +/// row_after_delimiter and row_between_delimiter). +void skipToNextRowOrEof(PeekableReadBuffer & buf, const String & row_after_delimiter, const String & row_between_delimiter, bool skip_spaces); struct PcgDeserializer { diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index be328e28118..c8b0d6025c0 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -46,7 +46,7 @@ enum class LocalFSReadMethod enum class RemoteFSReadMethod { read, - read_threadpool, + threadpool, }; class MMappedFileCache; @@ -77,6 +77,10 @@ struct ReadSettings size_t remote_fs_read_max_backoff_ms = 10000; size_t remote_fs_read_backoff_max_tries = 4; + size_t http_max_tries = 1; + size_t http_retry_initial_backoff_ms = 100; + size_t http_retry_max_backoff_ms = 1600; + /// Set to true for MergeTree tables to make sure /// that last position (offset in compressed file) is always passed. /// (Otherwise asynchronous reading from remote fs is not efficient). 
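A minimal standalone sketch (not part of this patch) of the retry schedule that the three new http_* settings above describe; the doubling-with-cap rule mirrors the milliseconds_to_wait update in ReadWriteBufferFromHTTP::nextImpl() below, while the program itself and the non-default try count are illustrative assumptions:

    #include <algorithm>
    #include <cstddef>
    #include <iostream>

    int main()
    {
        size_t http_max_tries = 4;                    /// default is 1 (no retries); raised here so the progression is visible
        size_t http_retry_initial_backoff_ms = 100;   /// default from the diff
        size_t http_retry_max_backoff_ms = 1600;      /// default from the diff

        size_t milliseconds_to_wait = http_retry_initial_backoff_ms;
        for (size_t i = 0; i < http_max_tries; ++i)
        {
            std::cout << "try " << i + 1 << ": wait " << milliseconds_to_wait << " ms before the next attempt\n";
            /// Double the wait after every failed try, but never exceed the configured ceiling.
            milliseconds_to_wait = std::min(milliseconds_to_wait * 2, http_retry_max_backoff_ms);
        }
        /// Prints 100, 200, 400, 800; one further try would be capped at 1600 ms.
    }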
diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index 16259e8057e..126778590db 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -31,6 +32,8 @@ namespace DB namespace ErrorCodes { extern const int TOO_MANY_REDIRECTS; + extern const int HTTP_RANGE_NOT_SATISFIABLE; + extern const int BAD_ARGUMENTS; } template @@ -40,7 +43,7 @@ protected: SessionPtr session; UInt64 redirects { 0 }; Poco::URI initial_uri; - const ConnectionTimeouts & timeouts; + ConnectionTimeouts timeouts; UInt64 max_redirects; public: @@ -86,6 +89,13 @@ namespace detail using HTTPHeaderEntry = std::tuple; using HTTPHeaderEntries = std::vector; + /// HTTP range, including right bound [begin, end]. + struct Range + { + size_t begin = 0; + std::optional end; + }; + protected: Poco::URI uri; std::string method; @@ -102,7 +112,33 @@ namespace detail std::function next_callback; size_t buffer_size; + bool use_external_buffer; + + size_t bytes_read = 0; + Range read_range; + + /// Delayed exception in case retries with partial content are not satisfiable. + std::exception_ptr exception; + bool retry_with_range_header = false; + /// In case of redirects, save result uri to use it if we retry the request. + std::optional saved_uri_redirect; + ReadSettings settings; + Poco::Logger * log; + + bool withPartialContent() const + { + /** + * Add range header if we have some passed range (for disk web) + * or if we want to retry GET request on purpose. + */ + return read_range.begin || read_range.end || retry_with_range_header; + } + + size_t getOffset() const + { + return read_range.begin + bytes_read; + } std::istream * call(Poco::URI uri_, Poco::Net::HTTPResponse & response) { @@ -117,14 +153,23 @@ namespace detail request.setChunkedTransferEncoding(true); for (auto & http_header_entry: http_header_entries) - { request.set(std::get<0>(http_header_entry), std::get<1>(http_header_entry)); + + if (withPartialContent()) + { + String range_header_value; + if (read_range.end) + range_header_value = fmt::format("bytes={}-{}", getOffset(), *read_range.end); + else + range_header_value = fmt::format("bytes={}-", getOffset()); + LOG_TEST(log, "Adding header: Range: {}", range_header_value); + request.set("Range", range_header_value); } if (!credentials.getUsername().empty()) credentials.authenticate(request); - LOG_TRACE((&Poco::Logger::get("ReadWriteBufferFromHTTP")), "Sending request to {}", uri_.toString()); + LOG_TRACE(log, "Sending request to {}", uri_.toString()); auto sess = session->getSession(); @@ -140,7 +185,6 @@ namespace detail content_encoding = response.get("Content-Encoding", ""); return istr; - } catch (const Poco::Exception & e) { @@ -151,9 +195,6 @@ namespace detail } } - private: - bool use_external_buffer; - public: using NextCallback = std::function; using OutStreamCallback = std::function; @@ -161,13 +202,15 @@ namespace detail explicit ReadWriteBufferFromHTTPBase( UpdatableSessionPtr session_, Poco::URI uri_, + const Poco::Net::HTTPBasicCredentials & credentials_, const std::string & method_ = {}, OutStreamCallback out_stream_callback_ = {}, - const Poco::Net::HTTPBasicCredentials & credentials_ = {}, size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE, const ReadSettings & settings_ = {}, HTTPHeaderEntries http_header_entries_ = {}, + Range read_range_ = {}, const RemoteHostFilter & remote_host_filter_ = {}, + bool delay_initialization = false, bool use_external_buffer_ = false) 
: ReadBuffer(nullptr, 0) , uri {uri_} @@ -178,17 +221,31 @@ namespace detail , http_header_entries {http_header_entries_} , remote_host_filter {remote_host_filter_} , buffer_size {buffer_size_} - , settings {settings_} , use_external_buffer {use_external_buffer_} + , read_range(read_range_) + , settings {settings_} + , log(&Poco::Logger::get("ReadWriteBufferFromHTTP")) { - initialize(); + if (settings.http_max_tries <= 0 || settings.http_retry_initial_backoff_ms <= 0 + || settings.http_retry_initial_backoff_ms >= settings.http_retry_max_backoff_ms) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Invalid setting for http backoff, " + "must be http_max_tries >= 1 (current is {}) and " + "0 < http_retry_initial_backoff_ms < settings.http_retry_max_backoff_ms (now 0 < {} < {})", + settings.http_max_tries, settings.http_retry_initial_backoff_ms, settings.http_retry_max_backoff_ms); + + if (!delay_initialization) + initialize(); } - void initialize() + /** + * Note: In case of error return false if error is not retriable, otherwise throw. + */ + bool initialize() { - Poco::Net::HTTPResponse response; - istr = call(uri, response); + + istr = call(saved_uri_redirect ? *saved_uri_redirect : uri, response); while (isRedirect(response.getStatus())) { @@ -196,10 +253,33 @@ namespace detail remote_host_filter.checkURL(uri_redirect); session->updateSession(uri_redirect); - istr = call(uri_redirect, response); + saved_uri_redirect = uri_redirect; } + if (withPartialContent() && response.getStatus() != Poco::Net::HTTPResponse::HTTPStatus::HTTP_PARTIAL_CONTENT) + { + /// Having `200 OK` instead of `206 Partial Content` is acceptable in case we retried with range.begin == 0. + if (read_range.begin) + { + if (!exception) + exception = std::make_exception_ptr( + Exception(ErrorCodes::HTTP_RANGE_NOT_SATISFIABLE, + "Cannot read with range: [{}, {}]", read_range.begin, read_range.end ? *read_range.end : '-')); + + return false; + } + else if (read_range.end) + { + /// We could have range.begin == 0 and range.end != 0 in case of DiskWeb and failing to read with partial content + /// will affect only performance, so a warning is enough. + LOG_WARNING(log, "Unable to read with range header: [{}, {}]", read_range.begin, *read_range.end); + } + } + + if (!bytes_read && !read_range.end && response.hasContentLength()) + read_range.end = read_range.begin + response.getContentLength(); + try { impl = std::make_unique(*istr, buffer_size); @@ -222,6 +302,8 @@ namespace detail sess->attachSessionData(e.message()); throw; } + + return true; } bool nextImpl() override @@ -229,34 +311,104 @@ namespace detail if (next_callback) next_callback(count()); - if (use_external_buffer) + if (read_range.end && getOffset() == read_range.end.value()) + return false; + + if (impl) { - /** - * use_external_buffer -- means we read into the buffer which - * was passed to us from somewhere else. We do not check whether - * previously returned buffer was read or not (no hasPendingData() check is needed), - * because this branch means we are prefetching data, - * each nextImpl() call we can fill a different buffer. - */ - impl->set(internal_buffer.begin(), internal_buffer.size()); - assert(working_buffer.begin() != nullptr); - assert(!internal_buffer.empty()); - } - else - { - /** - * impl was initialized before, pass position() to it to make - * sure there is no pending data which was not read. 
- */ - if (!working_buffer.empty()) - impl->position() = position(); + if (use_external_buffer) + { + /** + * use_external_buffer -- means we read into the buffer which + * was passed to us from somewhere else. We do not check whether + * previously returned buffer was read or not (no hasPendingData() check is needed), + * because this branch means we are prefetching data, + * each nextImpl() call we can fill a different buffer. + */ + impl->set(internal_buffer.begin(), internal_buffer.size()); + assert(working_buffer.begin() != nullptr); + assert(!internal_buffer.empty()); + } + else + { + /** + * impl was initialized before, pass position() to it to make + * sure there is no pending data which was not read. + */ + if (!working_buffer.empty()) + impl->position() = position(); + } } - if (!impl->next()) + bool result = false; + size_t milliseconds_to_wait = settings.http_retry_initial_backoff_ms; + + for (size_t i = 0; i < settings.http_max_tries; ++i) + { + try + { + if (!impl) + { + /// If error is not retriable -- false is returned and exception is set. + /// Otherwise the error is thrown and retries continue. + bool initialized = initialize(); + if (!initialized) + { + assert(exception); + break; + } + + if (use_external_buffer) + { + /// See comment 40 lines above. + impl->set(internal_buffer.begin(), internal_buffer.size()); + assert(working_buffer.begin() != nullptr); + assert(!internal_buffer.empty()); + } + } + + result = impl->next(); + exception = nullptr; + break; + } + catch (const Poco::Exception & e) + { + /** + * Retry request unconditionally if nothing has been read yet. + * Otherwise if it is GET method retry with range header starting from bytes_read. + */ + bool can_retry_request = !bytes_read || method == Poco::Net::HTTPRequest::HTTP_GET; + if (!can_retry_request) + throw; + + LOG_ERROR(log, + "HTTP request to `{}` failed at try {}/{} with bytes read: {}/{}. " + "Error: {}. (Current backoff wait is {}/{} ms)", + uri.toString(), i, settings.http_max_tries, + getOffset(), read_range.end ? 
toString(*read_range.end) : "unknown", + e.displayText(), + milliseconds_to_wait, settings.http_retry_max_backoff_ms); + + retry_with_range_header = true; + exception = std::current_exception(); + impl.reset(); + auto http_session = session->getSession(); + http_session->reset(); + sleepForMilliseconds(milliseconds_to_wait); + } + + milliseconds_to_wait = std::min(milliseconds_to_wait * 2, settings.http_retry_max_backoff_ms); + } + + if (exception) + std::rethrow_exception(exception); + + if (!result) return false; internal_buffer = impl->buffer(); working_buffer = internal_buffer; + bytes_read += working_buffer.size(); return true; } @@ -316,16 +468,19 @@ public: const std::string & method_, OutStreamCallback out_stream_callback_, const ConnectionTimeouts & timeouts, + const Poco::Net::HTTPBasicCredentials & credentials_, const UInt64 max_redirects = 0, - const Poco::Net::HTTPBasicCredentials & credentials_ = {}, size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE, const ReadSettings & settings_ = {}, const HTTPHeaderEntries & http_header_entries_ = {}, + Range read_range_ = {}, const RemoteHostFilter & remote_host_filter_ = {}, + bool delay_initialization_ = true, bool use_external_buffer_ = false) : Parent(std::make_shared(uri_, timeouts, max_redirects), - uri_, method_, out_stream_callback_, credentials_, buffer_size_, - settings_, http_header_entries_, remote_host_filter_, use_external_buffer_) + uri_, credentials_, method_, out_stream_callback_, buffer_size_, + settings_, http_header_entries_, read_range_, remote_host_filter_, + delay_initialization_, use_external_buffer_) { } }; @@ -350,7 +505,7 @@ public: void buildNewSession(const Poco::URI & uri) override { - session = makePooledHTTPSession(uri, timeouts, per_endpoint_pool_size); + session = makePooledHTTPSession(uri, timeouts, per_endpoint_pool_size); } }; @@ -369,9 +524,9 @@ public: size_t max_connections_per_endpoint = DEFAULT_COUNT_OF_HTTP_CONNECTIONS_PER_ENDPOINT) : Parent(std::make_shared(uri_, timeouts_, max_redirects, max_connections_per_endpoint), uri_, + credentials_, method_, out_stream_callback_, - credentials_, buffer_size_) { } diff --git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp index 89e1542d946..d841fbc8bb2 100644 --- a/src/IO/S3Common.cpp +++ b/src/IO/S3Common.cpp @@ -14,12 +14,14 @@ # include # include # include +# include # include # include # include # include # include # include +# include # include # include @@ -30,6 +32,8 @@ # include # include +# include + namespace { @@ -361,6 +365,155 @@ private: Poco::Logger * logger; }; +class AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider : public Aws::Auth::AWSCredentialsProvider +{ + /// See STSAssumeRoleWebIdentityCredentialsProvider. + +public: + explicit AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider(DB::S3::PocoHTTPClientConfiguration & aws_client_configuration) + : logger(&Poco::Logger::get("AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider")) + { + // check environment variables + String tmp_region = Aws::Environment::GetEnv("AWS_DEFAULT_REGION"); + role_arn = Aws::Environment::GetEnv("AWS_ROLE_ARN"); + token_file = Aws::Environment::GetEnv("AWS_WEB_IDENTITY_TOKEN_FILE"); + session_name = Aws::Environment::GetEnv("AWS_ROLE_SESSION_NAME"); + + // check profile_config if either m_roleArn or m_tokenFile is not loaded from environment variable + // region source is not enforced, but we need it to construct sts endpoint, if we can't find from environment, we should check if it's set in config file. 
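+        // Illustrative (hypothetical) values: AWS_ROLE_ARN=arn:aws:iam::111122223333:role/clickhouse-s3,
+        // AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token,
+        // AWS_ROLE_SESSION_NAME=clickhouse-session.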
+ if (role_arn.empty() || token_file.empty() || tmp_region.empty()) + { + auto profile = Aws::Config::GetCachedConfigProfile(Aws::Auth::GetConfigProfileName()); + if (tmp_region.empty()) + { + tmp_region = profile.GetRegion(); + } + // If either of these two were not found from environment, use whatever found for all three in config file + if (role_arn.empty() || token_file.empty()) + { + role_arn = profile.GetRoleArn(); + token_file = profile.GetValue("web_identity_token_file"); + session_name = profile.GetValue("role_session_name"); + } + } + + if (token_file.empty()) + { + LOG_WARNING(logger, "Token file must be specified to use STS AssumeRole web identity creds provider."); + return; // No need to do further constructing + } + else + { + LOG_DEBUG(logger, "Resolved token_file from profile_config or environment variable to be {}", token_file); + } + + if (role_arn.empty()) + { + LOG_WARNING(logger, "RoleArn must be specified to use STS AssumeRole web identity creds provider."); + return; // No need to do further constructing + } + else + { + LOG_DEBUG(logger, "Resolved role_arn from profile_config or environment variable to be {}", role_arn); + } + + if (tmp_region.empty()) + { + tmp_region = Aws::Region::US_EAST_1; + } + else + { + LOG_DEBUG(logger, "Resolved region from profile_config or environment variable to be {}", tmp_region); + } + + if (session_name.empty()) + { + session_name = Aws::Utils::UUID::RandomUUID(); + } + else + { + LOG_DEBUG(logger, "Resolved session_name from profile_config or environment variable to be {}", session_name); + } + + aws_client_configuration.scheme = Aws::Http::Scheme::HTTPS; + aws_client_configuration.region = tmp_region; + + std::vector retryable_errors; + retryable_errors.push_back("IDPCommunicationError"); + retryable_errors.push_back("InvalidIdentityToken"); + + aws_client_configuration.retryStrategy = std::make_shared( + retryable_errors, /* maxRetries = */3); + + client = std::make_unique(aws_client_configuration); + initialized = true; + LOG_INFO(logger, "Creating STS AssumeRole with web identity creds provider."); + } + + Aws::Auth::AWSCredentials GetAWSCredentials() override + { + // A valid client means required information like role arn and token file were constructed correctly. + // We can use this provider to load creds, otherwise, we can just return empty creds. 
+ if (!initialized) + { + return Aws::Auth::AWSCredentials(); + } + refreshIfExpired(); + Aws::Utils::Threading::ReaderLockGuard guard(m_reloadLock); + return credentials; + } + +protected: + void Reload() override + { + LOG_INFO(logger, "Credentials have expired, attempting to renew from STS."); + + std::ifstream token_stream(token_file.data()); + if (token_stream) + { + String token_string((std::istreambuf_iterator(token_stream)), std::istreambuf_iterator()); + token = token_string; + } + else + { + LOG_INFO(logger, "Can't open token file: {}", token_file); + return; + } + Aws::Internal::STSCredentialsClient::STSAssumeRoleWithWebIdentityRequest request{session_name, role_arn, token}; + + auto result = client->GetAssumeRoleWithWebIdentityCredentials(request); + LOG_TRACE(logger, "Successfully retrieved credentials with AWS_ACCESS_KEY: {}", result.creds.GetAWSAccessKeyId()); + credentials = result.creds; + } + +private: + void refreshIfExpired() + { + Aws::Utils::Threading::ReaderLockGuard guard(m_reloadLock); + if (!credentials.IsExpiredOrEmpty()) + { + return; + } + + guard.UpgradeToWriterLock(); + if (!credentials.IsExpiredOrEmpty()) // double-checked lock to avoid refreshing twice + { + return; + } + + Reload(); + } + + std::unique_ptr client; + Aws::Auth::AWSCredentials credentials; + Aws::String role_arn; + Aws::String token_file; + Aws::String session_name; + Aws::String token; + bool initialized = false; + Poco::Logger * logger; +}; + class S3CredentialsProviderChain : public Aws::Auth::AWSCredentialsProviderChain { public: @@ -381,7 +534,11 @@ public: AddProvider(std::make_shared()); AddProvider(std::make_shared()); AddProvider(std::make_shared()); - AddProvider(std::make_shared()); + + { + DB::S3::PocoHTTPClientConfiguration aws_client_configuration = DB::S3::ClientFactory::instance().createClientConfiguration(configuration.region, configuration.remote_host_filter, configuration.s3_max_redirects); + AddProvider(std::make_shared(aws_client_configuration)); + } /// ECS TaskRole Credentials only available when ENVIRONMENT VARIABLE is set. const auto relative_uri = Aws::Environment::GetEnv(AWS_ECS_CONTAINER_CREDENTIALS_RELATIVE_URI); @@ -603,7 +760,7 @@ namespace S3 /// Case when bucket name represented in domain name of S3 URL. /// E.g. (https://bucket-name.s3.Region.amazonaws.com/key) /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#virtual-hosted-style-access - static const RE2 virtual_hosted_style_pattern(R"((.+)\.(s3|cos|obs)([.\-][a-z0-9\-.:]+))"); + static const RE2 virtual_hosted_style_pattern(R"((.+)\.(s3|cos|obs|oss)([.\-][a-z0-9\-.:]+))"); /// Case when bucket name and key represented in path of S3 URL. /// E.g. 
(https://s3.Region.amazonaws.com/bucket-name/key) @@ -614,6 +771,7 @@ namespace S3 static constexpr auto COSN = "COSN"; static constexpr auto COS = "COS"; static constexpr auto OBS = "OBS"; + static constexpr auto OSS = "OSS"; uri = uri_; @@ -638,7 +796,7 @@ namespace S3 } boost::to_upper(name); - if (name != S3 && name != COS && name != OBS) + if (name != S3 && name != COS && name != OBS && name != OSS) { throw Exception(ErrorCodes::BAD_ARGUMENTS, "Object storage system name is unrecognized in virtual hosted style S3 URI: {}", quoteString(name)); } @@ -650,6 +808,10 @@ namespace S3 { storage_name = OBS; } + else if (name == OSS) + { + storage_name = OSS; + } else { storage_name = COSN; diff --git a/src/IO/SynchronousReader.cpp b/src/IO/SynchronousReader.cpp index 82f29da7d91..599299ddad4 100644 --- a/src/IO/SynchronousReader.cpp +++ b/src/IO/SynchronousReader.cpp @@ -17,7 +17,6 @@ namespace ProfileEvents extern const Event ReadBufferFromFileDescriptorReadFailed; extern const Event ReadBufferFromFileDescriptorReadBytes; extern const Event DiskReadElapsedMicroseconds; - extern const Event Seek; } namespace CurrentMetrics @@ -37,6 +36,9 @@ namespace ErrorCodes std::future SynchronousReader::submit(Request request) { + /// If size is zero, then read() cannot be distinguished from EOF + assert(request.size); + int fd = assert_cast(*request.descriptor).fd; #if defined(POSIX_FADV_WILLNEED) diff --git a/src/IO/ThreadPoolReader.cpp b/src/IO/ThreadPoolReader.cpp index 701fa759848..273778df37c 100644 --- a/src/IO/ThreadPoolReader.cpp +++ b/src/IO/ThreadPoolReader.cpp @@ -76,6 +76,9 @@ ThreadPoolReader::ThreadPoolReader(size_t pool_size, size_t queue_size_) std::future ThreadPoolReader::submit(Request request) { + /// If size is zero, then read() cannot be distinguished from EOF + assert(request.size); + int fd = assert_cast(*request.descriptor).fd; #if defined(__linux__) diff --git a/src/IO/WriteBufferFromFileDescriptor.cpp b/src/IO/WriteBufferFromFileDescriptor.cpp index 38aaa945362..5f0db681665 100644 --- a/src/IO/WriteBufferFromFileDescriptor.cpp +++ b/src/IO/WriteBufferFromFileDescriptor.cpp @@ -114,7 +114,11 @@ void WriteBufferFromFileDescriptor::sync() next(); /// Request OS to sync data with storage medium. - int res = fsync(fd); +#if defined(OS_DARWIN) + int res = ::fsync(fd); +#else + int res = ::fdatasync(fd); +#endif if (-1 == res) throwFromErrnoWithPath("Cannot fsync " + getFileName(), getFileName(), ErrorCodes::CANNOT_FSYNC); } diff --git a/src/IO/ZlibInflatingReadBuffer.h b/src/IO/ZlibInflatingReadBuffer.h index e868dd6999d..b8c141e9b9b 100644 --- a/src/IO/ZlibInflatingReadBuffer.h +++ b/src/IO/ZlibInflatingReadBuffer.h @@ -4,7 +4,6 @@ #include #include - #include diff --git a/src/IO/createReadBufferFromFileBase.cpp b/src/IO/createReadBufferFromFileBase.cpp index 05de97c5c92..bed97d54ab0 100644 --- a/src/IO/createReadBufferFromFileBase.cpp +++ b/src/IO/createReadBufferFromFileBase.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -33,6 +34,8 @@ std::unique_ptr createReadBufferFromFileBase( char * existing_memory, size_t alignment) { + if (size.has_value() && !*size) + return std::make_unique(); size_t estimated_size = size.has_value() ? 
*size : 0; if (!existing_memory diff --git a/src/Interpreters/Access/InterpreterCreateQuotaQuery.cpp b/src/Interpreters/Access/InterpreterCreateQuotaQuery.cpp index 703615972c4..83a620d11c6 100644 --- a/src/Interpreters/Access/InterpreterCreateQuotaQuery.cpp +++ b/src/Interpreters/Access/InterpreterCreateQuotaQuery.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -58,8 +59,11 @@ namespace auto & quota_limits = *it; quota_limits.randomize_interval = query_limits.randomize_interval; - for (auto resource_type : collections::range(Quota::MAX_RESOURCE_TYPE)) - quota_limits.max[resource_type] = query_limits.max[resource_type]; + for (auto quota_type : collections::range(QuotaType::MAX)) + { + auto quota_type_i = static_cast(quota_type); + quota_limits.max[quota_type_i] = query_limits.max[quota_type_i]; + } } if (override_to_roles) diff --git a/src/Interpreters/Access/InterpreterCreateRowPolicyQuery.cpp b/src/Interpreters/Access/InterpreterCreateRowPolicyQuery.cpp index 37347b37619..c88e9c299a8 100644 --- a/src/Interpreters/Access/InterpreterCreateRowPolicyQuery.cpp +++ b/src/Interpreters/Access/InterpreterCreateRowPolicyQuery.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -17,21 +18,21 @@ namespace void updateRowPolicyFromQueryImpl( RowPolicy & policy, const ASTCreateRowPolicyQuery & query, - const RowPolicy::NameParts & override_name, + const RowPolicyName & override_name, const std::optional & override_to_roles) { if (!override_name.empty()) - policy.setNameParts(override_name); + policy.setFullName(override_name); else if (!query.new_short_name.empty()) policy.setShortName(query.new_short_name); - else if (query.names->name_parts.size() == 1) - policy.setNameParts(query.names->name_parts.front()); + else if (query.names->full_names.size() == 1) + policy.setFullName(query.names->full_names.front()); if (query.is_restrictive) policy.setRestrictive(*query.is_restrictive); - for (const auto & [condition_type, condition] : query.conditions) - policy.conditions[condition_type] = condition ? serializeAST(*condition) : String{}; + for (const auto & [filter_type, filter] : query.filters) + policy.filters[static_cast(filter_type)] = filter ? 
serializeAST(*filter) : String{}; if (override_to_roles) policy.to_roles = *override_to_roles; @@ -80,10 +81,10 @@ BlockIO InterpreterCreateRowPolicyQuery::execute() else { std::vector new_policies; - for (const auto & name_parts : query.names->name_parts) + for (const auto & full_name : query.names->full_names) { auto new_policy = std::make_shared(); - updateRowPolicyFromQueryImpl(*new_policy, query, name_parts, roles_from_query); + updateRowPolicyFromQueryImpl(*new_policy, query, full_name, roles_from_query); new_policies.emplace_back(std::move(new_policy)); } diff --git a/src/Interpreters/Access/InterpreterDropAccessEntityQuery.cpp b/src/Interpreters/Access/InterpreterDropAccessEntityQuery.cpp index c00bbe4f379..4d2e880561e 100644 --- a/src/Interpreters/Access/InterpreterDropAccessEntityQuery.cpp +++ b/src/Interpreters/Access/InterpreterDropAccessEntityQuery.cpp @@ -2,12 +2,7 @@ #include #include #include -#include -#include -#include -#include -#include -#include +#include #include #include @@ -19,8 +14,6 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; } -using EntityType = IAccessEntity::Type; - BlockIO InterpreterDropAccessEntityQuery::execute() { @@ -41,7 +34,7 @@ BlockIO InterpreterDropAccessEntityQuery::execute() access_control.remove(access_control.getIDs(query.type, names)); }; - if (query.type == EntityType::ROW_POLICY) + if (query.type == AccessEntityType::ROW_POLICY) do_drop(query.row_policy_names->toStrings()); else do_drop(query.names); @@ -56,12 +49,12 @@ AccessRightsElements InterpreterDropAccessEntityQuery::getRequiredAccess() const AccessRightsElements res; switch (query.type) { - case EntityType::USER: res.emplace_back(AccessType::DROP_USER); return res; - case EntityType::ROLE: res.emplace_back(AccessType::DROP_ROLE); return res; - case EntityType::SETTINGS_PROFILE: res.emplace_back(AccessType::DROP_SETTINGS_PROFILE); return res; - case EntityType::ROW_POLICY: res.emplace_back(AccessType::DROP_ROW_POLICY); return res; - case EntityType::QUOTA: res.emplace_back(AccessType::DROP_QUOTA); return res; - case EntityType::MAX: break; + case AccessEntityType::USER: res.emplace_back(AccessType::DROP_USER); return res; + case AccessEntityType::ROLE: res.emplace_back(AccessType::DROP_ROLE); return res; + case AccessEntityType::SETTINGS_PROFILE: res.emplace_back(AccessType::DROP_SETTINGS_PROFILE); return res; + case AccessEntityType::ROW_POLICY: res.emplace_back(AccessType::DROP_ROW_POLICY); return res; + case AccessEntityType::QUOTA: res.emplace_back(AccessType::DROP_QUOTA); return res; + case AccessEntityType::MAX: break; } throw Exception( toString(query.type) + ": type is not supported by DROP query", ErrorCodes::NOT_IMPLEMENTED); diff --git a/src/Interpreters/Access/InterpreterGrantQuery.cpp b/src/Interpreters/Access/InterpreterGrantQuery.cpp index e17af6877be..f2b9cd58991 100644 --- a/src/Interpreters/Access/InterpreterGrantQuery.cpp +++ b/src/Interpreters/Access/InterpreterGrantQuery.cpp @@ -118,7 +118,7 @@ namespace { auto current_user = current_user_access.getUser(); if (current_user && !current_user->grantees.match(grantee_id)) - throw Exception(grantee.outputTypeAndName() + " is not allowed as grantee", ErrorCodes::ACCESS_DENIED); + throw Exception(grantee.formatTypeWithName() + " is not allowed as grantee", ErrorCodes::ACCESS_DENIED); } /// Checks if grantees are allowed for the current user, throws an exception if not. 
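Between the access-entity diffs, a short self-contained sketch of the scoped-enum indexing idiom these interpreter changes adopt: the enum's MAX value sizes the limits array and every subscript goes through an explicit static_cast, since scoped enums no longer convert implicitly. The three-value enum is a hypothetical stand-in, not the real QuotaType definition:

    #include <array>
    #include <cstddef>
    #include <iostream>
    #include <optional>

    /// Hypothetical stand-in for the QuotaType scoped enum referenced in the diff.
    enum class QuotaType : size_t { QUERIES, ERRORS, RESULT_ROWS, MAX };

    int main()
    {
        /// QuotaType::MAX sizes the per-type limits, the way the interpreters index query_limits.max.
        std::array<std::optional<size_t>, static_cast<size_t>(QuotaType::MAX)> max_values;

        for (size_t i = 0; i != static_cast<size_t>(QuotaType::MAX); ++i)
        {
            auto quota_type = static_cast<QuotaType>(i);
            auto quota_type_i = static_cast<size_t>(quota_type); /// explicit cast back to an index
            max_values[quota_type_i] = 0;                        /// copy the per-type limit here
        }

        std::cout << max_values.size() << " quota limit slots\n";
    }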
diff --git a/src/Interpreters/Access/InterpreterShowAccessEntitiesQuery.cpp b/src/Interpreters/Access/InterpreterShowAccessEntitiesQuery.cpp index b0fe28e1abd..acb7f521493 100644 --- a/src/Interpreters/Access/InterpreterShowAccessEntitiesQuery.cpp +++ b/src/Interpreters/Access/InterpreterShowAccessEntitiesQuery.cpp @@ -14,8 +14,6 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; } -using EntityType = IAccessEntity::Type; - InterpreterShowAccessEntitiesQuery::InterpreterShowAccessEntitiesQuery(const ASTPtr & query_ptr_, ContextMutablePtr context_) : WithMutableContext(context_), query_ptr(query_ptr_) @@ -41,7 +39,7 @@ String InterpreterShowAccessEntitiesQuery::getRewrittenQuery() const switch (query.type) { - case EntityType::ROW_POLICY: + case AccessEntityType::ROW_POLICY: { origin = "row_policies"; expr = "name"; @@ -63,7 +61,7 @@ String InterpreterShowAccessEntitiesQuery::getRewrittenQuery() const break; } - case EntityType::QUOTA: + case AccessEntityType::QUOTA: { if (query.current_quota) { @@ -78,21 +76,21 @@ String InterpreterShowAccessEntitiesQuery::getRewrittenQuery() const break; } - case EntityType::SETTINGS_PROFILE: + case AccessEntityType::SETTINGS_PROFILE: { origin = "settings_profiles"; expr = "name"; break; } - case EntityType::USER: + case AccessEntityType::USER: { origin = "users"; expr = "name"; break; } - case EntityType::ROLE: + case AccessEntityType::ROLE: { if (query.current_roles) { @@ -112,7 +110,7 @@ String InterpreterShowAccessEntitiesQuery::getRewrittenQuery() const break; } - case EntityType::MAX: + case AccessEntityType::MAX: break; } diff --git a/src/Interpreters/Access/InterpreterShowAccessQuery.cpp b/src/Interpreters/Access/InterpreterShowAccessQuery.cpp index a385f6c8d7a..26c47507ce2 100644 --- a/src/Interpreters/Access/InterpreterShowAccessQuery.cpp +++ b/src/Interpreters/Access/InterpreterShowAccessQuery.cpp @@ -16,8 +16,6 @@ namespace DB { -using EntityType = IAccessEntity::Type; - BlockIO InterpreterShowAccessQuery::execute() { @@ -53,7 +51,7 @@ std::vector InterpreterShowAccessQuery::getEntities() const getContext()->checkAccess(AccessType::SHOW_ACCESS); std::vector entities; - for (auto type : collections::range(EntityType::MAX)) + for (auto type : collections::range(AccessEntityType::MAX)) { auto ids = access_control.findAll(type); for (const auto & id : ids) @@ -77,7 +75,7 @@ ASTs InterpreterShowAccessQuery::getCreateAndGrantQueries() const for (const auto & entity : entities) { create_queries.push_back(InterpreterShowCreateAccessEntityQuery::getCreateQuery(*entity, access_control)); - if (entity->isTypeOf(EntityType::USER) || entity->isTypeOf(EntityType::ROLE)) + if (entity->isTypeOf(AccessEntityType::USER) || entity->isTypeOf(AccessEntityType::ROLE)) boost::range::push_back(grant_queries, InterpreterShowGrantsQuery::getGrantQueries(*entity, access_control)); } diff --git a/src/Interpreters/Access/InterpreterShowCreateAccessEntityQuery.cpp b/src/Interpreters/Access/InterpreterShowCreateAccessEntityQuery.cpp index ca6003e2cc0..284b3cd1b48 100644 --- a/src/Interpreters/Access/InterpreterShowCreateAccessEntityQuery.cpp +++ b/src/Interpreters/Access/InterpreterShowCreateAccessEntityQuery.cpp @@ -14,10 +14,12 @@ #include #include #include +#include #include -#include #include +#include #include +#include #include #include #include @@ -148,7 +150,7 @@ namespace query->names.emplace_back(quota.getName()); query->attach = attach_mode; - if (quota.key_type != Quota::KeyType::NONE) + if (quota.key_type != QuotaKeyType::NONE) query->key_type = 
quota.key_type; query->all_limits.reserve(quota.all_limits.size()); @@ -158,8 +160,11 @@ namespace ASTCreateQuotaQuery::Limits create_query_limits; create_query_limits.duration = limits.duration; create_query_limits.randomize_interval = limits.randomize_interval; - for (auto resource_type : collections::range(Quota::MAX_RESOURCE_TYPE)) - create_query_limits.max[resource_type] = limits.max[resource_type]; + for (auto quota_type : collections::range(QuotaType::MAX)) + { + auto quota_type_i = static_cast(quota_type); + create_query_limits.max[quota_type_i] = limits.max[quota_type_i]; + } query->all_limits.push_back(create_query_limits); } @@ -182,20 +187,20 @@ namespace { auto query = std::make_shared(); query->names = std::make_shared(); - query->names->name_parts.emplace_back(policy.getNameParts()); + query->names->full_names.emplace_back(policy.getFullName()); query->attach = attach_mode; if (policy.isRestrictive()) query->is_restrictive = policy.isRestrictive(); - for (auto type : collections::range(RowPolicy::MAX_CONDITION_TYPE)) + for (auto type : collections::range(RowPolicyFilterType::MAX)) { - const auto & condition = policy.conditions[static_cast(type)]; - if (!condition.empty()) + const auto & filter = policy.filters[static_cast(type)]; + if (!filter.empty()) { ParserExpression parser; - ASTPtr expr = parseQuery(parser, condition, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); - query->conditions.emplace_back(type, std::move(expr)); + ASTPtr expr = parseQuery(parser, filter, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + query->filters.emplace_back(type, std::move(expr)); } } @@ -225,10 +230,8 @@ namespace return getCreateQueryImpl(*quota, access_control, attach_mode); if (const SettingsProfile * profile = typeid_cast(&entity)) return getCreateQueryImpl(*profile, access_control, attach_mode); - throw Exception(entity.outputTypeAndName() + ": type is not supported by SHOW CREATE query", ErrorCodes::NOT_IMPLEMENTED); + throw Exception(entity.formatTypeWithName() + ": type is not supported by SHOW CREATE query", ErrorCodes::NOT_IMPLEMENTED); } - - using EntityType = IAccessEntity::Type; } @@ -302,7 +305,7 @@ std::vector InterpreterShowCreateAccessEntityQuery::getEntities if (usage) entities.push_back(access_control.read(usage->quota_id)); } - else if (show_query.type == EntityType::ROW_POLICY) + else if (show_query.type == AccessEntityType::ROW_POLICY) { auto ids = access_control.findAll(); if (show_query.row_policy_names) @@ -374,12 +377,12 @@ AccessRightsElements InterpreterShowCreateAccessEntityQuery::getRequiredAccess() AccessRightsElements res; switch (show_query.type) { - case EntityType::USER: res.emplace_back(AccessType::SHOW_USERS); return res; - case EntityType::ROLE: res.emplace_back(AccessType::SHOW_ROLES); return res; - case EntityType::SETTINGS_PROFILE: res.emplace_back(AccessType::SHOW_SETTINGS_PROFILES); return res; - case EntityType::ROW_POLICY: res.emplace_back(AccessType::SHOW_ROW_POLICIES); return res; - case EntityType::QUOTA: res.emplace_back(AccessType::SHOW_QUOTAS); return res; - case EntityType::MAX: break; + case AccessEntityType::USER: res.emplace_back(AccessType::SHOW_USERS); return res; + case AccessEntityType::ROLE: res.emplace_back(AccessType::SHOW_ROLES); return res; + case AccessEntityType::SETTINGS_PROFILE: res.emplace_back(AccessType::SHOW_SETTINGS_PROFILES); return res; + case AccessEntityType::ROW_POLICY: res.emplace_back(AccessType::SHOW_ROW_POLICIES); return res; + case AccessEntityType::QUOTA: res.emplace_back(AccessType::SHOW_QUOTAS); return res; + case 
AccessEntityType::MAX: break; } throw Exception(toString(show_query.type) + ": type is not supported by SHOW CREATE query", ErrorCodes::NOT_IMPLEMENTED); } diff --git a/src/Interpreters/Access/InterpreterShowGrantsQuery.cpp b/src/Interpreters/Access/InterpreterShowGrantsQuery.cpp index 788856dbfe0..cd98d8d4575 100644 --- a/src/Interpreters/Access/InterpreterShowGrantsQuery.cpp +++ b/src/Interpreters/Access/InterpreterShowGrantsQuery.cpp @@ -91,7 +91,7 @@ namespace return getGrantQueriesImpl(*user, access_control, attach_mode); if (const Role * role = typeid_cast(&entity)) return getGrantQueriesImpl(*role, access_control, attach_mode); - throw Exception(entity.outputTypeAndName() + " is expected to be user or role", ErrorCodes::LOGICAL_ERROR); + throw Exception(entity.formatTypeWithName() + " is expected to be user or role", ErrorCodes::LOGICAL_ERROR); } } diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index e358696fa40..d5eb701e2aa 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -693,6 +693,7 @@ ASTs ActionsMatcher::doUntuple(const ASTFunction * function, ActionsMatcher::Dat ASTs columns; size_t tid = 0; + auto func_alias = function->tryGetAlias(); for (const auto & name [[maybe_unused]] : tuple_type->getElementNames()) { auto tuple_ast = function->arguments->children[0]; @@ -703,7 +704,8 @@ ASTs ActionsMatcher::doUntuple(const ASTFunction * function, ActionsMatcher::Dat visit(*literal, literal, data); auto func = makeASTFunction("tupleElement", tuple_ast, literal); - + if (!func_alias.empty()) + func->setAlias(func_alias + "." + toString(tid)); auto function_builder = FunctionFactory::instance().get(func->name, data.getContext()); data.addFunction(function_builder, {tuple_name_type->name, literal->getColumnName()}, func->getColumnName()); diff --git a/src/Interpreters/AddDefaultDatabaseVisitor.h b/src/Interpreters/AddDefaultDatabaseVisitor.h index 5d7483c45c0..d5039a2f19e 100644 --- a/src/Interpreters/AddDefaultDatabaseVisitor.h +++ b/src/Interpreters/AddDefaultDatabaseVisitor.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -86,7 +87,12 @@ private: void visit(ASTSelectWithUnionQuery & select, ASTPtr &) const { for (auto & child : select.list_of_selects->children) - tryVisit(child); + { + if (child->as()) + tryVisit(child); + else if (child->as()) + tryVisit(child); + } } void visit(ASTSelectQuery & select, ASTPtr &) const @@ -97,6 +103,19 @@ private: visitChildren(select); } + void visit(ASTSelectIntersectExceptQuery & select, ASTPtr &) const + { + for (auto & child : select.getListOfSelects()) + { + if (child->as()) + tryVisit(child); + else if (child->as()) + tryVisit(child); + else if (child->as()) + tryVisit(child); + } + } + void visit(ASTTablesInSelectQuery & tables, ASTPtr &) const { for (auto & child : tables.children) @@ -220,8 +239,8 @@ private: if (only_replace_current_database_function) return; - if (node.database.empty()) - node.database = database_name; + if (!node.database) + node.setDatabase(database_name); } void visitDDL(ASTRenameQuery & node, ASTPtr &) const @@ -243,8 +262,8 @@ private: if (only_replace_current_database_function) return; - if (node.database.empty()) - node.database = database_name; + if (!node.database) + node.setDatabase(database_name); for (const auto & child : node.command_list->children) { diff --git a/src/Interpreters/AddIndexConstraintsOptimizer.cpp b/src/Interpreters/AddIndexConstraintsOptimizer.cpp new file mode 100644 index 
00000000000..b28e31d22f6
--- /dev/null
+++ b/src/Interpreters/AddIndexConstraintsOptimizer.cpp
@@ -0,0 +1,181 @@
+#include <Interpreters/AddIndexConstraintsOptimizer.h>
+
+#include <Interpreters/TreeCNFConverter.h>
+#include <Interpreters/ComparisonGraph.h>
+#include <Parsers/IAST.h>
+#include <Parsers/ASTIdentifier.h>
+#include <Parsers/ASTFunction.h>
+#include <Storages/StorageInMemoryMetadata.h>
+#include <Storages/ConstraintsDescription.h>
+
+namespace DB
+{
+
+AddIndexConstraintsOptimizer::AddIndexConstraintsOptimizer(
+    const StorageMetadataPtr & metadata_snapshot_)
+    : metadata_snapshot(metadata_snapshot_)
+{
+}
+
+namespace
+{
+    bool onlyIndexColumns(const ASTPtr & ast, const std::unordered_set<std::string_view> & primary_key_set)
+    {
+        const auto * identifier = ast->as<ASTIdentifier>();
+        if (identifier && !primary_key_set.contains(identifier->name()))
+            return false;
+        for (auto & child : ast->children)
+            if (!onlyIndexColumns(child, primary_key_set))
+                return false;
+        return true;
+    }
+
+    bool onlyConstants(const ASTPtr & ast)
+    {
+        const auto * identifier = ast->as<ASTIdentifier>();
+        if (identifier)
+            return false;
+        for (auto & child : ast->children)
+            if (!onlyConstants(child))
+                return false;
+        return true;
+    }
+
+    const std::unordered_map<String, ComparisonGraph::CompareResult> & getRelationMap()
+    {
+        const static std::unordered_map<String, ComparisonGraph::CompareResult> relations =
+        {
+            {"equals", ComparisonGraph::CompareResult::EQUAL},
+            {"less", ComparisonGraph::CompareResult::LESS},
+            {"lessOrEquals", ComparisonGraph::CompareResult::LESS_OR_EQUAL},
+            {"greaterOrEquals", ComparisonGraph::CompareResult::GREATER_OR_EQUAL},
+            {"greater", ComparisonGraph::CompareResult::GREATER},
+        };
+        return relations;
+    }
+
+    const std::unordered_map<ComparisonGraph::CompareResult, String> & getReverseRelationMap()
+    {
+        const static std::unordered_map<ComparisonGraph::CompareResult, String> relations =
+        {
+            {ComparisonGraph::CompareResult::EQUAL, "equals"},
+            {ComparisonGraph::CompareResult::LESS, "less"},
+            {ComparisonGraph::CompareResult::LESS_OR_EQUAL, "lessOrEquals"},
+            {ComparisonGraph::CompareResult::GREATER_OR_EQUAL, "greaterOrEquals"},
+            {ComparisonGraph::CompareResult::GREATER, "greater"},
+        };
+        return relations;
+    }
+
+    bool canBeSequence(const ComparisonGraph::CompareResult left, const ComparisonGraph::CompareResult right)
+    {
+        using CR = ComparisonGraph::CompareResult;
+        if (left == CR::UNKNOWN || right == CR::UNKNOWN || left == CR::NOT_EQUAL || right == CR::NOT_EQUAL)
+            return false;
+        if ((left == CR::GREATER || left == CR::GREATER_OR_EQUAL) && (right == CR::LESS || right == CR::LESS_OR_EQUAL))
+            return false;
+        if ((right == CR::GREATER || right == CR::GREATER_OR_EQUAL) && (left == CR::LESS || left == CR::LESS_OR_EQUAL))
+            return false;
+        return true;
+    }
+
+    ComparisonGraph::CompareResult mostStrict(const ComparisonGraph::CompareResult left, const ComparisonGraph::CompareResult right)
+    {
+        using CR = ComparisonGraph::CompareResult;
+        if (left == CR::LESS || left == CR::GREATER)
+            return left;
+        if (right == CR::LESS || right == CR::GREATER)
+            return right;
+        if (left == CR::LESS_OR_EQUAL || left == CR::GREATER_OR_EQUAL)
+            return left;
+        if (right == CR::LESS_OR_EQUAL || right == CR::GREATER_OR_EQUAL)
+            return right;
+        if (left == CR::EQUAL)
+            return left;
+        if (right == CR::EQUAL)
+            return right;
+        return CR::UNKNOWN;
+    }
+
+    /// Create OR-group for 'indexHint'.
+    /// Consider we have an expression like A <op1> C, where C is constant.
+    /// Consider we have a constraint I <op2> A, where I depends only on columns from the primary key.
+    /// Then if op1 and op2 form a sequence of comparisons (e.g. A < C and I < A),
+    /// we can add the condition 'indexHint(I < C)' to the expression.
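+    /// Worked example (illustrative): with primary-key column "id", constraint "id <= a" and
+    /// WHERE atom "a < 10", compare(id, a) gives LESS_OR_EQUAL, which forms a sequence with
+    /// LESS; mostStrict() picks LESS, so the generated indexHint atom is 'id < 10'.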
+ CNFQuery::OrGroup createIndexHintGroup( + const CNFQuery::OrGroup & group, + const ComparisonGraph & graph, + const ASTs & primary_key_only_asts) + { + CNFQuery::OrGroup result; + for (const auto & atom : group) + { + const auto * func = atom.ast->as<ASTFunction>(); + if (func && func->arguments->children.size() == 2 && getRelationMap().contains(func->name)) + { + auto check_and_insert = [&](const size_t index, const ComparisonGraph::CompareResult need_result) + { + if (!onlyConstants(func->arguments->children[1 - index])) + return false; + + for (const auto & primary_key_ast : primary_key_only_asts) + { + ComparisonGraph::CompareResult actual_result; + if (index == 0) + actual_result = graph.compare(primary_key_ast, func->arguments->children[index]); + else + actual_result = graph.compare(func->arguments->children[index], primary_key_ast); + + if (canBeSequence(need_result, actual_result)) + { + ASTPtr helper_ast = func->clone(); + auto * helper_func = helper_ast->as<ASTFunction>(); + helper_func->name = getReverseRelationMap().at(mostStrict(need_result, actual_result)); + helper_func->arguments->children[index] = primary_key_ast->clone(); + result.insert(CNFQuery::AtomicFormula{atom.negative, helper_ast}); + return true; + } + } + + return false; + }; + + auto expected = getRelationMap().at(func->name); + if (!check_and_insert(0, expected) && !check_and_insert(1, expected)) + return {}; + } + } + + return result; + } +} + +void AddIndexConstraintsOptimizer::perform(CNFQuery & cnf_query) +{ + const auto primary_key = metadata_snapshot->getColumnsRequiredForPrimaryKey(); + const auto & graph = metadata_snapshot->getConstraints().getGraph(); + const std::unordered_set<std::string_view> primary_key_set(std::begin(primary_key), std::end(primary_key)); + + ASTs primary_key_only_asts; + for (const auto & vertex : graph.getVertices()) + for (const auto & ast : vertex) + if (onlyIndexColumns(ast, primary_key_set)) + primary_key_only_asts.push_back(ast); + + CNFQuery::AndGroup and_group; + cnf_query.iterateGroups([&](const auto & or_group) + { + auto add_group = createIndexHintGroup(or_group, graph, primary_key_only_asts); + if (!add_group.empty()) + and_group.emplace(std::move(add_group)); + }); + + if (!and_group.empty()) + { + CNFQuery::OrGroup new_or_group; + new_or_group.insert(CNFQuery::AtomicFormula{false, makeASTFunction("indexHint", TreeCNFConverter::fromCNF(CNFQuery(std::move(and_group))))}); + cnf_query.appendGroup(CNFQuery::AndGroup{new_or_group}); + } +} + +} diff --git a/src/Interpreters/AddIndexConstraintsOptimizer.h b/src/Interpreters/AddIndexConstraintsOptimizer.h new file mode 100644 index 00000000000..228d8d8ad1a --- /dev/null +++ b/src/Interpreters/AddIndexConstraintsOptimizer.h @@ -0,0 +1,35 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +struct StorageInMemoryMetadata; +using StorageMetadataPtr = std::shared_ptr<const StorageInMemoryMetadata>; + +/// Optimizer that extracts constraints that +/// depend only on columns of the primary key +/// and tries to add the function 'indexHint' to +/// the WHERE clause, which reduces the amount of data read.
+class AddIndexConstraintsOptimizer final +{ +public: + AddIndexConstraintsOptimizer( + const StorageMetadataPtr & metadata_snapshot); + + void perform(CNFQuery & cnf_query); + +private: + const StorageMetadataPtr & metadata_snapshot; +}; + +} diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 4f4b981b44d..efea8e9d0f7 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -33,11 +33,6 @@ namespace ProfileEvents extern const Event ExternalAggregationUncompressedBytes; } -namespace CurrentMetrics -{ - extern const Metric QueryThread; -} - namespace DB { diff --git a/src/Interpreters/ApplyWithGlobalVisitor.cpp b/src/Interpreters/ApplyWithGlobalVisitor.cpp index df238e27abf..1ac2b19a04e 100644 --- a/src/Interpreters/ApplyWithGlobalVisitor.cpp +++ b/src/Interpreters/ApplyWithGlobalVisitor.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include namespace DB @@ -40,6 +41,31 @@ void ApplyWithGlobalVisitor::visit( { visit(*node_select, exprs, with_expression_list); } + else if (ASTSelectIntersectExceptQuery * node_intersect_except = select->as()) + { + visit(*node_intersect_except, exprs, with_expression_list); + } + } +} + +void ApplyWithGlobalVisitor::visit( + ASTSelectIntersectExceptQuery & selects, const std::map & exprs, const ASTPtr & with_expression_list) +{ + auto selects_list = selects.getListOfSelects(); + for (auto & select : selects_list) + { + if (ASTSelectWithUnionQuery * node_union = select->as()) + { + visit(*node_union, exprs, with_expression_list); + } + else if (ASTSelectQuery * node_select = select->as()) + { + visit(*node_select, exprs, with_expression_list); + } + else if (ASTSelectIntersectExceptQuery * node_intersect_except = select->as()) + { + visit(*node_intersect_except, exprs, with_expression_list); + } } } @@ -47,7 +73,7 @@ void ApplyWithGlobalVisitor::visit(ASTPtr & ast) { if (ASTSelectWithUnionQuery * node_union = ast->as()) { - if (auto * first_select = node_union->list_of_selects->children[0]->as()) + if (auto * first_select = typeid_cast(node_union->list_of_selects->children[0].get())) { ASTPtr with_expression_list = first_select->with(); if (with_expression_list) @@ -64,6 +90,8 @@ void ApplyWithGlobalVisitor::visit(ASTPtr & ast) visit(*union_child, exprs, with_expression_list); else if (auto * select_child = (*it)->as()) visit(*select_child, exprs, with_expression_list); + else if (auto * intersect_except_child = (*it)->as()) + visit(*intersect_except_child, exprs, with_expression_list); } } } diff --git a/src/Interpreters/ApplyWithGlobalVisitor.h b/src/Interpreters/ApplyWithGlobalVisitor.h index a42203c68ef..2f7c554da40 100644 --- a/src/Interpreters/ApplyWithGlobalVisitor.h +++ b/src/Interpreters/ApplyWithGlobalVisitor.h @@ -8,6 +8,7 @@ namespace DB class ASTSelectWithUnionQuery; class ASTSelectQuery; +class ASTSelectIntersectExceptQuery; /// Pull out the WITH statement from the first child of ASTSelectWithUnion query if any. 
class ApplyWithGlobalVisitor @@ -18,6 +19,7 @@ public: private: static void visit(ASTSelectWithUnionQuery & selects, const std::map<String, ASTPtr> & exprs, const ASTPtr & with_expression_list); static void visit(ASTSelectQuery & select, const std::map<String, ASTPtr> & exprs, const ASTPtr & with_expression_list); + static void visit(ASTSelectIntersectExceptQuery & select, const std::map<String, ASTPtr> & exprs, const ASTPtr & with_expression_list); }; } diff --git a/src/Interpreters/AsynchronousInsertQueue.cpp b/src/Interpreters/AsynchronousInsertQueue.cpp index d5d2b1a722d..0e2605fa2e2 100644 --- a/src/Interpreters/AsynchronousInsertQueue.cpp +++ b/src/Interpreters/AsynchronousInsertQueue.cpp @@ -21,6 +21,7 @@ #include #include #include +#include namespace DB diff --git a/src/Interpreters/ComparisonGraph.cpp b/src/Interpreters/ComparisonGraph.cpp new file mode 100644 index 00000000000..e236de67fdc --- /dev/null +++ b/src/Interpreters/ComparisonGraph.cpp @@ -0,0 +1,640 @@ +#include <Interpreters/ComparisonGraph.h> +#include <Parsers/IAST.h> +#include <Parsers/ASTLiteral.h> +#include <Parsers/ASTFunction.h> +#include <Parsers/queryToString.h> +#include <Common/FieldVisitorsAccurateComparison.h> + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int VIOLATED_CONSTRAINT; +} + +namespace +{ + +/// Make function a > b or a >= b +ASTPtr normalizeAtom(const ASTPtr & atom) +{ + static const std::map<std::string, std::string> inverse_relations = + { + {"lessOrEquals", "greaterOrEquals"}, + {"less", "greater"}, + }; + + ASTPtr res = atom->clone(); + if (const auto * func = res->as<ASTFunction>()) + { + if (const auto it = inverse_relations.find(func->name); it != std::end(inverse_relations)) + { + res = makeASTFunction(it->second, func->arguments->children[1]->clone(), func->arguments->children[0]->clone()); + } + } + + return res; +} + +bool less(const Field & lhs, const Field & rhs) { return applyVisitor(FieldVisitorAccurateLess{}, lhs, rhs); } +bool greater(const Field & lhs, const Field & rhs) { return applyVisitor(FieldVisitorAccurateLess{}, rhs, lhs); } +bool equals(const Field & lhs, const Field & rhs) { return applyVisitor(FieldVisitorAccurateEquals{}, lhs, rhs); } + +} + +ComparisonGraph::ComparisonGraph(const ASTs & atomic_formulas) +{ + if (atomic_formulas.empty()) + return; + + static const std::unordered_map<std::string, Edge::Type> relation_to_enum = + { + {"equals", Edge::EQUAL}, + {"greater", Edge::GREATER}, + {"greaterOrEquals", Edge::GREATER_OR_EQUAL}, + }; + + /// Firstly build an intermediate graph, + /// in which each vertex corresponds to one expression. + /// That means that if we have edge (A, B) with type GREATER, then always A > B. + /// If we have EQUAL relation, then we add both edges (A, B) and (B, A).
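A minimal sketch of that normalization and edge layout, with expressions reduced to plain strings (illustrative only; the real graph stores ASTs keyed by tree hash):

#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

enum class EdgeType { GREATER, GREATER_OR_EQUAL, EQUAL };

int main()
{
    std::map<std::string, std::vector<std::pair<EdgeType, std::string>>> graph;

    auto add_atom = [&](std::string lhs, std::string op, std::string rhs)
    {
        // normalizeAtom(): rewrite less/lessOrEquals by swapping the arguments.
        if (op == "less") { std::swap(lhs, rhs); op = "greater"; }
        else if (op == "lessOrEquals") { std::swap(lhs, rhs); op = "greaterOrEquals"; }

        if (op == "equals")
        {
            graph[lhs].emplace_back(EdgeType::EQUAL, rhs);
            graph[rhs].emplace_back(EdgeType::EQUAL, lhs);  // equality goes both ways
        }
        else if (op == "greater")
            graph[lhs].emplace_back(EdgeType::GREATER, rhs);
        else if (op == "greaterOrEquals")
            graph[lhs].emplace_back(EdgeType::GREATER_OR_EQUAL, rhs);
    };

    add_atom("a", "less", "b");    // stored as: b GREATER a
    add_atom("b", "equals", "c");  // stored as: b EQUAL c and c EQUAL b

    for (const auto & [from, edges] : graph)
        for (const auto & [type, to] : edges)
            std::cout << from << " -> " << to << " (" << static_cast<int>(type) << ")\n";
}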
+ + Graph g; + for (const auto & atom_raw : atomic_formulas) + { + const auto atom = normalizeAtom(atom_raw); + + auto get_index = [](const ASTPtr & ast, Graph & asts_graph) -> std::optional + { + const auto it = asts_graph.ast_hash_to_component.find(ast->getTreeHash()); + if (it != std::end(asts_graph.ast_hash_to_component)) + { + if (!std::any_of( + std::cbegin(asts_graph.vertices[it->second].asts), + std::cend(asts_graph.vertices[it->second].asts), + [ast](const ASTPtr & constraint_ast) + { + return constraint_ast->getTreeHash() == ast->getTreeHash() + && constraint_ast->getColumnName() == ast->getColumnName(); + })) + { + return {}; + } + + return it->second; + } + else + { + asts_graph.ast_hash_to_component[ast->getTreeHash()] = asts_graph.vertices.size(); + asts_graph.vertices.push_back(EqualComponent{{ast}, std::nullopt}); + asts_graph.edges.emplace_back(); + return asts_graph.vertices.size() - 1; + } + }; + + const auto * func = atom->as(); + if (func && func->arguments->children.size() == 2) + { + auto index_left = get_index(func->arguments->children[0], g); + auto index_right = get_index(func->arguments->children[1], g); + + if (index_left && index_right) + { + if (const auto it = relation_to_enum.find(func->name); it != std::end(relation_to_enum)) + { + g.edges[*index_left].push_back(Edge{it->second, *index_right}); + if (it->second == Edge::EQUAL) + g.edges[*index_right].push_back(Edge{it->second, *index_left}); + } + } + } + } + + /// Now expressions A and B are equal, if and only if + /// we have both paths from A to B and from B to A in graph. + /// That means that equivalence classes of expressions + /// are the same as strongly connected components in graph. + /// So, we find such components and build graph on them. + /// All expressions from one equivalence class will be stored + /// in the corresponding vertex of new graph. + + graph = buildGraphFromAstsGraph(g); + dists = buildDistsFromGraph(graph); + std::tie(ast_const_lower_bound, ast_const_upper_bound) = buildConstBounds(); + + /// Find expressions that are known to be unequal. + static const std::unordered_set not_equals_functions = {"notEquals", "greater"}; + + /// Explicitly save unequal components. + /// TODO: Build a graph for unequal components. + for (const auto & atom_raw : atomic_formulas) + { + const auto atom = normalizeAtom(atom_raw); + const auto * func = atom->as(); + + if (func && not_equals_functions.contains(func->name)) + { + auto index_left = graph.ast_hash_to_component.at(func->arguments->children[0]->getTreeHash()); + auto index_right = graph.ast_hash_to_component.at(func->arguments->children[1]->getTreeHash()); + + if (index_left == index_right) + throw Exception(ErrorCodes::VIOLATED_CONSTRAINT, + "Found expression '{}', but its arguments considered equal according to constraints", + queryToString(atom)); + + not_equal.emplace(index_left, index_right); + not_equal.emplace(index_right, index_left); + } + } +} + +ComparisonGraph::CompareResult ComparisonGraph::pathToCompareResult(Path path, bool inverse) +{ + switch (path) + { + case Path::GREATER: return inverse ? CompareResult::LESS : CompareResult::GREATER; + case Path::GREATER_OR_EQUAL: return inverse ? 
CompareResult::LESS_OR_EQUAL : CompareResult::GREATER_OR_EQUAL; + } + __builtin_unreachable(); +} + +std::optional ComparisonGraph::findPath(const size_t start, const size_t finish) const +{ + const auto it = dists.find(std::make_pair(start, finish)); + if (it == std::end(dists)) + return {}; + + /// Since path can be only GREATER or GREATER_OR_EQUALS, + /// we can strengthen the condition. + return not_equal.contains({start, finish}) ? Path::GREATER : it->second; +} + +ComparisonGraph::CompareResult ComparisonGraph::compare(const ASTPtr & left, const ASTPtr & right) const +{ + size_t start = 0; + size_t finish = 0; + + /// TODO: check full ast + const auto it_left = graph.ast_hash_to_component.find(left->getTreeHash()); + const auto it_right = graph.ast_hash_to_component.find(right->getTreeHash()); + + if (it_left == std::end(graph.ast_hash_to_component) || it_right == std::end(graph.ast_hash_to_component)) + { + CompareResult result = CompareResult::UNKNOWN; + { + const auto left_bound = getConstLowerBound(left); + const auto right_bound = getConstUpperBound(right); + + if (left_bound && right_bound) + { + if (greater(left_bound->first, right_bound->first)) + result = CompareResult::GREATER; + else if (equals(left_bound->first, right_bound->first)) + result = left_bound->second || right_bound->second + ? CompareResult::GREATER : CompareResult::GREATER_OR_EQUAL; + } + } + { + const auto left_bound = getConstUpperBound(left); + const auto right_bound = getConstLowerBound(right); + + if (left_bound && right_bound) + { + if (less(left_bound->first, right_bound->first)) + result = CompareResult::LESS; + else if (equals(left_bound->first, right_bound->first)) + result = left_bound->second || right_bound->second + ? CompareResult::LESS : CompareResult::LESS_OR_EQUAL; + } + } + + return result; + } + else + { + start = it_left->second; + finish = it_right->second; + } + + if (start == finish) + return CompareResult::EQUAL; + + if (auto path = findPath(start, finish)) + return pathToCompareResult(*path, /*inverse=*/ false); + + if (auto path = findPath(finish, start)) + return pathToCompareResult(*path, /*inverse=*/ true); + + if (not_equal.contains({start, finish})) + return CompareResult::NOT_EQUAL; + + return CompareResult::UNKNOWN; +} + +bool ComparisonGraph::isPossibleCompare(const CompareResult expected, const ASTPtr & left, const ASTPtr & right) const +{ + const auto result = compare(left, right); + + if (expected == CompareResult::UNKNOWN || result == CompareResult::UNKNOWN) + return true; + + if (expected == result) + return true; + + static const std::set> possible_pairs = + { + {CompareResult::EQUAL, CompareResult::LESS_OR_EQUAL}, + {CompareResult::EQUAL, CompareResult::GREATER_OR_EQUAL}, + {CompareResult::LESS_OR_EQUAL, CompareResult::LESS}, + {CompareResult::LESS_OR_EQUAL, CompareResult::EQUAL}, + {CompareResult::LESS_OR_EQUAL, CompareResult::NOT_EQUAL}, + {CompareResult::GREATER_OR_EQUAL, CompareResult::GREATER}, + {CompareResult::GREATER_OR_EQUAL, CompareResult::EQUAL}, + {CompareResult::GREATER_OR_EQUAL, CompareResult::NOT_EQUAL}, + {CompareResult::LESS, CompareResult::LESS}, + {CompareResult::LESS, CompareResult::LESS_OR_EQUAL}, + {CompareResult::LESS, CompareResult::NOT_EQUAL}, + {CompareResult::GREATER, CompareResult::GREATER}, + {CompareResult::GREATER, CompareResult::GREATER_OR_EQUAL}, + {CompareResult::GREATER, CompareResult::NOT_EQUAL}, + {CompareResult::NOT_EQUAL, CompareResult::LESS}, + {CompareResult::NOT_EQUAL, CompareResult::GREATER}, + {CompareResult::NOT_EQUAL, 
CompareResult::LESS_OR_EQUAL}, + {CompareResult::NOT_EQUAL, CompareResult::GREATER_OR_EQUAL}, + }; + + return possible_pairs.contains({expected, result}); +} + +bool ComparisonGraph::isAlwaysCompare(const CompareResult expected, const ASTPtr & left, const ASTPtr & right) const +{ + const auto result = compare(left, right); + + if (expected == CompareResult::UNKNOWN || result == CompareResult::UNKNOWN) + return false; + + if (expected == result) + return true; + + static const std::set> possible_pairs = + { + {CompareResult::LESS_OR_EQUAL, CompareResult::LESS}, + {CompareResult::LESS_OR_EQUAL, CompareResult::EQUAL}, + {CompareResult::GREATER_OR_EQUAL, CompareResult::GREATER}, + {CompareResult::GREATER_OR_EQUAL, CompareResult::EQUAL}, + {CompareResult::NOT_EQUAL, CompareResult::GREATER}, + {CompareResult::NOT_EQUAL, CompareResult::LESS}, + }; + + return possible_pairs.contains({expected, result}); +} + + +ASTs ComparisonGraph::getEqual(const ASTPtr & ast) const +{ + const auto res = getComponentId(ast); + if (!res) + return {}; + else + return getComponent(res.value()); +} + +std::optional ComparisonGraph::getComponentId(const ASTPtr & ast) const +{ + const auto hash_it = graph.ast_hash_to_component.find(ast->getTreeHash()); + if (hash_it == std::end(graph.ast_hash_to_component)) + return {}; + + const size_t index = hash_it->second; + if (std::any_of( + std::cbegin(graph.vertices[index].asts), + std::cend(graph.vertices[index].asts), + [ast](const ASTPtr & constraint_ast) + { + return constraint_ast->getTreeHash() == ast->getTreeHash() && + constraint_ast->getColumnName() == ast->getColumnName(); + })) + { + return index; + } + else + { + return {}; + } +} + +bool ComparisonGraph::hasPath(const size_t left, const size_t right) const +{ + return findPath(left, right) || findPath(right, left); +} + +ASTs ComparisonGraph::getComponent(const size_t id) const +{ + return graph.vertices[id].asts; +} + +bool ComparisonGraph::EqualComponent::hasConstant() const +{ + return constant_index.has_value(); +} + +ASTPtr ComparisonGraph::EqualComponent::getConstant() const +{ + assert(constant_index); + return asts[*constant_index]; +} + +void ComparisonGraph::EqualComponent::buildConstants() +{ + constant_index.reset(); + for (size_t i = 0; i < asts.size(); ++i) + { + if (asts[i]->as()) + { + constant_index = i; + return; + } + } +} + +ComparisonGraph::CompareResult ComparisonGraph::atomToCompareResult(const CNFQuery::AtomicFormula & atom) +{ + if (const auto * func = atom.ast->as()) + { + auto expected = functionNameToCompareResult(func->name); + if (atom.negative) + expected = inverseCompareResult(expected); + return expected; + } + + return ComparisonGraph::CompareResult::UNKNOWN; +} + +ComparisonGraph::CompareResult ComparisonGraph::functionNameToCompareResult(const std::string & name) +{ + static const std::unordered_map relation_to_compare = + { + {"equals", CompareResult::EQUAL}, + {"notEquals", CompareResult::NOT_EQUAL}, + {"less", CompareResult::LESS}, + {"lessOrEquals", CompareResult::LESS_OR_EQUAL}, + {"greaterOrEquals", CompareResult::GREATER_OR_EQUAL}, + {"greater", CompareResult::GREATER}, + }; + + const auto it = relation_to_compare.find(name); + return it == std::end(relation_to_compare) ? 
CompareResult::UNKNOWN : it->second; +} + +ComparisonGraph::CompareResult ComparisonGraph::inverseCompareResult(const CompareResult result) +{ + static const std::unordered_map inverse_relations = + { + {CompareResult::NOT_EQUAL, CompareResult::EQUAL}, + {CompareResult::EQUAL, CompareResult::NOT_EQUAL}, + {CompareResult::GREATER_OR_EQUAL, CompareResult::LESS}, + {CompareResult::GREATER, CompareResult::LESS_OR_EQUAL}, + {CompareResult::LESS, CompareResult::GREATER_OR_EQUAL}, + {CompareResult::LESS_OR_EQUAL, CompareResult::GREATER}, + {CompareResult::UNKNOWN, CompareResult::UNKNOWN}, + }; + return inverse_relations.at(result); +} + +std::optional ComparisonGraph::getEqualConst(const ASTPtr & ast) const +{ + const auto hash_it = graph.ast_hash_to_component.find(ast->getTreeHash()); + if (hash_it == std::end(graph.ast_hash_to_component)) + return std::nullopt; + + const size_t index = hash_it->second; + return graph.vertices[index].hasConstant() + ? std::optional{graph.vertices[index].getConstant()} + : std::nullopt; +} + +std::optional> ComparisonGraph::getConstUpperBound(const ASTPtr & ast) const +{ + if (const auto * literal = ast->as()) + return std::make_pair(literal->value, false); + + const auto it = graph.ast_hash_to_component.find(ast->getTreeHash()); + if (it == std::end(graph.ast_hash_to_component)) + return std::nullopt; + + const size_t to = it->second; + const ssize_t from = ast_const_upper_bound[to]; + if (from == -1) + return std::nullopt; + + return std::make_pair(graph.vertices[from].getConstant()->as()->value, dists.at({from, to}) == Path::GREATER); +} + +std::optional> ComparisonGraph::getConstLowerBound(const ASTPtr & ast) const +{ + if (const auto * literal = ast->as()) + return std::make_pair(literal->value, false); + + const auto it = graph.ast_hash_to_component.find(ast->getTreeHash()); + if (it == std::end(graph.ast_hash_to_component)) + return std::nullopt; + + const size_t from = it->second; + const ssize_t to = ast_const_lower_bound[from]; + if (to == -1) + return std::nullopt; + + return std::make_pair(graph.vertices[to].getConstant()->as()->value, dists.at({from, to}) == Path::GREATER); +} + +void ComparisonGraph::dfsOrder(const Graph & asts_graph, size_t v, std::vector & visited, std::vector & order) +{ + visited[v] = true; + for (const auto & edge : asts_graph.edges[v]) + if (!visited[edge.to]) + dfsOrder(asts_graph, edge.to, visited, order); + + order.push_back(v); +} + +ComparisonGraph::Graph ComparisonGraph::reverseGraph(const Graph & asts_graph) +{ + Graph g; + g.ast_hash_to_component = asts_graph.ast_hash_to_component; + g.vertices = asts_graph.vertices; + g.edges.resize(g.vertices.size()); + + for (size_t v = 0; v < asts_graph.vertices.size(); ++v) + for (const auto & edge : asts_graph.edges[v]) + g.edges[edge.to].push_back(Edge{edge.type, v}); + + return g; +} + +std::vector ComparisonGraph::getVertices() const +{ + std::vector result; + for (const auto & vertex : graph.vertices) + { + result.emplace_back(); + for (const auto & ast : vertex.asts) + result.back().push_back(ast); + } + return result; +} + +void ComparisonGraph::dfsComponents( + const Graph & reversed_graph, size_t v, + OptionalIndices & components, const size_t component) +{ + components[v] = component; + for (const auto & edge : reversed_graph.edges[v]) + if (!components[edge.to]) + dfsComponents(reversed_graph, edge.to, components, component); +} + +ComparisonGraph::Graph ComparisonGraph::buildGraphFromAstsGraph(const Graph & asts_graph) +{ + /// Find strongly connected component by 
using 2 dfs traversals. + /// https://en.wikipedia.org/wiki/Kosaraju%27s_algorithm + const auto n = asts_graph.vertices.size(); + + std::vector<size_t> order; + { + std::vector<bool> visited(n, false); + for (size_t v = 0; v < n; ++v) + { + if (!visited[v]) + dfsOrder(asts_graph, v, visited, order); + } + } + + OptionalIndices components(n); + size_t component = 0; + { + const Graph reversed_graph = reverseGraph(asts_graph); + for (auto it = order.rbegin(); it != order.rend(); ++it) + { + if (!components[*it]) + { + dfsComponents(reversed_graph, *it, components, component); + ++component; + } + } + } + + Graph result; + result.vertices.resize(component); + result.edges.resize(component); + for (const auto & [hash, index] : asts_graph.ast_hash_to_component) + { + assert(components[index]); + result.ast_hash_to_component[hash] = *components[index]; + result.vertices[*components[index]].asts.insert( + std::end(result.vertices[*components[index]].asts), + std::begin(asts_graph.vertices[index].asts), + std::end(asts_graph.vertices[index].asts)); // asts_graph has only one ast per vertex + } + + /// Calculate constants + for (auto & vertex : result.vertices) + vertex.buildConstants(); + + /// For each edge in the initial graph, we add an edge between components in the condensation graph. + for (size_t v = 0; v < n; ++v) + { + for (const auto & edge : asts_graph.edges[v]) + result.edges[*components[v]].push_back(Edge{edge.type, *components[edge.to]}); + + /// TODO: make edges unique (leftmost strict) + } + + /// If we have constants in two components, we can compare them and add an extra edge. + for (size_t v = 0; v < result.vertices.size(); ++v) + { + for (size_t u = 0; u < result.vertices.size(); ++u) + { + if (v != u && result.vertices[v].hasConstant() && result.vertices[u].hasConstant()) + { + const auto * left = result.vertices[v].getConstant()->as<ASTLiteral>(); + const auto * right = result.vertices[u].getConstant()->as<ASTLiteral>(); + + /// Only GREATER. Equal constant fields = equal literals, so it was already considered above. + if (greater(left->value, right->value)) + result.edges[v].push_back(Edge{Edge::GREATER, u}); + } + } + } + + return result; +} + +std::map<std::pair<size_t, size_t>, ComparisonGraph::Path> ComparisonGraph::buildDistsFromGraph(const Graph & g) +{ + /// Min path: -1 means GREATER, 0 means GREATER_OR_EQUAL. + /// We use the Floyd–Warshall algorithm to find distances between all pairs of vertices. + /// https://en.wikipedia.org/wiki/Floyd–Warshall_algorithm + + constexpr auto inf = std::numeric_limits<Int64>::max(); + const size_t n = g.vertices.size(); + std::vector<std::vector<Int64>> results(n, std::vector<Int64>(n, inf)); + + for (size_t v = 0; v < n; ++v) + { + results[v][v] = 0; + for (const auto & edge : g.edges[v]) + results[v][edge.to] = std::min(results[v][edge.to], static_cast<Int64>(edge.type == Edge::GREATER ? -1 : 0)); + } + + for (size_t k = 0; k < n; ++k) + for (size_t v = 0; v < n; ++v) + for (size_t u = 0; u < n; ++u) + if (results[v][k] != inf && results[k][u] != inf) + results[v][u] = std::min(results[v][u], std::min(results[v][k], results[k][u])); + + std::map<std::pair<size_t, size_t>, Path> path; + for (size_t v = 0; v < n; ++v) + for (size_t u = 0; u < n; ++u) + if (results[v][u] != inf) + path[std::make_pair(v, u)] = (results[v][u] == -1 ? Path::GREATER : Path::GREATER_OR_EQUAL); + + return path; +}
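The -1/0 weights make "some edge on the path is strict" the same as "the minimum over the path is -1", which is why the relaxation combines two legs with min instead of a sum. A self-contained sketch of that relaxation on three components (plain int64_t stand-ins, not ClickHouse code):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <vector>

int main()
{
    constexpr int64_t inf = std::numeric_limits<int64_t>::max();
    // Components: 0 > 1 (strict edge, weight -1), 1 >= 2 (non-strict edge, weight 0).
    std::vector<std::vector<int64_t>> d = {
        {0, -1, inf},
        {inf, 0, 0},
        {inf, inf, 0},
    };
    const size_t n = d.size();
    for (size_t k = 0; k < n; ++k)
        for (size_t v = 0; v < n; ++v)
            for (size_t u = 0; u < n; ++u)
                if (d[v][k] != inf && d[k][u] != inf)
                    d[v][u] = std::min(d[v][u], std::min(d[v][k], d[k][u]));

    assert(d[0][2] == -1);  // 0 > 2: the strict edge propagates along the path (GREATER)
    assert(d[1][2] == 0);   // 1 >= 2 only (GREATER_OR_EQUAL)
}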
+ +std::pair<std::vector<ssize_t>, std::vector<ssize_t>> ComparisonGraph::buildConstBounds() const +{ + const size_t n = graph.vertices.size(); + std::vector<ssize_t> lower(n, -1); + std::vector<ssize_t> upper(n, -1); + + auto get_value = [this](const size_t vertex) -> Field + { + return graph.vertices[vertex].getConstant()->as<ASTLiteral>()->value; + }; + + for (const auto & [edge, path] : dists) + { + const auto [from, to] = edge; + + if (graph.vertices[to].hasConstant()) + { + if (lower[from] == -1 + || greater(get_value(to), get_value(lower[from])) + || (equals(get_value(to), get_value(lower[from])) && path == Path::GREATER)) + lower[from] = to; + } + + if (graph.vertices[from].hasConstant()) + { + if (upper[to] == -1 + || less(get_value(from), get_value(upper[to])) + || (equals(get_value(from), get_value(upper[to])) && path == Path::GREATER)) + upper[to] = from; + } + } + + return {lower, upper}; +} + +} diff --git a/src/Interpreters/ComparisonGraph.h b/src/Interpreters/ComparisonGraph.h new file mode 100644 index 00000000000..60cc97e68a3 --- /dev/null +++ b/src/Interpreters/ComparisonGraph.h @@ -0,0 +1,176 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +/* + * Graph of relations between terms in constraints. + * Allows to compare terms and get equal terms. + */ +class ComparisonGraph +{ +public: + /// atomic_formulas are extracted from constraints. + ComparisonGraph(const std::vector<ASTPtr> & atomic_formulas); + + enum class CompareResult + { + LESS, + LESS_OR_EQUAL, + EQUAL, + GREATER_OR_EQUAL, + GREATER, + NOT_EQUAL, + UNKNOWN, + }; + + static CompareResult atomToCompareResult(const CNFQuery::AtomicFormula & atom); + static CompareResult functionNameToCompareResult(const std::string & name); + static CompareResult inverseCompareResult(const CompareResult result); + + CompareResult compare(const ASTPtr & left, const ASTPtr & right) const; + + /// It's possible that left <expected> right + bool isPossibleCompare(const CompareResult expected, const ASTPtr & left, const ASTPtr & right) const; + + /// It's always true that left <expected> right + bool isAlwaysCompare(const CompareResult expected, const ASTPtr & left, const ASTPtr & right) const; + + /// Returns all expressions from the component to which @ast belongs, if any. + std::vector<ASTPtr> getEqual(const ASTPtr & ast) const; + + /// Returns a constant expression from the component to which @ast belongs, if any. + std::optional<ASTPtr> getEqualConst(const ASTPtr & ast) const; + + /// Finds the component id to which @ast belongs, if any. + std::optional<std::size_t> getComponentId(const ASTPtr & ast) const; + + /// Returns all expressions from the component. + std::vector<ASTPtr> getComponent(const std::size_t id) const; + + size_t getNumOfComponents() const { return graph.vertices.size(); } + + bool hasPath(const size_t left, const size_t right) const; + + /// Find constants lessOrEqual and greaterOrEqual. + /// For int and double, linear programming can be applied here. + /// Returns: {constant, is strict less/greater} + std::optional<std::pair<Field, bool>> getConstUpperBound(const ASTPtr & ast) const; + std::optional<std::pair<Field, bool>> getConstLowerBound(const ASTPtr & ast) const; + + /// Returns all expressions in the graph. + std::vector<ASTs> getVertices() const;
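A hypothetical sketch of how this public interface composes, written as comments because it depends on ClickHouse internals (parseExpr is an illustrative stand-in for constructing the constraint ASTs, not a real helper):

// ASTs constraints = {parseExpr("a <= b"), parseExpr("b <= a"), parseExpr("b < 10")};
// ComparisonGraph graph(constraints);
//
// graph.compare(a, b);          // CompareResult::EQUAL: a <= b and b <= a put both
//                               // expressions into one strongly connected component
// graph.getEqual(a);            // {a, b}
// graph.getConstUpperBound(a);  // {Field(10), /*strict*/ true}, via b < 10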
+ +private: + /// Strongly connected component + struct EqualComponent + { + /// All these expressions are considered as equal. + std::vector<ASTPtr> asts; + std::optional<size_t> constant_index; + + bool hasConstant() const; + ASTPtr getConstant() const; + void buildConstants(); + }; + + /// Edge (from, to, type) means that it's always true that @from <op> @to, + /// where @op is the operation of type @type. + /// + /// TODO: move to diff for int and double: + /// GREATER and GREATER_OR_EQUAL with +const or 0 --- ok + /// with -const --- not ok + /// EQUAL is ok only for 0 + struct Edge + { + enum Type + { + GREATER, + GREATER_OR_EQUAL, + EQUAL, + }; + + Type type; + size_t to; + }; + + struct Graph + { + struct ASTHash + { + size_t operator() (const IAST::Hash & hash) const + { + return hash.first; + } + }; + + std::unordered_map<IAST::Hash, size_t, ASTHash> ast_hash_to_component; + std::vector<EqualComponent> vertices; + std::vector<std::vector<Edge>> edges; + }; + + /// Receives a graph in which each vertex corresponds to one expression, + /// then finds strongly connected components and builds a graph on them. + static Graph buildGraphFromAstsGraph(const Graph & asts_graph); + + static Graph reverseGraph(const Graph & asts_graph); + + /// The first part of finding strongly connected components. + /// Finds the order of exit from vertices of a dfs traversal of the graph. + static void dfsOrder(const Graph & asts_graph, size_t v, std::vector<bool> & visited, std::vector<size_t> & order); + + using OptionalIndices = std::vector<std::optional<size_t>>; + + /// The second part of finding strongly connected components. + /// Assigns the index of its component to each vertex. + static void dfsComponents( + const Graph & reversed_graph, size_t v, + OptionalIndices & components, const size_t component); + + enum class Path + { + GREATER, + GREATER_OR_EQUAL, + }; + + static CompareResult pathToCompareResult(Path path, bool inverse); + std::optional<Path> findPath(const size_t start, const size_t finish) const; + + /// Calculate @dists. + static std::map<std::pair<size_t, size_t>, Path> buildDistsFromGraph(const Graph & g); + + /// Calculate @ast_const_lower_bound and @ast_const_upper_bound. + std::pair<std::vector<ssize_t>, std::vector<ssize_t>> buildConstBounds() const; + + /// Directed acyclic graph in which each vertex corresponds + /// to one equivalence class of expressions. + /// Each edge sets the relation between classes (GREATER or GREATER_OR_EQUAL). + Graph graph; + + /// Precalculated distances between each pair of vertices. + /// Distance can be either 0 or -1. + /// 0 means GREATER_OR_EQUAL. + /// -1 means GREATER. + std::map<std::pair<size_t, size_t>, Path> dists; + + /// Explicitly collected components, for which it's known + /// that expressions in them are unequal. + std::set<std::pair<size_t, size_t>> not_equal; + + /// Maximal constant value for each component that + /// is a lower bound for all expressions in the component. + std::vector<ssize_t> ast_const_lower_bound; + + /// Minimal constant value for each component that + /// is an upper bound for all expressions in the component.
+ std::vector<ssize_t> ast_const_upper_bound; +}; + +} diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 6c7baebff66..9bf66a6ac9c 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -94,7 +94,6 @@ namespace fs = std::filesystem; namespace ProfileEvents { extern const Event ContextLock; - extern const Event CompiledCacheSizeBytes; } namespace CurrentMetrics @@ -105,14 +104,9 @@ namespace CurrentMetrics extern const Metric BackgroundBufferFlushSchedulePoolTask; extern const Metric BackgroundDistributedSchedulePoolTask; extern const Metric BackgroundMessageBrokerSchedulePoolTask; - - - extern const Metric DelayedInserts; extern const Metric BackgroundMergesAndMutationsPoolTask; - extern const Metric BackgroundMovePoolTask; extern const Metric BackgroundFetchesPoolTask; extern const Metric BackgroundCommonPoolTask; - } namespace DB @@ -510,10 +504,23 @@ String Context::getUserScriptsPath() const return shared->user_scripts_path; } -std::vector<String> Context::getWarnings() const +Strings Context::getWarnings() const { - auto lock = getLock(); - return shared->warnings; + Strings common_warnings; + { + auto lock = getLock(); + common_warnings = shared->warnings; + } + for (const auto & setting : settings) + { + if (setting.isValueChanged() && setting.isObsolete()) + { + common_warnings.emplace_back("Some obsolete setting is changed. " "Check 'select * from system.settings where changed' and read the changelog."); + break; + } + } + return common_warnings; } VolumePtr Context::getTemporaryVolume() const @@ -766,23 +773,23 @@ std::shared_ptr<const ContextAccess> Context::getAccess() const return access ? access : ContextAccess::getFullAccess(); } -ASTPtr Context::getRowPolicyCondition(const String & database, const String & table_name, RowPolicy::ConditionType type) const +ASTPtr Context::getRowPolicyFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type) const { auto lock = getLock(); - auto initial_condition = initial_row_policy ? initial_row_policy->getCondition(database, table_name, type) : nullptr; - return getAccess()->getRowPolicyCondition(database, table_name, type, initial_condition); + auto row_filter_of_initial_user = row_policies_of_initial_user ?
row_policies_of_initial_user->getFilter(database, table_name, filter_type) : nullptr; + return getAccess()->getRowPolicyFilter(database, table_name, filter_type, row_filter_of_initial_user); } -void Context::setInitialRowPolicy() +void Context::enableRowPoliciesOfInitialUser() { auto lock = getLock(); - initial_row_policy = nullptr; + row_policies_of_initial_user = nullptr; if (client_info.initial_user == client_info.current_user) return; auto initial_user_id = getAccessControl().find(client_info.initial_user); if (!initial_user_id) return; - initial_row_policy = getAccessControl().getEnabledRowPolicies(*initial_user_id, {}); + row_policies_of_initial_user = getAccessControl().tryGetDefaultRowPolicies(*initial_user_id); } @@ -864,7 +871,9 @@ const Block * Context::tryGetLocalScalar(const String & name) const Tables Context::getExternalTables() const { - assert(!isGlobalContext() || getApplicationType() == ApplicationType::LOCAL); + if (isGlobalContext()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have external tables"); + auto lock = getLock(); Tables res; @@ -889,7 +898,9 @@ Tables Context::getExternalTables() const void Context::addExternalTable(const String & table_name, TemporaryTableHolder && temporary_table) { - assert(!isGlobalContext() || getApplicationType() == ApplicationType::LOCAL); + if (isGlobalContext()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have external tables"); + auto lock = getLock(); if (external_tables_mapping.end() != external_tables_mapping.find(table_name)) throw Exception("Temporary table " + backQuoteIfNeed(table_name) + " already exists.", ErrorCodes::TABLE_ALREADY_EXISTS); @@ -899,7 +910,9 @@ void Context::addExternalTable(const String & table_name, TemporaryTableHolder & std::shared_ptr Context::removeExternalTable(const String & table_name) { - assert(!isGlobalContext() || getApplicationType() == ApplicationType::LOCAL); + if (isGlobalContext()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have external tables"); + std::shared_ptr holder; { auto lock = getLock(); @@ -915,21 +928,27 @@ std::shared_ptr Context::removeExternalTable(const String void Context::addScalar(const String & name, const Block & block) { - assert(!isGlobalContext() || getApplicationType() == ApplicationType::LOCAL); + if (isGlobalContext()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have scalars"); + scalars[name] = block; } void Context::addLocalScalar(const String & name, const Block & block) { - assert(!isGlobalContext() || getApplicationType() == ApplicationType::LOCAL); + if (isGlobalContext()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have local scalars"); + local_scalars[name] = block; } bool Context::hasScalar(const String & name) const { - assert(!isGlobalContext() || getApplicationType() == ApplicationType::LOCAL); + if (isGlobalContext()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have scalars"); + return scalars.count(name); } @@ -941,7 +960,9 @@ void Context::addQueryAccessInfo( const String & projection_name, const String & view_name) { - assert(!isGlobalContext() || getApplicationType() == ApplicationType::LOCAL); + if (isGlobalContext()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have query access info"); + std::lock_guard lock(query_access_info.mutex); query_access_info.databases.emplace(quoted_database_name); query_access_info.tables.emplace(full_quoted_table_name); @@ -955,7 +976,9 @@ void 
Context::addQueryAccessInfo( void Context::addQueryFactoriesInfo(QueryLogFactories factory_type, const String & created_object) const { - assert(!isGlobalContext() || getApplicationType() == ApplicationType::LOCAL); + if (isGlobalContext()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have query factories info"); + auto lock = getLock(); switch (factory_type) @@ -2814,6 +2837,10 @@ StorageID Context::resolveStorageIDImpl(StorageID storage_id, StorageNamespace w } bool look_for_external_table = where & StorageNamespace::ResolveExternal; + /// Global context should not contain temporary tables + if (isGlobalContext()) + look_for_external_table = false; + bool in_current_database = where & StorageNamespace::ResolveCurrentDatabase; bool in_specified_database = where & StorageNamespace::ResolveGlobal; @@ -2831,9 +2858,6 @@ StorageID Context::resolveStorageIDImpl(StorageID storage_id, StorageNamespace w if (look_for_external_table) { - /// Global context should not contain temporary tables - assert(!isGlobalContext() || getApplicationType() == ApplicationType::LOCAL); - auto resolved_id = StorageID::createEmpty(); auto try_resolve = [&](ContextPtr context) -> bool { @@ -3059,6 +3083,10 @@ ReadSettings Context::getReadSettings() const res.mmap_threshold = settings.min_bytes_to_use_mmap_io; res.priority = settings.read_priority; + res.http_max_tries = settings.http_max_tries; + res.http_retry_initial_backoff_ms = settings.http_retry_initial_backoff_ms; + res.http_retry_max_backoff_ms = settings.http_retry_max_backoff_ms; + res.mmap_cache = getMMappedFileCache().get(); return res; diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 03e693935e1..5948cc7f7a7 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -1,6 +1,5 @@ #pragma once -#include #include #include #include @@ -18,6 +17,7 @@ #include "config_core.h" +#include #include #include #include @@ -43,6 +43,7 @@ struct QuotaUsage; class AccessFlags; struct AccessRightsElement; class AccessRightsElements; +enum class RowPolicyFilterType; class EmbeddedDictionaries; class ExternalDictionariesLoader; class ExternalModelsLoader; @@ -195,7 +196,7 @@ private: std::shared_ptr> current_roles; std::shared_ptr settings_constraints_and_current_profiles; std::shared_ptr access; - std::shared_ptr initial_row_policy; + std::shared_ptr row_policies_of_initial_user; String current_database; Settings settings; /// Setting for query execution. @@ -340,7 +341,7 @@ public: String getUserScriptsPath() const; /// A list of warnings about server configuration to place in `system.warnings` table. - std::vector getWarnings() const; + Strings getWarnings() const; VolumePtr getTemporaryVolume() const; @@ -414,12 +415,14 @@ public: std::shared_ptr getAccess() const; - ASTPtr getRowPolicyCondition(const String & database, const String & table_name, RowPolicy::ConditionType type) const; + ASTPtr getRowPolicyFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type) const; - /// Sets an extra row policy based on `client_info.initial_user`, if it exists. + /// Finds and sets extra row policies to be used based on `client_info.initial_user`, + /// if the initial user exists. /// TODO: we need a better solution here. It seems we should pass the initial row policy - /// because a shard is allowed to don't have the initial user or it may be another user with the same name. 
- void setInitialRowPolicy(); + /// because a shard is allowed to not have the initial user or it might be another user + /// with the same name. + void enableRowPoliciesOfInitialUser(); std::shared_ptr getQuota() const; std::optional getQuotaUsage() const; diff --git a/src/Interpreters/DDLTask.cpp b/src/Interpreters/DDLTask.cpp index cfce6ac9463..b8d3d64feea 100644 --- a/src/Interpreters/DDLTask.cpp +++ b/src/Interpreters/DDLTask.cpp @@ -257,12 +257,12 @@ bool DDLTask::tryFindHostInCluster() * */ is_circular_replicated = true; auto * query_with_table = dynamic_cast(query.get()); - if (!query_with_table || query_with_table->database.empty()) + if (!query_with_table || !query_with_table->database) { throw Exception(ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION, "For a distributed DDL on circular replicated cluster its table name must be qualified by database name."); } - if (default_database == query_with_table->database) + if (default_database == query_with_table->getDatabase()) return true; } } @@ -351,8 +351,8 @@ void DatabaseReplicatedTask::parseQueryFromEntry(ContextPtr context) if (auto * ddl_query = dynamic_cast(query.get())) { /// Update database name with actual name of local database - assert(ddl_query->database.empty()); - ddl_query->database = database->getDatabaseName(); + assert(!ddl_query->database); + ddl_query->setDatabase(database->getDatabaseName()); } } diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index d7984af30c7..9fefcf012a9 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -662,7 +662,7 @@ void DDLWorker::processTask(DDLTaskBase & task, const ZooKeeperPtr & zookeeper) StoragePtr storage; if (auto * query_with_table = dynamic_cast(task.query.get()); query_with_table) { - if (!query_with_table->table.empty()) + if (query_with_table->table) { /// It's not CREATE DATABASE auto table_id = context->tryResolveStorageID(*query_with_table, Context::ResolveOrdinary); @@ -1154,8 +1154,7 @@ void DDLWorker::runMainThread() cleanup_event->set(); scheduleTasks(reinitialized); - LOG_DEBUG(log, "Waiting for queue updates (stat: {}, {}, {}, {})", - queue_node_stat.version, queue_node_stat.cversion, queue_node_stat.numChildren, queue_node_stat.pzxid); + LOG_DEBUG(log, "Waiting for queue updates"); queue_updated_event->wait(); } catch (const Coordination::Exception & e) diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 205d7052d0c..117119a3ee8 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -50,6 +50,7 @@ namespace ErrorCodes extern const int DATABASE_NOT_EMPTY; extern const int DATABASE_ACCESS_DENIED; extern const int LOGICAL_ERROR; + extern const int HAVE_DEPENDENT_OBJECTS; } TemporaryTableHolder::TemporaryTableHolder(ContextPtr context_, const TemporaryTableHolder::Creator & creator, const ASTPtr & query) @@ -65,9 +66,9 @@ TemporaryTableHolder::TemporaryTableHolder(ContextPtr context_, const TemporaryT if (create->uuid == UUIDHelpers::Nil) create->uuid = UUIDHelpers::generateV4(); id = create->uuid; - create->table = "_tmp_" + toString(id); - global_name = create->table; - create->database = DatabaseCatalog::TEMPORARY_DATABASE; + create->setTable("_tmp_" + toString(id)); + global_name = create->getTable(); + create->setDatabase(DatabaseCatalog::TEMPORARY_DATABASE); } else { @@ -247,10 +248,9 @@ DatabaseAndTable DatabaseCatalog::getTableImpl( #if USE_MYSQL /// It's definitely not the best place for this logic, but behaviour must 
be consistent with DatabaseMaterializedMySQL::tryGetTable(...) - if (db_and_table.first->getEngineName() == "MaterializedMySQL") + if (!context_->isInternalQuery() && db_and_table.first->getEngineName() == "MaterializedMySQL") { - if (!MaterializedMySQLSyncThread::isMySQLSyncThread()) - db_and_table.second = std::make_shared(std::move(db_and_table.second), db_and_table.first.get()); + db_and_table.second = std::make_shared(std::move(db_and_table.second), db_and_table.first.get()); } #endif return db_and_table; @@ -376,7 +376,7 @@ DatabasePtr DatabaseCatalog::detachDatabase(ContextPtr local_context, const Stri return db; } -void DatabaseCatalog::updateDatabaseName(const String & old_name, const String & new_name) +void DatabaseCatalog::updateDatabaseName(const String & old_name, const String & new_name, const Strings & tables_in_database) { std::lock_guard lock{databases_mutex}; assert(databases.find(new_name) == databases.end()); @@ -385,6 +385,17 @@ void DatabaseCatalog::updateDatabaseName(const String & old_name, const String & auto db = it->second; databases.erase(it); databases.emplace(new_name, db); + + for (const auto & table_name : tables_in_database) + { + QualifiedTableName new_table_name{new_name, table_name}; + auto dependencies = tryRemoveLoadingDependenciesUnlocked(QualifiedTableName{old_name, table_name}, /* check_dependencies */ false); + DependenciesInfos new_info; + for (const auto & dependency : dependencies) + new_info[dependency].dependent_database_objects.insert(new_table_name); + new_info[new_table_name].dependencies = std::move(dependencies); + mergeDependenciesGraphs(loading_dependencies, new_info); + } } DatabasePtr DatabaseCatalog::getDatabase(const String & database_name) const @@ -774,8 +785,8 @@ void DatabaseCatalog::enqueueDroppedTableCleanup(StorageID table_id, StoragePtr if (create) { String data_path = "store/" + getPathForUUID(table_id.uuid); - create->database = table_id.database_name; - create->table = table_id.table_name; + create->setDatabase(table_id.database_name); + create->setTable(table_id.table_name); try { table = createTableFromAST(*create, table_id.getDatabaseName(), data_path, getContext(), false).second; @@ -938,6 +949,119 @@ void DatabaseCatalog::waitTableFinallyDropped(const UUID & uuid) }); } +void DatabaseCatalog::addLoadingDependencies(const QualifiedTableName & table, TableNamesSet && dependencies) +{ + DependenciesInfos new_info; + for (const auto & dependency : dependencies) + new_info[dependency].dependent_database_objects.insert(table); + new_info[table].dependencies = std::move(dependencies); + addLoadingDependencies(new_info); +} + +void DatabaseCatalog::addLoadingDependencies(const DependenciesInfos & new_infos) +{ + std::lock_guard lock{databases_mutex}; + mergeDependenciesGraphs(loading_dependencies, new_infos); +} + +DependenciesInfo DatabaseCatalog::getLoadingDependenciesInfo(const StorageID & table_id) const +{ + std::lock_guard lock{databases_mutex}; + auto it = loading_dependencies.find(table_id.getQualifiedName()); + if (it == loading_dependencies.end()) + return {}; + return it->second; +} + +TableNamesSet DatabaseCatalog::tryRemoveLoadingDependencies(const StorageID & table_id, bool check_dependencies, bool is_drop_database) +{ + QualifiedTableName removing_table = table_id.getQualifiedName(); + std::lock_guard lock{databases_mutex}; + return tryRemoveLoadingDependenciesUnlocked(removing_table, check_dependencies, is_drop_database); +} + +TableNamesSet DatabaseCatalog::tryRemoveLoadingDependenciesUnlocked(const 
QualifiedTableName & removing_table, bool check_dependencies, bool is_drop_database) +{ + auto it = loading_dependencies.find(removing_table); + if (it == loading_dependencies.end()) + return {}; + + TableNamesSet & dependent = it->second.dependent_database_objects; + if (!dependent.empty()) + { + if (check_dependencies && !is_drop_database) + throw Exception(ErrorCodes::HAVE_DEPENDENT_OBJECTS, "Cannot drop or rename {}, because some tables depend on it: {}", + removing_table, fmt::join(dependent, ", ")); + + /// For DROP DATABASE we should ignore dependent tables from the same database. + /// TODO unload tables in reverse topological order and remove this code + if (check_dependencies) + { + TableNames from_other_databases; + for (const auto & table : dependent) + if (table.database != removing_table.database) + from_other_databases.push_back(table); + + if (!from_other_databases.empty()) + throw Exception(ErrorCodes::HAVE_DEPENDENT_OBJECTS, "Cannot drop or rename {}, because some tables depend on it: {}", + removing_table, fmt::join(from_other_databases, ", ")); + } + + for (const auto & table : dependent) + { + [[maybe_unused]] bool removed = loading_dependencies[table].dependencies.erase(removing_table); + assert(removed); + } + dependent.clear(); + } + + TableNamesSet dependencies = it->second.dependencies; + for (const auto & table : dependencies) + { + [[maybe_unused]] bool removed = loading_dependencies[table].dependent_database_objects.erase(removing_table); + assert(removed); + } + + loading_dependencies.erase(it); + return dependencies; +} + +void DatabaseCatalog::checkTableCanBeRemovedOrRenamed(const StorageID & table_id) const +{ + QualifiedTableName removing_table = table_id.getQualifiedName(); + std::lock_guard lock{databases_mutex}; + auto it = loading_dependencies.find(removing_table); + if (it == loading_dependencies.end()) + return; + + const TableNamesSet & dependent = it->second.dependent_database_objects; + if (!dependent.empty()) + throw Exception(ErrorCodes::HAVE_DEPENDENT_OBJECTS, "Cannot drop or rename {}, because some tables depend on it: {}", + table_id.getNameForLogs(), fmt::join(dependent, ", ")); +} + +void DatabaseCatalog::updateLoadingDependencies(const StorageID & table_id, TableNamesSet && new_dependencies) +{ + if (new_dependencies.empty()) + return; + QualifiedTableName table_name = table_id.getQualifiedName(); + std::lock_guard lock{databases_mutex}; + auto it = loading_dependencies.find(table_name); + if (it == loading_dependencies.end()) + it = loading_dependencies.emplace(table_name, DependenciesInfo{}).first; + + auto & old_dependencies = it->second.dependencies; + for (const auto & dependency : old_dependencies) + if (!new_dependencies.contains(dependency)) + loading_dependencies[dependency].dependent_database_objects.erase(table_name); + + for (const auto & dependency : new_dependencies) + if (!old_dependencies.contains(dependency)) + loading_dependencies[dependency].dependent_database_objects.insert(table_name); + + old_dependencies = std::move(new_dependencies); +} + DDLGuard::DDLGuard(Map & map_, std::shared_mutex & db_mutex_, std::unique_lock guards_lock_, const String & elem, const String & database_name) : map(map_), db_mutex(db_mutex_), guards_lock(std::move(guards_lock_)) diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h index 6079553b025..a32995658f1 100644 --- a/src/Interpreters/DatabaseCatalog.h +++ b/src/Interpreters/DatabaseCatalog.h @@ -3,6 +3,7 @@ #include #include #include +#include #include 
#include @@ -148,7 +149,7 @@ public: void attachDatabase(const String & database_name, const DatabasePtr & database); DatabasePtr detachDatabase(ContextPtr local_context, const String & database_name, bool drop = false, bool check_empty = true); - void updateDatabaseName(const String & old_name, const String & new_name); + void updateDatabaseName(const String & old_name, const String & new_name, const Strings & tables_in_database); /// database_name must be not empty DatabasePtr getDatabase(const String & database_name) const; @@ -207,6 +208,16 @@ public: void waitTableFinallyDropped(const UUID & uuid); + void addLoadingDependencies(const QualifiedTableName & table, TableNamesSet && dependencies); + void addLoadingDependencies(const DependenciesInfos & new_infos); + DependenciesInfo getLoadingDependenciesInfo(const StorageID & table_id) const; + + TableNamesSet tryRemoveLoadingDependencies(const StorageID & table_id, bool check_dependencies, bool is_drop_database = false); + TableNamesSet tryRemoveLoadingDependenciesUnlocked(const QualifiedTableName & removing_table, bool check_dependencies, bool is_drop_database = false); + void checkTableCanBeRemovedOrRenamed(const StorageID & table_id) const; + + void updateLoadingDependencies(const StorageID & table_id, TableNamesSet && new_dependencies); + private: // The global instance of database catalog. unique_ptr is to allow // deferred initialization. Thought I'd use std::optional, but I can't @@ -259,6 +270,8 @@ private: UUIDToDatabaseMap db_uuid_map; UUIDToStorageMap uuid_map; + DependenciesInfos loading_dependencies; + Poco::Logger * log; /// Do not allow simultaneous execution of DDL requests on the same table. diff --git a/src/Interpreters/InJoinSubqueriesPreprocessor.cpp b/src/Interpreters/InJoinSubqueriesPreprocessor.cpp index 1fad674f256..28e1757bc3e 100644 --- a/src/Interpreters/InJoinSubqueriesPreprocessor.cpp +++ b/src/Interpreters/InJoinSubqueriesPreprocessor.cpp @@ -191,13 +191,25 @@ private: ASTTableJoin * table_join = node.table_join->as(); if (table_join->locality != ASTTableJoin::Locality::Global) { - if (auto & subquery = node.table_expression->as()->subquery) + if (auto * table = node.table_expression->as()) { - std::vector renamed; - NonGlobalTableVisitor::Data table_data(data.getContext(), data.checker, renamed, nullptr, table_join); - NonGlobalTableVisitor(table_data).visit(subquery); - if (!renamed.empty()) //-V547 - data.renamed_tables.emplace_back(subquery, std::move(renamed)); + if (auto & subquery = table->subquery) + { + std::vector renamed; + NonGlobalTableVisitor::Data table_data(data.getContext(), data.checker, renamed, nullptr, table_join); + NonGlobalTableVisitor(table_data).visit(subquery); + if (!renamed.empty()) //-V547 + data.renamed_tables.emplace_back(subquery, std::move(renamed)); + } + else if (table->database_and_table_name) + { + auto tb = node.table_expression; + std::vector renamed; + NonGlobalTableVisitor::Data table_data{data.getContext(), data.checker, renamed, nullptr, table_join}; + NonGlobalTableVisitor(table_data).visit(tb); + if (!renamed.empty()) //-V547 + data.renamed_tables.emplace_back(tb, std::move(renamed)); + } } } } diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp index b620ddf6a1e..665d76625ef 100644 --- a/src/Interpreters/InterpreterAlterQuery.cpp +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -62,7 +62,7 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter) 
getContext()->checkAccess(getRequiredAccess()); auto table_id = getContext()->resolveStorageID(alter, Context::ResolveOrdinary); - query_ptr->as().database = table_id.database_name; + query_ptr->as().setDatabase(table_id.database_name); DatabasePtr database = DatabaseCatalog::instance().getDatabase(table_id.database_name); if (typeid_cast(database.get()) @@ -80,7 +80,6 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter) if (table->isStaticStorage()) throw Exception(ErrorCodes::TABLE_IS_READ_ONLY, "Table is read-only"); auto table_lock = table->lockForShare(getContext()->getCurrentQueryId(), getContext()->getSettingsRef().lock_acquire_timeout); - auto alter_lock = table->lockForAlter(getContext()->getSettingsRef().lock_acquire_timeout); auto metadata_snapshot = table->getInMemoryMetadataPtr(); /// Add default database to table identifiers that we can encounter in e.g. default expressions, mutation expression, etc. @@ -160,6 +159,7 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter) if (!alter_commands.empty()) { + auto alter_lock = table->lockForAlter(getContext()->getSettingsRef().lock_acquire_timeout); StorageInMemoryMetadata metadata = table->getInMemoryMetadata(); alter_commands.validate(metadata, getContext()); alter_commands.prepare(metadata); @@ -175,7 +175,7 @@ BlockIO InterpreterAlterQuery::executeToDatabase(const ASTAlterQuery & alter) { BlockIO res; getContext()->checkAccess(getRequiredAccess()); - DatabasePtr database = DatabaseCatalog::instance().getDatabase(alter.database); + DatabasePtr database = DatabaseCatalog::instance().getDatabase(alter.getDatabase()); AlterCommands alter_commands; for (const auto & child : alter.command_list->children) @@ -215,7 +215,7 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccess() const AccessRightsElements required_access; const auto & alter = query_ptr->as(); for (const auto & child : alter.command_list->children) - boost::range::push_back(required_access, getRequiredAccessForCommand(child->as(), alter.database, alter.table)); + boost::range::push_back(required_access, getRequiredAccessForCommand(child->as(), alter.getDatabase(), alter.getTable())); return required_access; } diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index f1db1f771f3..9898cbd2011 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -54,6 +54,7 @@ #include #include #include +#include #include @@ -100,7 +101,7 @@ InterpreterCreateQuery::InterpreterCreateQuery(const ASTPtr & query_ptr_, Contex BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) { - String database_name = create.database; + String database_name = create.getDatabase(); auto guard = DatabaseCatalog::instance().getDDLGuard(database_name, ""); @@ -126,11 +127,11 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) /// Short syntax: try read database definition from file auto ast = DatabaseOnDisk::parseQueryFromMetadata(nullptr, getContext(), metadata_file_path); create = ast->as(); - if (!create.table.empty() || !create.storage) + if (create.table || !create.storage) throw Exception(ErrorCodes::INCORRECT_QUERY, "Metadata file {} contains incorrect CREATE DATABASE query", metadata_file_path.string()); create.attach = true; create.attach_short_syntax = true; - create.database = database_name; + create.setDatabase(database_name); } else if (!create.storage) { @@ -160,7 +161,7 @@ BlockIO 
InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) { if (create.attach && create.uuid == UUIDHelpers::Nil) throw Exception(ErrorCodes::INCORRECT_QUERY, "UUID must be specified for ATTACH. " - "If you want to attach existing database, use just ATTACH DATABASE {};", create.database); + "If you want to attach existing database, use just ATTACH DATABASE {};", create.getDatabase()); else if (create.uuid == UUIDHelpers::Nil) create.uuid = UUIDHelpers::generateV4(); @@ -237,7 +238,7 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) DatabasePtr database = DatabaseFactory::get(create, metadata_path / "", getContext()); if (create.uuid != UUIDHelpers::Nil) - create.database = TABLE_WITH_UUID_NAME_PLACEHOLDER; + create.setDatabase(TABLE_WITH_UUID_NAME_PLACEHOLDER); bool need_write_metadata = !create.attach || !fs::exists(metadata_file_path); @@ -374,18 +375,26 @@ ASTPtr InterpreterCreateQuery::formatColumns(const ColumnsDescription & columns) { column_declaration->default_specifier = toString(column.default_desc.kind); column_declaration->default_expression = column.default_desc.expression->clone(); + column_declaration->children.push_back(column_declaration->default_expression); } if (!column.comment.empty()) { column_declaration->comment = std::make_shared(Field(column.comment)); + column_declaration->children.push_back(column_declaration->comment); } if (column.codec) + { column_declaration->codec = column.codec; + column_declaration->children.push_back(column_declaration->codec); + } if (column.ttl) + { column_declaration->ttl = column.ttl; + column_declaration->children.push_back(column_declaration->ttl); + } columns_list->children.push_back(column_declaration_ptr); } @@ -407,7 +416,7 @@ ASTPtr InterpreterCreateQuery::formatConstraints(const ConstraintsDescription & { auto res = std::make_shared(); - for (const auto & constraint : constraints.constraints) + for (const auto & constraint : constraints.getConstraints()) res->children.push_back(constraint->clone()); return res; @@ -555,11 +564,12 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription( ConstraintsDescription InterpreterCreateQuery::getConstraintsDescription(const ASTExpressionList * constraints) { - ConstraintsDescription res; + ASTs constraints_data; if (constraints) for (const auto & constraint : constraints->children) - res.constraints.push_back(std::dynamic_pointer_cast(constraint->clone())); - return res; + constraints_data.push_back(constraint->clone()); + + return ConstraintsDescription{constraints_data}; } @@ -814,7 +824,7 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data bool has_uuid = create.uuid != UUIDHelpers::Nil || create.to_inner_uuid != UUIDHelpers::Nil; if (has_uuid && !is_on_cluster) throw Exception(ErrorCodes::INCORRECT_QUERY, - "{} UUID specified, but engine of database {} is not Atomic", kind, create.database); + "{} UUID specified, but engine of database {} is not Atomic", kind, create.getDatabase()); /// Ignore UUID if it's ON CLUSTER query create.uuid = UUIDHelpers::Nil; @@ -826,12 +836,12 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) { /// Temporary tables are created out of databases. - if (create.temporary && !create.database.empty()) + if (create.temporary && create.database) throw Exception("Temporary tables cannot be inside a database. 
You should not specify a database for a temporary table.", ErrorCodes::BAD_DATABASE_FOR_TEMPORARY_TABLE); String current_database = getContext()->getCurrentDatabase(); - auto database_name = create.database.empty() ? current_database : create.database; + auto database_name = create.database ? create.getDatabase() : current_database; // If this is a stub ATTACH query, read the query definition from the database if (create.attach && !create.storage && !create.columns_list) @@ -840,12 +850,12 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) if (database->getEngineName() == "Replicated") { - auto guard = DatabaseCatalog::instance().getDDLGuard(database_name, create.table); + auto guard = DatabaseCatalog::instance().getDDLGuard(database_name, create.getTable()); if (auto* ptr = typeid_cast(database.get()); ptr && !getContext()->getClientInfo().is_replicated_database_internal) { - create.database = database_name; + create.setDatabase(database_name); guard->releaseTableLock(); return ptr->tryEnqueueReplicatedDDL(query_ptr, getContext()); } @@ -854,18 +864,18 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) bool if_not_exists = create.if_not_exists; // Table SQL definition is available even if the table is detached (even permanently) - auto query = database->getCreateTableQuery(create.table, getContext()); + auto query = database->getCreateTableQuery(create.getTable(), getContext()); auto create_query = query->as(); if (!create.is_dictionary && create_query.is_dictionary) throw Exception(ErrorCodes::INCORRECT_QUERY, "Cannot ATTACH TABLE {}.{}, it is a Dictionary", - backQuoteIfNeed(database_name), backQuoteIfNeed(create.table)); + backQuoteIfNeed(database_name), backQuoteIfNeed(create.getTable())); if (create.is_dictionary && !create_query.is_dictionary) throw Exception(ErrorCodes::INCORRECT_QUERY, "Cannot ATTACH DICTIONARY {}.{}, it is a Table", - backQuoteIfNeed(database_name), backQuoteIfNeed(create.table)); + backQuoteIfNeed(database_name), backQuoteIfNeed(create.getTable())); create = create_query; // Copy the saved create query, but use ATTACH instead of CREATE @@ -908,11 +918,11 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) "use either ATTACH TABLE {}; to attach existing table " "or CREATE TABLE {} ; to create new table " "or ATTACH TABLE {} FROM '/path/to/data/'
; to create new table and attach data.", - create.table, create.table, create.table); + create.getTable(), create.getTable(), create.getTable()); } - if (!create.temporary && create.database.empty()) - create.database = current_database; + if (!create.temporary && !create.database) + create.setDatabase(current_database); if (create.to_table_id && create.to_table_id.database_name.empty()) create.to_table_id.database_name = current_database; @@ -940,7 +950,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) if (need_add_to_database && database->getEngineName() == "Replicated") { - auto guard = DatabaseCatalog::instance().getDDLGuard(create.database, create.table); + auto guard = DatabaseCatalog::instance().getDDLGuard(create.getDatabase(), create.getTable()); if (auto * ptr = typeid_cast(database.get()); ptr && !getContext()->getClientInfo().is_replicated_database_internal) @@ -960,6 +970,11 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) if (!created) /// Table already exists return {}; + /// If table has dependencies - add them to the graph + TableNamesSet loading_dependencies = getDependenciesSetFromCreateQuery(getContext()->getGlobalContext(), query_ptr); + if (!loading_dependencies.empty()) + DatabaseCatalog::instance().addLoadingDependencies(QualifiedTableName{database_name, create.getTable()}, std::move(loading_dependencies)); + return fillTableIfNeeded(create); } @@ -977,16 +992,16 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create, /** If the request specifies IF NOT EXISTS, we allow concurrent CREATE queries (which do nothing). * If table doesn't exist, one thread is creating table, while others wait in DDLGuard. */ - guard = DatabaseCatalog::instance().getDDLGuard(create.database, create.table); + guard = DatabaseCatalog::instance().getDDLGuard(create.getDatabase(), create.getTable()); - database = DatabaseCatalog::instance().getDatabase(create.database); + database = DatabaseCatalog::instance().getDatabase(create.getDatabase()); assertOrSetUUID(create, database); String storage_name = create.is_dictionary ? "Dictionary" : "Table"; auto storage_already_exists_error_code = create.is_dictionary ? ErrorCodes::DICTIONARY_ALREADY_EXISTS : ErrorCodes::TABLE_ALREADY_EXISTS; /// Table can be created before or it can be created concurrently in another thread, while we were waiting in DDLGuard. 
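Note on the `getDependenciesSetFromCreateQuery` / `addLoadingDependencies` pair above: it is the registration half of a small dependency graph kept by DatabaseCatalog, so that a table created over other tables (for example a dictionary over its source table) records what it needs at load time. A minimal sketch of such a registry, with hypothetical names (`DependencyRegistry`, `QualifiedName`) and none of the locking the real catalog requires:

    #include <map>
    #include <set>
    #include <stdexcept>
    #include <string>
    #include <tuple>
    #include <utility>

    struct QualifiedName
    {
        std::string database;
        std::string table;
        bool operator<(const QualifiedName & rhs) const
        {
            return std::tie(database, table) < std::tie(rhs.database, rhs.table);
        }
    };

    class DependencyRegistry
    {
    public:
        /// CREATE registers the tables `table` needs at load time.
        void add(const QualifiedName & table, std::set<QualifiedName> deps)
        {
            for (const auto & dep : deps)
                dependents[dep].insert(table);
            dependencies[table] = std::move(deps);
        }

        /// DROP/DETACH: refuse to remove a table that others still need,
        /// mirroring the check_table_dependencies setting used in this diff.
        std::set<QualifiedName> tryRemove(const QualifiedName & table, bool check)
        {
            auto dependents_it = dependents.find(table);
            if (check && dependents_it != dependents.end() && !dependents_it->second.empty())
                throw std::runtime_error("table is still used by dependent tables");

            std::set<QualifiedName> removed;
            if (auto it = dependencies.find(table); it != dependencies.end())
            {
                removed = std::move(it->second);
                for (const auto & dep : removed)
                    dependents[dep].erase(table);
                dependencies.erase(it);
            }
            return removed;
        }

    private:
        std::map<QualifiedName, std::set<QualifiedName>> dependencies;   /// table -> what it needs
        std::map<QualifiedName, std::set<QualifiedName>> dependents;     /// table -> who needs it
    };

The returned set matters: InterpreterRenameQuery later in this diff removes the dependencies under the old name and re-registers exactly that set under the new one.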
-        if (database->isTableExist(create.table, getContext()))
+        if (database->isTableExist(create.getTable(), getContext()))
         {
             /// TODO Check structure of table
             if (create.if_not_exists)
@@ -995,8 +1010,8 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
             {
                 /// when executing CREATE OR REPLACE VIEW, drop current existing view
                 auto drop_ast = std::make_shared<ASTDropQuery>();
-                drop_ast->database = create.database;
-                drop_ast->table = create.table;
+                drop_ast->setDatabase(create.getDatabase());
+                drop_ast->setTable(create.getTable());
                 drop_ast->no_ddl_lock = true;

                 auto drop_context = Context::createCopy(context);
@@ -1005,7 +1020,7 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
             }
             else
                 throw Exception(storage_already_exists_error_code,
-                    "{} {}.{} already exists", storage_name, backQuoteIfNeed(create.database), backQuoteIfNeed(create.table));
+                    "{} {}.{} already exists", storage_name, backQuoteIfNeed(create.getDatabase()), backQuoteIfNeed(create.getTable()));
         }

         data_path = database->getTableDataPath(create);
@@ -1016,10 +1031,10 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
     }
     else
     {
-        if (create.if_not_exists && getContext()->tryResolveStorageID({"", create.table}, Context::ResolveExternal))
+        if (create.if_not_exists && getContext()->tryResolveStorageID({"", create.getTable()}, Context::ResolveExternal))
             return false;

-        String temporary_table_name = create.table;
+        String temporary_table_name = create.getTable();
         auto temporary_table = TemporaryTableHolder(getContext(), properties.columns, properties.constraints, query_ptr);
         getContext()->getSessionContext()->addExternalTable(temporary_table_name, std::move(temporary_table));
         return true;
@@ -1056,8 +1071,8 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
     {
         const auto & factory = TableFunctionFactory::instance();
         auto table_func = factory.get(create.as_table_function, getContext());
-        res = table_func->execute(create.as_table_function, getContext(), create.table, properties.columns);
-        res->renameInMemory({create.database, create.table, create.uuid});
+        res = table_func->execute(create.as_table_function, getContext(), create.getTable(), properties.columns);
+        res->renameInMemory({create.getDatabase(), create.getTable(), create.uuid});
     }
     else
     {
@@ -1075,12 +1090,12 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
             "ATTACH ... FROM ... query is not supported for {} table engine, "
             "because such tables do not store any data on disk. Use CREATE instead.", res->getName());

-    database->createTable(getContext(), create.table, res, query_ptr);
+    database->createTable(getContext(), create.getTable(), res, query_ptr);

    /// Move table data to the proper place. We do not move data earlier to avoid situations
    /// when the data directory has been moved but the table has not yet been created due to some error.
    if (from_path)
-        res->rename(actual_data_path, {create.database, create.table, create.uuid});
+        res->rename(actual_data_path, {create.getDatabase(), create.getTable(), create.uuid});

    /// We must call "startup" and "shutdown" while holding DDLGuard.
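doCreateTable() above leans on DDLGuard to make concurrent CREATE ... IF NOT EXISTS safe: all DDL against the same (database, table) pair is serialized on a per-name lock, so exactly one thread creates the table while the others wait and then observe that it already exists. A toy illustration of the idea, not the real implementation (which also reference-counts entries and erases idle ones):

    #include <map>
    #include <mutex>
    #include <string>

    class DDLGuardMap
    {
    public:
        /// Returns a lock held for the duration of one DDL statement on database.table.
        std::unique_lock<std::mutex> lock(const std::string & database, const std::string & table)
        {
            std::mutex * m = nullptr;
            {
                std::lock_guard<std::mutex> map_lock(map_mutex);
                m = &guards[database + "." + table];   /// creates the mutex on first use
            }
            return std::unique_lock<std::mutex>(*m);
        }

    private:
        std::mutex map_mutex;                       /// protects the map itself
        std::map<std::string, std::mutex> guards;   /// node-stable, so &guards[...] stays valid
    };

Usage would be `auto guard = ddl_guards.lock(db, table);` at the top of the DDL path, which is roughly what the getDDLGuard() calls in this diff do.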
/// Because otherwise method "shutdown" (from InterpreterDropQuery) can be called before startup @@ -1128,30 +1143,30 @@ BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create, }; auto ast_drop = std::make_shared(); - String table_to_replace_name = create.table; + String table_to_replace_name = create.getTable(); { - auto database = DatabaseCatalog::instance().getDatabase(create.database); + auto database = DatabaseCatalog::instance().getDatabase(create.getDatabase()); if (database->getUUID() == UUIDHelpers::Nil) throw Exception(ErrorCodes::INCORRECT_QUERY, "{} query is supported only for Atomic databases", create.create_or_replace ? "CREATE OR REPLACE TABLE" : "REPLACE TABLE"); - UInt64 name_hash = sipHash64(create.database + create.table); + UInt64 name_hash = sipHash64(create.getDatabase() + create.getTable()); UInt16 random_suffix = thread_local_rng(); if (auto txn = current_context->getZooKeeperMetadataTransaction()) { /// Avoid different table name on database replicas random_suffix = sipHash64(txn->getTaskZooKeeperPath()); } - create.table = fmt::format("_tmp_replace_{}_{}", - getHexUIntLowercase(name_hash), - getHexUIntLowercase(random_suffix)); + create.setTable(fmt::format("_tmp_replace_{}_{}", + getHexUIntLowercase(name_hash), + getHexUIntLowercase(random_suffix))); - ast_drop->table = create.table; + ast_drop->setTable(create.getTable()); ast_drop->is_dictionary = create.is_dictionary; - ast_drop->database = create.database; + ast_drop->setDatabase(create.getDatabase()); ast_drop->kind = ASTDropQuery::Drop; } @@ -1172,8 +1187,8 @@ BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create, auto ast_rename = std::make_shared(); ASTRenameQuery::Element elem { - ASTRenameQuery::Table{create.database, create.table}, - ASTRenameQuery::Table{create.database, table_to_replace_name} + ASTRenameQuery::Table{create.getDatabase(), create.getTable()}, + ASTRenameQuery::Table{create.getDatabase(), table_to_replace_name} }; ast_rename->elements.push_back(std::move(elem)); @@ -1203,7 +1218,7 @@ BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create, InterpreterDropQuery(ast_drop, drop_context).execute(); } - create.table = table_to_replace_name; + create.setTable(table_to_replace_name); return {}; } @@ -1226,7 +1241,7 @@ BlockIO InterpreterCreateQuery::fillTableIfNeeded(const ASTCreateQuery & create) && !create.is_ordinary_view && !create.is_live_view && (!create.is_materialized_view || create.is_populate)) { auto insert = std::make_shared(); - insert->table_id = {create.database, create.table, create.uuid}; + insert->table_id = {create.getDatabase(), create.getTable(), create.uuid}; insert->select = create.select->clone(); if (create.temporary && !getContext()->getSessionContext()->hasQueryContext()) @@ -1302,7 +1317,7 @@ BlockIO InterpreterCreateQuery::execute() ASTQueryWithOutput::resetOutputASTIfExist(create); /// CREATE|ATTACH DATABASE - if (!create.database.empty() && create.table.empty()) + if (create.database && !create.table) return createDatabase(create); else return createTable(create); @@ -1318,21 +1333,21 @@ AccessRightsElements InterpreterCreateQuery::getRequiredAccess() const AccessRightsElements required_access; const auto & create = query_ptr->as(); - if (create.table.empty()) + if (!create.table) { - required_access.emplace_back(AccessType::CREATE_DATABASE, create.database); + required_access.emplace_back(AccessType::CREATE_DATABASE, create.getDatabase()); } else if (create.is_dictionary) { - 
required_access.emplace_back(AccessType::CREATE_DICTIONARY, create.database, create.table); + required_access.emplace_back(AccessType::CREATE_DICTIONARY, create.getDatabase(), create.getTable()); } else if (create.isView()) { assert(!create.temporary); if (create.replace_view) - required_access.emplace_back(AccessType::DROP_VIEW | AccessType::CREATE_VIEW, create.database, create.table); + required_access.emplace_back(AccessType::DROP_VIEW | AccessType::CREATE_VIEW, create.getDatabase(), create.getTable()); else - required_access.emplace_back(AccessType::CREATE_VIEW, create.database, create.table); + required_access.emplace_back(AccessType::CREATE_VIEW, create.getDatabase(), create.getTable()); } else { @@ -1341,8 +1356,8 @@ AccessRightsElements InterpreterCreateQuery::getRequiredAccess() const else { if (create.replace_table) - required_access.emplace_back(AccessType::DROP_TABLE, create.database, create.table); - required_access.emplace_back(AccessType::CREATE_TABLE, create.database, create.table); + required_access.emplace_back(AccessType::DROP_TABLE, create.getDatabase(), create.getTable()); + required_access.emplace_back(AccessType::CREATE_TABLE, create.getDatabase(), create.getTable()); } } diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index 13a376dff8d..90c4311b032 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -56,9 +56,9 @@ BlockIO InterpreterDropQuery::execute() if (getContext()->getSettingsRef().database_atomic_wait_for_drop_and_detach_synchronously) drop.no_delay = true; - if (!drop.table.empty()) + if (drop.table) return executeToTable(drop); - else if (!drop.database.empty()) + else if (drop.database) return executeToDatabase(drop); else throw Exception("Nothing to drop, both names are empty", ErrorCodes::LOGICAL_ERROR); @@ -80,22 +80,22 @@ BlockIO InterpreterDropQuery::executeToTable(ASTDropQuery & query) { DatabasePtr database; UUID table_to_wait_on = UUIDHelpers::Nil; - auto res = executeToTableImpl(query, database, table_to_wait_on); + auto res = executeToTableImpl(getContext(), query, database, table_to_wait_on); if (query.no_delay) waitForTableToBeActuallyDroppedOrDetached(query, database, table_to_wait_on); return res; } -BlockIO InterpreterDropQuery::executeToTableImpl(ASTDropQuery & query, DatabasePtr & db, UUID & uuid_to_wait) +BlockIO InterpreterDropQuery::executeToTableImpl(ContextPtr context_, ASTDropQuery & query, DatabasePtr & db, UUID & uuid_to_wait) { /// NOTE: it does not contain UUID, we will resolve it with locked DDLGuard auto table_id = StorageID(query); if (query.temporary || table_id.database_name.empty()) { - if (getContext()->tryResolveStorageID(table_id, Context::ResolveExternal)) + if (context_->tryResolveStorageID(table_id, Context::ResolveExternal)) return executeToTemporaryTable(table_id.getTableName(), query.kind); else - query.database = table_id.database_name = getContext()->getCurrentDatabase(); + query.setDatabase(table_id.database_name = context_->getCurrentDatabase()); } if (query.temporary) @@ -109,8 +109,8 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ASTDropQuery & query, DatabaseP auto ddl_guard = (!query.no_ddl_lock ? DatabaseCatalog::instance().getDDLGuard(table_id.database_name, table_id.table_name) : nullptr); /// If table was already dropped by anyone, an exception will be thrown - auto [database, table] = query.if_exists ? 
DatabaseCatalog::instance().tryGetDatabaseAndTable(table_id, getContext())
-                              : DatabaseCatalog::instance().getDatabaseAndTable(table_id, getContext());
+    auto [database, table] = query.if_exists ? DatabaseCatalog::instance().tryGetDatabaseAndTable(table_id, context_)
+                                             : DatabaseCatalog::instance().getDatabaseAndTable(table_id, context_);

     if (database && table)
     {
@@ -130,9 +130,9 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ASTDropQuery & query, DatabaseP
         table_id.uuid = database->tryGetTableUUID(table_id.table_name);

         /// Prevents recursive drop from drop database query. The original query must specify a table.
-        bool is_drop_or_detach_database = query_ptr->as<ASTDropQuery>()->table.empty();
+        bool is_drop_or_detach_database = !query_ptr->as<ASTDropQuery>()->table;
         bool is_replicated_ddl_query = typeid_cast<DatabaseReplicated *>(database.get()) &&
-            !getContext()->getClientInfo().is_replicated_database_internal &&
+            !context_->getClientInfo().is_replicated_database_internal &&
             !is_drop_or_detach_database;

         AccessFlags drop_storage;
@@ -147,20 +147,20 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ASTDropQuery & query, DatabaseP
         if (is_replicated_ddl_query)
         {
             if (query.kind == ASTDropQuery::Kind::Detach)
-                getContext()->checkAccess(drop_storage, table_id);
+                context_->checkAccess(drop_storage, table_id);
             else if (query.kind == ASTDropQuery::Kind::Truncate)
-                getContext()->checkAccess(AccessType::TRUNCATE, table_id);
+                context_->checkAccess(AccessType::TRUNCATE, table_id);
             else if (query.kind == ASTDropQuery::Kind::Drop)
-                getContext()->checkAccess(drop_storage, table_id);
+                context_->checkAccess(drop_storage, table_id);

             ddl_guard->releaseTableLock();
             table.reset();
-            return typeid_cast<DatabaseReplicated *>(database.get())->tryEnqueueReplicatedDDL(query.clone(), getContext());
+            return typeid_cast<DatabaseReplicated *>(database.get())->tryEnqueueReplicatedDDL(query.clone(), context_);
         }

         if (query.kind == ASTDropQuery::Kind::Detach)
         {
-            getContext()->checkAccess(drop_storage, table_id);
+            context_->checkAccess(drop_storage, table_id);

             if (table->isDictionary())
             {
@@ -175,17 +175,20 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ASTDropQuery & query, DatabaseP
             TableExclusiveLockHolder table_lock;
             if (database->getUUID() == UUIDHelpers::Nil)
-                table_lock = table->lockExclusively(getContext()->getCurrentQueryId(), getContext()->getSettingsRef().lock_acquire_timeout);
+                table_lock = table->lockExclusively(context_->getCurrentQueryId(), context_->getSettingsRef().lock_acquire_timeout);

             if (query.permanently)
             {
+                /// Server may fail to restart after DETACH PERMANENTLY if the table has dependent ones
+                DatabaseCatalog::instance().tryRemoveLoadingDependencies(table_id, getContext()->getSettingsRef().check_table_dependencies,
+                                                                         is_drop_or_detach_database);
                 /// Drop table from memory, don't touch data, metadata file renamed and will be skipped during server restart
-                database->detachTablePermanently(getContext(), table_id.table_name);
+                database->detachTablePermanently(context_, table_id.table_name);
             }
             else
             {
                 /// Drop table from memory, don't touch data and metadata
-                database->detachTable(table_id.table_name);
+                database->detachTable(context_, table_id.table_name);
             }
         }
         else if (query.kind == ASTDropQuery::Kind::Truncate)
@@ -193,20 +196,20 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ASTDropQuery & query, DatabaseP
             if (table->isDictionary())
                 throw Exception("Cannot TRUNCATE dictionary", ErrorCodes::SYNTAX_ERROR);

-            getContext()->checkAccess(AccessType::TRUNCATE, table_id);
+            context_->checkAccess(AccessType::TRUNCATE, table_id);
             if (table->isStaticStorage())
                 throw
Exception(ErrorCodes::TABLE_IS_READ_ONLY, "Table is read-only"); table->checkTableCanBeDropped(); - auto table_lock = table->lockExclusively(getContext()->getCurrentQueryId(), getContext()->getSettingsRef().lock_acquire_timeout); + auto table_lock = table->lockExclusively(context_->getCurrentQueryId(), context_->getSettingsRef().lock_acquire_timeout); auto metadata_snapshot = table->getInMemoryMetadataPtr(); /// Drop table data, don't touch metadata - table->truncate(query_ptr, metadata_snapshot, getContext(), table_lock); + table->truncate(query_ptr, metadata_snapshot, context_, table_lock); } else if (query.kind == ASTDropQuery::Kind::Drop) { - getContext()->checkAccess(drop_storage, table_id); + context_->checkAccess(drop_storage, table_id); if (table->isDictionary()) { @@ -221,9 +224,11 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ASTDropQuery & query, DatabaseP TableExclusiveLockHolder table_lock; if (database->getUUID() == UUIDHelpers::Nil) - table_lock = table->lockExclusively(getContext()->getCurrentQueryId(), getContext()->getSettingsRef().lock_acquire_timeout); + table_lock = table->lockExclusively(context_->getCurrentQueryId(), context_->getSettingsRef().lock_acquire_timeout); - database->dropTable(getContext(), table_id.table_name, query.no_delay); + DatabaseCatalog::instance().tryRemoveLoadingDependencies(table_id, getContext()->getSettingsRef().check_table_dependencies, + is_drop_or_detach_database); + database->dropTable(context_, table_id.table_name, query.no_delay); } db = database; @@ -297,7 +302,7 @@ BlockIO InterpreterDropQuery::executeToDatabase(const ASTDropQuery & query) BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query, DatabasePtr & database, std::vector & uuids_to_wait) { - const auto & database_name = query.database; + const auto & database_name = query.getDatabase(); auto ddl_guard = DatabaseCatalog::instance().getDDLGuard(database_name, ""); database = tryGetDatabase(database_name, query.if_exists); @@ -315,40 +320,34 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query, if (query.kind == ASTDropQuery::Kind::Detach && query.permanently) throw Exception("DETACH PERMANENTLY is not implemented for databases", ErrorCodes::NOT_IMPLEMENTED); -#if USE_MYSQL - if (database->getEngineName() == "MaterializedMySQL") - stopDatabaseSynchronization(database); -#endif - if (auto * replicated = typeid_cast(database.get())) - replicated->stopReplication(); -#if USE_LIBPQXX - if (auto * materialize_postgresql = typeid_cast(database.get())) - materialize_postgresql->stopReplication(); -#endif + if (database->hasReplicationThread()) + database->stopReplication(); if (database->shouldBeEmptyOnDetach()) { ASTDropQuery query_for_table; query_for_table.kind = query.kind; query_for_table.if_exists = true; - query_for_table.database = database_name; + query_for_table.setDatabase(database_name); query_for_table.no_delay = query.no_delay; /// Flush should not be done if shouldBeEmptyOnDetach() == false, /// since in this case getTablesIterator() may do some additional work, - /// see DatabaseMaterializedMySQL<>::getTablesIterator() + /// see DatabaseMaterializedMySQL::getTablesIterator() for (auto iterator = database->getTablesIterator(getContext()); iterator->isValid(); iterator->next()) { iterator->table()->flush(); } - for (auto iterator = database->getTablesIterator(getContext()); iterator->isValid(); iterator->next()) + auto table_context = Context::createCopy(getContext()); + table_context->setInternalQuery(true); + 
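The two statements just above, together with the loop that follows, are the reason executeToTableImpl() now takes an explicit ContextPtr: DROP DATABASE drops each contained table under a copied context flagged as internal, and that context must be threaded into every nested call instead of the interpreter's own getContext(). Sketched below using only names that appear in this diff, plus a hypothetical `dropOneTable()` standing in for the executeToTableImpl() call chain:

    void dropAllTablesInternally(const DatabasePtr & database, ContextPtr base_context)
    {
        auto table_context = Context::createCopy(base_context);
        table_context->setInternalQuery(true);   /// nested drops are not user-issued queries

        for (auto it = database->getTablesIterator(table_context); it->isValid(); it->next())
            dropOneTable(table_context, it->name());   /// hypothetical helper
    }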
for (auto iterator = database->getTablesIterator(table_context); iterator->isValid(); iterator->next()) { DatabasePtr db; UUID table_to_wait = UUIDHelpers::Nil; - query_for_table.table = iterator->name(); + query_for_table.setTable(iterator->name()); query_for_table.is_dictionary = iterator->table()->isDictionary(); - executeToTableImpl(query_for_table, db, table_to_wait); + executeToTableImpl(table_context, query_for_table, db, table_to_wait); uuids_to_wait.push_back(table_to_wait); } } @@ -380,29 +379,29 @@ AccessRightsElements InterpreterDropQuery::getRequiredAccessForDDLOnCluster() co AccessRightsElements required_access; const auto & drop = query_ptr->as(); - if (drop.table.empty()) + if (!drop.table) { if (drop.kind == ASTDropQuery::Kind::Detach) - required_access.emplace_back(AccessType::DROP_DATABASE, drop.database); + required_access.emplace_back(AccessType::DROP_DATABASE, drop.getDatabase()); else if (drop.kind == ASTDropQuery::Kind::Drop) - required_access.emplace_back(AccessType::DROP_DATABASE, drop.database); + required_access.emplace_back(AccessType::DROP_DATABASE, drop.getDatabase()); } else if (drop.is_dictionary) { if (drop.kind == ASTDropQuery::Kind::Detach) - required_access.emplace_back(AccessType::DROP_DICTIONARY, drop.database, drop.table); + required_access.emplace_back(AccessType::DROP_DICTIONARY, drop.getDatabase(), drop.getTable()); else if (drop.kind == ASTDropQuery::Kind::Drop) - required_access.emplace_back(AccessType::DROP_DICTIONARY, drop.database, drop.table); + required_access.emplace_back(AccessType::DROP_DICTIONARY, drop.getDatabase(), drop.getTable()); } else if (!drop.temporary) { /// It can be view or table. if (drop.kind == ASTDropQuery::Kind::Drop) - required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.database, drop.table); + required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.getDatabase(), drop.getTable()); else if (drop.kind == ASTDropQuery::Kind::Truncate) - required_access.emplace_back(AccessType::TRUNCATE, drop.database, drop.table); + required_access.emplace_back(AccessType::TRUNCATE, drop.getDatabase(), drop.getTable()); else if (drop.kind == ASTDropQuery::Kind::Detach) - required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.database, drop.table); + required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.getDatabase(), drop.getTable()); } return required_access; @@ -419,8 +418,8 @@ void InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind kind, ContextPtr { /// We create and execute `drop` query for internal table. 
auto drop_query = std::make_shared(); - drop_query->database = target_table_id.database_name; - drop_query->table = target_table_id.table_name; + drop_query->setDatabase(target_table_id.database_name); + drop_query->setTable(target_table_id.table_name); drop_query->kind = kind; drop_query->no_delay = no_delay; drop_query->if_exists = true; diff --git a/src/Interpreters/InterpreterDropQuery.h b/src/Interpreters/InterpreterDropQuery.h index accadf690ca..1a38abcdff9 100644 --- a/src/Interpreters/InterpreterDropQuery.h +++ b/src/Interpreters/InterpreterDropQuery.h @@ -36,7 +36,7 @@ private: BlockIO executeToDatabaseImpl(const ASTDropQuery & query, DatabasePtr & database, std::vector & uuids_to_wait); BlockIO executeToTable(ASTDropQuery & query); - BlockIO executeToTableImpl(ASTDropQuery & query, DatabasePtr & db, UUID & uuid_to_wait); + BlockIO executeToTableImpl(ContextPtr context_, ASTDropQuery & query, DatabasePtr & db, UUID & uuid_to_wait); static void waitForTableToBeActuallyDroppedOrDetached(const ASTDropQuery & query, const DatabasePtr & db, const UUID & uuid_to_wait); diff --git a/src/Interpreters/InterpreterExistsQuery.cpp b/src/Interpreters/InterpreterExistsQuery.cpp index 5af51c61b29..758c6d81407 100644 --- a/src/Interpreters/InterpreterExistsQuery.cpp +++ b/src/Interpreters/InterpreterExistsQuery.cpp @@ -44,25 +44,25 @@ QueryPipeline InterpreterExistsQuery::executeImpl() if (exists_query->temporary) { result = static_cast(getContext()->tryResolveStorageID( - {"", exists_query->table}, Context::ResolveExternal)); + {"", exists_query->getTable()}, Context::ResolveExternal)); } else { - String database = getContext()->resolveDatabase(exists_query->database); - getContext()->checkAccess(AccessType::SHOW_TABLES, database, exists_query->table); - result = DatabaseCatalog::instance().isTableExist({database, exists_query->table}, getContext()); + String database = getContext()->resolveDatabase(exists_query->getDatabase()); + getContext()->checkAccess(AccessType::SHOW_TABLES, database, exists_query->getTable()); + result = DatabaseCatalog::instance().isTableExist({database, exists_query->getTable()}, getContext()); } } else if ((exists_query = query_ptr->as())) { - String database = getContext()->resolveDatabase(exists_query->database); - getContext()->checkAccess(AccessType::SHOW_TABLES, database, exists_query->table); - auto table = DatabaseCatalog::instance().tryGetTable({database, exists_query->table}, getContext()); + String database = getContext()->resolveDatabase(exists_query->getDatabase()); + getContext()->checkAccess(AccessType::SHOW_TABLES, database, exists_query->getTable()); + auto table = DatabaseCatalog::instance().tryGetTable({database, exists_query->getTable()}, getContext()); result = table && table->isView(); } else if ((exists_query = query_ptr->as())) { - String database = getContext()->resolveDatabase(exists_query->database); + String database = getContext()->resolveDatabase(exists_query->getDatabase()); getContext()->checkAccess(AccessType::SHOW_DATABASES, database); result = DatabaseCatalog::instance().isDatabaseExist(database); } @@ -70,9 +70,9 @@ QueryPipeline InterpreterExistsQuery::executeImpl() { if (exists_query->temporary) throw Exception("Temporary dictionaries are not possible.", ErrorCodes::SYNTAX_ERROR); - String database = getContext()->resolveDatabase(exists_query->database); - getContext()->checkAccess(AccessType::SHOW_DICTIONARIES, database, exists_query->table); - result = DatabaseCatalog::instance().isDictionaryExist({database, exists_query->table}); 
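The getDatabase()/getTable() and setDatabase()/setTable() calls that dominate this diff all come from one underlying change: queries appear to store the database and table as nullable AST children rather than plain strings, so an absent name (`!create.table`) is no longer conflated with an empty one (`create.table.empty()`). A simplified sketch of what such accessors might look like; the member types and the ASTIdentifier usage here are assumptions:

    struct QueryWithTableAndDatabase   /// hypothetical, heavily simplified
    {
        ASTPtr database;   /// null when the query named no database
        ASTPtr table;

        String getDatabase() const
        {
            return database ? database->as<ASTIdentifier &>().name() : "";
        }

        void setDatabase(const String & name)
        {
            database = name.empty() ? nullptr : std::make_shared<ASTIdentifier>(name);
        }
    };

This also explains why truth-tests flip direction throughout the diff: `if (drop.table)` now asks "was a table named at all?", which a raw String member could not express.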
+ String database = getContext()->resolveDatabase(exists_query->getDatabase()); + getContext()->checkAccess(AccessType::SHOW_DICTIONARIES, database, exists_query->getTable()); + result = DatabaseCatalog::instance().isDictionaryExist({database, exists_query->getTable()}); } return QueryPipeline(std::make_shared(Block{{ diff --git a/src/Interpreters/InterpreterExternalDDLQuery.cpp b/src/Interpreters/InterpreterExternalDDLQuery.cpp index ac3f18f1741..adceb1fe419 100644 --- a/src/Interpreters/InterpreterExternalDDLQuery.cpp +++ b/src/Interpreters/InterpreterExternalDDLQuery.cpp @@ -9,7 +9,7 @@ #include #include -#ifdef USE_MYSQL +#if USE_MYSQL # include # include # include @@ -38,7 +38,7 @@ BlockIO InterpreterExternalDDLQuery::execute() if (external_ddl_query.from->name == "MySQL") { -#ifdef USE_MYSQL +#if USE_MYSQL const ASTs & arguments = external_ddl_query.from->arguments->children; if (arguments.size() != 2 || !arguments[0]->as() || !arguments[1]->as()) diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index 231eb15b02f..99b68897549 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -62,7 +62,18 @@ StoragePtr InterpreterInsertQuery::getTable(ASTInsertQuery & query) return table_function_ptr->execute(query.table_function, getContext(), table_function_ptr->getName()); } - query.table_id = getContext()->resolveStorageID(query.table_id); + if (query.table_id) + { + query.table_id = getContext()->resolveStorageID(query.table_id); + } + else + { + /// Insert query parser does not fill table_id because table and + /// database can be parameters and be filled after parsing. + StorageID local_table_id(query.getDatabase(), query.getTable()); + query.table_id = getContext()->resolveStorageID(local_table_id); + } + return DatabaseCatalog::instance().getTable(query.table_id, getContext()); } diff --git a/src/Interpreters/InterpreterOptimizeQuery.cpp b/src/Interpreters/InterpreterOptimizeQuery.cpp index a44a49ec020..f9a701a0a77 100644 --- a/src/Interpreters/InterpreterOptimizeQuery.cpp +++ b/src/Interpreters/InterpreterOptimizeQuery.cpp @@ -79,7 +79,7 @@ AccessRightsElements InterpreterOptimizeQuery::getRequiredAccess() const { const auto & optimize = query_ptr->as(); AccessRightsElements required_access; - required_access.emplace_back(AccessType::OPTIMIZE, optimize.database, optimize.table); + required_access.emplace_back(AccessType::OPTIMIZE, optimize.getDatabase(), optimize.getTable()); return required_access; } diff --git a/src/Interpreters/InterpreterRenameQuery.cpp b/src/Interpreters/InterpreterRenameQuery.cpp index 72d7e9b1cba..f4b3fff19b6 100644 --- a/src/Interpreters/InterpreterRenameQuery.cpp +++ b/src/Interpreters/InterpreterRenameQuery.cpp @@ -78,6 +78,13 @@ BlockIO InterpreterRenameQuery::executeToTables(const ASTRenameQuery & rename, c for (const auto & elem : descriptions) { + if (elem.if_exists) + { + assert(!rename.exchange); + if (!database_catalog.isTableExist(StorageID(elem.from_database_name, elem.from_table_name), getContext())) + continue; + } + bool exchange_tables; if (rename.exchange) { @@ -95,12 +102,14 @@ BlockIO InterpreterRenameQuery::executeToTables(const ASTRenameQuery & rename, c } DatabasePtr database = database_catalog.getDatabase(elem.from_database_name); - if (typeid_cast(database.get()) - && !getContext()->getClientInfo().is_replicated_database_internal) + if (typeid_cast(database.get()) && !getContext()->getClientInfo().is_replicated_database_internal) { 
if (1 < descriptions.size()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Database {} is Replicated, " - "it does not support renaming of multiple tables in single query.", elem.from_database_name); + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, + "Database {} is Replicated, " + "it does not support renaming of multiple tables in single query.", + elem.from_database_name); UniqueTableName from(elem.from_database_name, elem.from_table_name); UniqueTableName to(elem.to_database_name, elem.to_table_name); @@ -110,6 +119,11 @@ BlockIO InterpreterRenameQuery::executeToTables(const ASTRenameQuery & rename, c } else { + TableNamesSet dependencies; + if (!exchange_tables) + dependencies = database_catalog.tryRemoveLoadingDependencies(StorageID(elem.from_database_name, elem.from_table_name), + getContext()->getSettingsRef().check_table_dependencies); + database->renameTable( getContext(), elem.from_table_name, @@ -117,6 +131,9 @@ BlockIO InterpreterRenameQuery::executeToTables(const ASTRenameQuery & rename, c elem.to_table_name, exchange_tables, rename.dictionary); + + if (!dependencies.empty()) + DatabaseCatalog::instance().addLoadingDependencies(QualifiedTableName{elem.to_database_name, elem.to_table_name}, std::move(dependencies)); } } @@ -133,9 +150,14 @@ BlockIO InterpreterRenameQuery::executeToDatabase(const ASTRenameQuery &, const const auto & new_name = descriptions.back().to_database_name; auto & catalog = DatabaseCatalog::instance(); - auto db = catalog.getDatabase(old_name); - catalog.assertDatabaseDoesntExist(new_name); - db->renameDatabase(new_name); + auto db = descriptions.front().if_exists ? catalog.tryGetDatabase(old_name) : catalog.getDatabase(old_name); + + if (db) + { + catalog.assertDatabaseDoesntExist(new_name); + db->renameDatabase(getContext(), new_name); + } + return {}; } diff --git a/src/Interpreters/InterpreterRenameQuery.h b/src/Interpreters/InterpreterRenameQuery.h index dfcd741754e..194f6266634 100644 --- a/src/Interpreters/InterpreterRenameQuery.h +++ b/src/Interpreters/InterpreterRenameQuery.h @@ -31,7 +31,8 @@ struct RenameDescription from_database_name(elem.from.database.empty() ? current_database : elem.from.database), from_table_name(elem.from.table), to_database_name(elem.to.database.empty() ? 
current_database : elem.to.database), - to_table_name(elem.to.table) + to_table_name(elem.to.table), + if_exists(elem.if_exists) {} String from_database_name; @@ -39,6 +40,7 @@ struct RenameDescription String to_database_name; String to_table_name; + bool if_exists; }; using RenameDescriptions = std::vector; @@ -59,7 +61,7 @@ public: private: BlockIO executeToTables(const ASTRenameQuery & rename, const RenameDescriptions & descriptions, TableGuards & ddl_guards); - static BlockIO executeToDatabase(const ASTRenameQuery & rename, const RenameDescriptions & descriptions); + BlockIO executeToDatabase(const ASTRenameQuery & rename, const RenameDescriptions & descriptions); AccessRightsElements getRequiredAccess() const; diff --git a/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp b/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp index f7d98e2e0fe..3bb78b57702 100644 --- a/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp +++ b/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp @@ -56,7 +56,7 @@ InterpreterSelectIntersectExceptQuery::InterpreterSelectIntersectExceptQuery( ASTSelectIntersectExceptQuery * ast = query_ptr->as(); final_operator = ast->final_operator; - const auto & children = ast->children; + const auto & children = ast->getListOfSelects(); size_t num_children = children.size(); /// AST must have been changed by the visitor. diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index fc6e193fc6e..c8f48f2ed1f 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -358,7 +358,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( std::shared_ptr table_join = joined_tables.makeTableJoin(query); if (storage) - row_policy_filter = context->getRowPolicyCondition(table_id.getDatabaseName(), table_id.getTableName(), RowPolicy::SELECT_FILTER); + row_policy_filter = context->getRowPolicyFilter(table_id.getDatabaseName(), table_id.getTableName(), RowPolicyFilterType::SELECT_FILTER); StorageView * view = nullptr; if (storage) @@ -878,52 +878,44 @@ static bool hasWithTotalsInAnySubqueryInFromClause(const ASTSelectQuery & query) return true; /** NOTE You can also check that the table in the subquery is distributed, and that it only looks at one shard. - * In other cases, totals will be computed on the initiating server of the query, and it is not necessary to read the data to the end. - */ - + * In other cases, totals will be computed on the initiating server of the query, and it is not necessary to read the data to the end. + */ if (auto query_table = extractTableExpression(query, 0)) { if (const auto * ast_union = query_table->as()) { - for (const auto & elem : ast_union->list_of_selects->children) + /** NOTE + * 1. For ASTSelectWithUnionQuery after normalization for union child node the height of the AST tree is at most 2. + * 2. For ASTSelectIntersectExceptQuery after normalization in case there are intersect or except nodes, + * the height of the AST tree can have any depth (each intersect/except adds a level), but the + * number of children in those nodes is always 2. + */ + std::function traverse_recursively = [&](ASTPtr child_ast) -> bool { - /// After normalization for union child node the height of the AST tree is at most 2. 
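The rewrite below folds the old special cases into one recursive lambda. The idiom, shown generically: a std::function is declared first and captured by reference inside its own body, so it can recurse to arbitrary depth over union/intersect/except nesting. A self-contained sketch over a stand-in node type:

    #include <functional>
    #include <memory>
    #include <vector>

    struct Node
    {
        bool with_totals = false;
        std::vector<std::shared_ptr<Node>> children;
    };

    bool hasWithTotals(const std::shared_ptr<Node> & root)
    {
        std::function<bool(const std::shared_ptr<Node> &)> traverse = [&](const auto & node) -> bool
        {
            if (node->with_totals)
                return true;
            for (const auto & child : node->children)
                if (traverse(child))   /// self-reference through the captured std::function
                    return true;
            return false;
        };
        return traverse(root);
    }

A plain `auto` lambda cannot name itself, which is why the std::function indirection (or an explicit helper function) is needed for this kind of traversal.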
- if (const auto * child_union = elem->as()) + if (const auto * select_child = child_ast->as ()) { - for (const auto & child_elem : child_union->list_of_selects->children) - if (hasWithTotalsInAnySubqueryInFromClause(child_elem->as())) + if (hasWithTotalsInAnySubqueryInFromClause(select_child->as())) + return true; + } + else if (const auto * union_child = child_ast->as()) + { + for (const auto & subchild : union_child->list_of_selects->children) + if (traverse_recursively(subchild)) return true; } - /// After normalization in case there are intersect or except nodes, the height of - /// the AST tree can have any depth (each intersect/except adds a level), but the - /// number of children in those nodes is always 2. - else if (elem->as()) + else if (const auto * intersect_child = child_ast->as()) { - std::function traverse_recursively = [&](ASTPtr child_ast) -> bool - { - if (const auto * child = child_ast->as ()) - return hasWithTotalsInAnySubqueryInFromClause(child->as()); - - if (const auto * child = child_ast->as()) - for (const auto & subchild : child->list_of_selects->children) - if (traverse_recursively(subchild)) - return true; - - if (const auto * child = child_ast->as()) - for (const auto & subchild : child->children) - if (traverse_recursively(subchild)) - return true; - return false; - }; - if (traverse_recursively(elem)) - return true; + auto selects = intersect_child->getListOfSelects(); + for (const auto & subchild : selects) + if (traverse_recursively(subchild)) + return true; } - else - { - if (hasWithTotalsInAnySubqueryInFromClause(elem->as())) - return true; - } - } + return false; + }; + + for (const auto & elem : ast_union->list_of_selects->children) + if (traverse_recursively(elem)) + return true; } } diff --git a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp index e7ea08e557d..3da7bf12805 100644 --- a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp +++ b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp @@ -86,7 +86,9 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery( if (num_children == 1 && settings_limit_offset_needed) { const ASTPtr first_select_ast = ast->list_of_selects->children.at(0); - ASTSelectQuery * select_query = first_select_ast->as(); + ASTSelectQuery * select_query = dynamic_cast(first_select_ast.get()); + if (!select_query) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid type in list_of_selects: {}", first_select_ast->getID()); if (!select_query->withFill() && !select_query->limit_with_ties) { diff --git a/src/Interpreters/InterpreterShowCreateQuery.cpp b/src/Interpreters/InterpreterShowCreateQuery.cpp index 84dbae0fac5..6ae51c1ff23 100644 --- a/src/Interpreters/InterpreterShowCreateQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateQuery.cpp @@ -64,26 +64,26 @@ QueryPipeline InterpreterShowCreateQuery::executeImpl() { if (!ast_create_query.isView()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "{}.{} is not a VIEW", - backQuote(ast_create_query.database), backQuote(ast_create_query.table)); + backQuote(ast_create_query.getDatabase()), backQuote(ast_create_query.getTable())); } else if (is_dictionary) { if (!ast_create_query.is_dictionary) throw Exception(ErrorCodes::BAD_ARGUMENTS, "{}.{} is not a DICTIONARY", - backQuote(ast_create_query.database), backQuote(ast_create_query.table)); + backQuote(ast_create_query.getDatabase()), backQuote(ast_create_query.getTable())); } } else if ((show_query = query_ptr->as())) { if (show_query->temporary) throw 
Exception("Temporary databases are not possible.", ErrorCodes::SYNTAX_ERROR); - show_query->database = getContext()->resolveDatabase(show_query->database); - getContext()->checkAccess(AccessType::SHOW_DATABASES, show_query->database); - create_query = DatabaseCatalog::instance().getDatabase(show_query->database)->getCreateDatabaseQuery(); + show_query->setDatabase(getContext()->resolveDatabase(show_query->getDatabase())); + getContext()->checkAccess(AccessType::SHOW_DATABASES, show_query->getDatabase()); + create_query = DatabaseCatalog::instance().getDatabase(show_query->getDatabase())->getCreateDatabaseQuery(); } if (!create_query) - throw Exception("Unable to show the create query of " + show_query->table + ". Maybe it was created by the system.", ErrorCodes::THERE_IS_NO_QUERY); + throw Exception("Unable to show the create query of " + show_query->getTable() + ". Maybe it was created by the system.", ErrorCodes::THERE_IS_NO_QUERY); if (!getContext()->getSettingsRef().show_table_uuid_in_table_create_query_if_not_nil) { diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index 69bf036ae97..960fddccb8c 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -217,12 +217,12 @@ BlockIO InterpreterSystemQuery::execute() /// Make canonical query for simpler processing if (query.type == Type::RELOAD_DICTIONARY) { - if (!query.database.empty()) - query.table = query.database + "." + query.table; + if (query.database) + query.setTable(query.getDatabase() + "." + query.getTable()); } - else if (!query.table.empty()) + else if (query.table) { - table_id = getContext()->resolveStorageID(StorageID(query.database, query.table), Context::ResolveOrdinary); + table_id = getContext()->resolveStorageID(StorageID(query.getDatabase(), query.getTable()), Context::ResolveOrdinary); } @@ -302,7 +302,7 @@ BlockIO InterpreterSystemQuery::execute() getContext()->checkAccess(AccessType::SYSTEM_RELOAD_DICTIONARY); auto & external_dictionaries_loader = system_context->getExternalDictionariesLoader(); - external_dictionaries_loader.reloadDictionary(query.table, getContext()); + external_dictionaries_loader.reloadDictionary(query.getTable(), getContext()); ExternalDictionariesLoader::resetAll(); break; @@ -523,7 +523,7 @@ StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica, auto table_lock = table->lockExclusively(getContext()->getCurrentQueryId(), getContext()->getSettingsRef().lock_acquire_timeout); create_ast = database->getCreateTableQuery(replica.table_name, getContext()); - database->detachTable(replica.table_name); + database->detachTable(system_context, replica.table_name); } table.reset(); @@ -544,7 +544,7 @@ StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica, constraints, false); - database->attachTable(replica.table_name, table, data_path); + database->attachTable(system_context, replica.table_name, table, data_path); table->startup(); return table; @@ -594,10 +594,10 @@ void InterpreterSystemQuery::dropReplica(ASTSystemQuery & query) if (!dropReplicaImpl(query, table)) throw Exception(ErrorCodes::BAD_ARGUMENTS, table_is_not_replicated.data(), table_id.getNameForLogs()); } - else if (!query.database.empty()) + else if (query.database) { - getContext()->checkAccess(AccessType::SYSTEM_DROP_REPLICA, query.database); - DatabasePtr database = DatabaseCatalog::instance().getDatabase(query.database); + 
getContext()->checkAccess(AccessType::SYSTEM_DROP_REPLICA, query.getDatabase()); + DatabasePtr database = DatabaseCatalog::instance().getDatabase(query.getDatabase()); for (auto iterator = database->getTablesIterator(getContext()); iterator->isValid(); iterator->next()) dropReplicaImpl(query, iterator->table()); LOG_TRACE(log, "Dropped replica {} from database {}", query.replica, backQuoteIfNeed(database->getDatabaseName())); @@ -790,84 +790,84 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() case Type::STOP_MERGES: [[fallthrough]]; case Type::START_MERGES: { - if (query.table.empty()) + if (!query.table) required_access.emplace_back(AccessType::SYSTEM_MERGES); else - required_access.emplace_back(AccessType::SYSTEM_MERGES, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_MERGES, query.getDatabase(), query.getTable()); break; } case Type::STOP_TTL_MERGES: [[fallthrough]]; case Type::START_TTL_MERGES: { - if (query.table.empty()) + if (!query.table) required_access.emplace_back(AccessType::SYSTEM_TTL_MERGES); else - required_access.emplace_back(AccessType::SYSTEM_TTL_MERGES, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_TTL_MERGES, query.getDatabase(), query.getTable()); break; } case Type::STOP_MOVES: [[fallthrough]]; case Type::START_MOVES: { - if (query.table.empty()) + if (!query.table) required_access.emplace_back(AccessType::SYSTEM_MOVES); else - required_access.emplace_back(AccessType::SYSTEM_MOVES, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_MOVES, query.getDatabase(), query.getTable()); break; } case Type::STOP_FETCHES: [[fallthrough]]; case Type::START_FETCHES: { - if (query.table.empty()) + if (!query.table) required_access.emplace_back(AccessType::SYSTEM_FETCHES); else - required_access.emplace_back(AccessType::SYSTEM_FETCHES, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_FETCHES, query.getDatabase(), query.getTable()); break; } case Type::STOP_DISTRIBUTED_SENDS: [[fallthrough]]; case Type::START_DISTRIBUTED_SENDS: { - if (query.table.empty()) + if (!query.table) required_access.emplace_back(AccessType::SYSTEM_DISTRIBUTED_SENDS); else - required_access.emplace_back(AccessType::SYSTEM_DISTRIBUTED_SENDS, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_DISTRIBUTED_SENDS, query.getDatabase(), query.getTable()); break; } case Type::STOP_REPLICATED_SENDS: [[fallthrough]]; case Type::START_REPLICATED_SENDS: { - if (query.table.empty()) + if (!query.table) required_access.emplace_back(AccessType::SYSTEM_REPLICATED_SENDS); else - required_access.emplace_back(AccessType::SYSTEM_REPLICATED_SENDS, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_REPLICATED_SENDS, query.getDatabase(), query.getTable()); break; } case Type::STOP_REPLICATION_QUEUES: [[fallthrough]]; case Type::START_REPLICATION_QUEUES: { - if (query.table.empty()) + if (!query.table) required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES); else - required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES, query.getDatabase(), query.getTable()); break; } case Type::DROP_REPLICA: { - required_access.emplace_back(AccessType::SYSTEM_DROP_REPLICA, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_DROP_REPLICA, query.getDatabase(), query.getTable()); break; } 
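Every START/STOP case above follows the same shape, so it is worth seeing once in isolation: with the nullable table member, a missing table means the server-wide privilege and a present one means the privilege scoped to database.table. `addSystemAccess` is a hypothetical helper, not something this patch introduces:

    static void addSystemAccess(AccessRightsElements & out, AccessFlags flag, const ASTSystemQuery & query)
    {
        if (!query.table)
            out.emplace_back(flag);   /// e.g. SYSTEM STOP MERGES with no table: server-wide
        else
            out.emplace_back(flag, query.getDatabase(), query.getTable());
    }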
case Type::RESTORE_REPLICA: { - required_access.emplace_back(AccessType::SYSTEM_RESTORE_REPLICA, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_RESTORE_REPLICA, query.getDatabase(), query.getTable()); break; } case Type::SYNC_REPLICA: { - required_access.emplace_back(AccessType::SYSTEM_SYNC_REPLICA, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_SYNC_REPLICA, query.getDatabase(), query.getTable()); break; } case Type::RESTART_REPLICA: { - required_access.emplace_back(AccessType::SYSTEM_RESTART_REPLICA, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_RESTART_REPLICA, query.getDatabase(), query.getTable()); break; } case Type::RESTART_REPLICAS: @@ -877,7 +877,7 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() } case Type::FLUSH_DISTRIBUTED: { - required_access.emplace_back(AccessType::SYSTEM_FLUSH_DISTRIBUTED, query.database, query.table); + required_access.emplace_back(AccessType::SYSTEM_FLUSH_DISTRIBUTED, query.getDatabase(), query.getTable()); break; } case Type::FLUSH_LOGS: diff --git a/src/Interpreters/MergeJoin.cpp b/src/Interpreters/MergeJoin.cpp index 48d5f5254ff..6f4fef46886 100644 --- a/src/Interpreters/MergeJoin.cpp +++ b/src/Interpreters/MergeJoin.cpp @@ -552,10 +552,13 @@ MergeJoin::MergeJoin(std::shared_ptr table_join_, const Block & right LOG_DEBUG(log, "Joining keys: left [{}], right [{}]", fmt::join(key_names_left, ", "), fmt::join(key_names_right, ", ")); - /// Temporary disable 'partial_merge_join_left_table_buffer_bytes' without 'partial_merge_join_optimizations' - if (table_join->enablePartialMergeJoinOptimizations()) - if (size_t max_bytes = table_join->maxBytesInLeftBuffer()) - left_blocks_buffer = std::make_shared(left_sort_description, max_bytes); + if (size_t max_bytes = table_join->maxBytesInLeftBuffer(); max_bytes > 0) + { + /// Disabled due to https://github.com/ClickHouse/ClickHouse/issues/31009 + // left_blocks_buffer = std::make_shared(left_sort_description, max_bytes); + LOG_WARNING(log, "`partial_merge_join_left_table_buffer_bytes` is disabled in current version of ClickHouse"); + UNUSED(left_blocks_buffer); + } } /// Has to be called even if totals are empty diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp index a96713e3b5d..54b87e3bed6 100644 --- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp +++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp @@ -421,23 +421,42 @@ static ASTPtr getOrderByPolicy( void InterpreterCreateImpl::validate(const InterpreterCreateImpl::TQuery & create_query, ContextPtr) { - /// This is dangerous, because the like table may not exists in ClickHouse - if (create_query.like_table) - throw Exception("Cannot convert create like statement to ClickHouse SQL", ErrorCodes::NOT_IMPLEMENTED); - - const auto & create_defines = create_query.columns_list->as(); - - if (!create_defines || !create_defines->columns || create_defines->columns->children.empty()) - throw Exception("Missing definition of columns.", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED); + if (!create_query.like_table) + { + bool missing_columns_definition = true; + if (create_query.columns_list) + { + const auto & create_defines = create_query.columns_list->as(); + if (create_defines && create_defines->columns && !create_defines->columns->children.empty()) + missing_columns_definition = false; + } + if (missing_columns_definition) + throw Exception("Missing 
definition of columns.", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED); + } } ASTs InterpreterCreateImpl::getRewrittenQueries( const TQuery & create_query, ContextPtr context, const String & mapped_to_database, const String & mysql_database) { - auto rewritten_query = std::make_shared(); if (resolveDatabase(create_query.database, mysql_database, mapped_to_database, context) != mapped_to_database) return {}; + if (create_query.like_table) + { + auto * table_like = create_query.like_table->as(); + if (table_like->compound() && table_like->getTableId().database_name != mysql_database) + return {}; + String table_name = table_like->shortName(); + ASTPtr rewritten_create_ast = DatabaseCatalog::instance().getDatabase(mapped_to_database)->getCreateTableQuery(table_name, context); + auto * create_ptr = rewritten_create_ast->as(); + create_ptr->setDatabase(mapped_to_database); + create_ptr->setTable(create_query.table); + create_ptr->uuid = UUIDHelpers::generateV4(); + create_ptr->if_not_exists = create_query.if_not_exists; + return ASTs{rewritten_create_ast}; + } + + auto rewritten_query = std::make_shared(); const auto & create_defines = create_query.columns_list->as(); NamesAndTypesList columns_name_and_type = getColumnsList(create_defines->columns); @@ -494,8 +513,8 @@ ASTs InterpreterCreateImpl::getRewrittenQueries( storage->set(storage->engine, makeASTFunction("ReplacingMergeTree", std::make_shared(version_column_name))); - rewritten_query->database = mapped_to_database; - rewritten_query->table = create_query.table; + rewritten_query->setDatabase(mapped_to_database); + rewritten_query->setTable(create_query.table); rewritten_query->if_not_exists = create_query.if_not_exists; rewritten_query->set(rewritten_query->storage, storage); rewritten_query->set(rewritten_query->columns_list, columns); @@ -510,14 +529,14 @@ void InterpreterDropImpl::validate(const InterpreterDropImpl::TQuery & /*query*/ ASTs InterpreterDropImpl::getRewrittenQueries( const InterpreterDropImpl::TQuery & drop_query, ContextPtr context, const String & mapped_to_database, const String & mysql_database) { - const auto & database_name = resolveDatabase(drop_query.database, mysql_database, mapped_to_database, context); + const auto & database_name = resolveDatabase(drop_query.getDatabase(), mysql_database, mapped_to_database, context); /// Skip drop database|view|dictionary - if (database_name != mapped_to_database || drop_query.table.empty() || drop_query.is_view || drop_query.is_dictionary) + if (database_name != mapped_to_database || !drop_query.table || drop_query.is_view || drop_query.is_dictionary) return {}; ASTPtr rewritten_query = drop_query.clone(); - rewritten_query->as()->database = mapped_to_database; + rewritten_query->as()->setDatabase(mapped_to_database); return ASTs{rewritten_query}; } @@ -569,8 +588,8 @@ ASTs InterpreterAlterImpl::getRewrittenQueries( auto rewritten_alter_query = std::make_shared(); auto rewritten_rename_query = std::make_shared(); - rewritten_alter_query->database = mapped_to_database; - rewritten_alter_query->table = alter_query.table; + rewritten_alter_query->setDatabase(mapped_to_database); + rewritten_alter_query->setTable(alter_query.table); rewritten_alter_query->alter_object = ASTAlterQuery::AlterObjectType::TABLE; rewritten_alter_query->set(rewritten_alter_query->command_list, std::make_shared()); diff --git a/src/Interpreters/PredicateRewriteVisitor.cpp b/src/Interpreters/PredicateRewriteVisitor.cpp index 0f2a11e6ff1..b3425750b56 100644 --- a/src/Interpreters/PredicateRewriteVisitor.cpp 
+++ b/src/Interpreters/PredicateRewriteVisitor.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -37,13 +38,44 @@ void PredicateRewriteVisitorData::visit(ASTSelectWithUnionQuery & union_select_q for (size_t index = 0; index < internal_select_list.size(); ++index) { if (auto * child_union = internal_select_list[index]->as()) - visit(*child_union, internal_select_list[index]); - else { - if (index == 0) - visitFirstInternalSelect(*internal_select_list[0]->as(), internal_select_list[0]); - else - visitOtherInternalSelect(*internal_select_list[index]->as(), internal_select_list[index]); + visit(*child_union, internal_select_list[index]); + } + else if (auto * child_select = internal_select_list[index]->as()) + { + visitInternalSelect(index, *child_select, internal_select_list[index]); + } + else if (auto * child_intersect_except = internal_select_list[index]->as()) + { + visit(*child_intersect_except, internal_select_list[index]); + } + } +} + +void PredicateRewriteVisitorData::visitInternalSelect(size_t index, ASTSelectQuery & select_node, ASTPtr & node) +{ + if (index == 0) + visitFirstInternalSelect(select_node, node); + else + visitOtherInternalSelect(select_node, node); +} + +void PredicateRewriteVisitorData::visit(ASTSelectIntersectExceptQuery & intersect_except_query, ASTPtr &) +{ + auto internal_select_list = intersect_except_query.getListOfSelects(); + for (size_t index = 0; index < internal_select_list.size(); ++index) + { + if (auto * union_node = internal_select_list[index]->as()) + { + visit(*union_node, internal_select_list[index]); + } + else if (auto * select_node = internal_select_list[index]->as()) + { + visitInternalSelect(index, *select_node, internal_select_list[index]); + } + else if (auto * intersect_node = internal_select_list[index]->as()) + { + visit(*intersect_node, internal_select_list[index]); } } } diff --git a/src/Interpreters/PredicateRewriteVisitor.h b/src/Interpreters/PredicateRewriteVisitor.h index fc076464925..e7512711596 100644 --- a/src/Interpreters/PredicateRewriteVisitor.h +++ b/src/Interpreters/PredicateRewriteVisitor.h @@ -10,6 +10,8 @@ namespace DB { +class ASTSelectIntersectExceptQuery; + class PredicateRewriteVisitorData : WithContext { public: @@ -40,7 +42,11 @@ private: void visitOtherInternalSelect(ASTSelectQuery & select_query, ASTPtr &); + void visit(ASTSelectIntersectExceptQuery & intersect_except_query, ASTPtr &); + bool rewriteSubquery(ASTSelectQuery & subquery, const Names & inner_columns); + + void visitInternalSelect(size_t index, ASTSelectQuery & select_node, ASTPtr & node); }; using PredicateRewriteMatcher = OneTypeMatcher; diff --git a/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp index 81afa990d3b..4d58f0c97dc 100644 --- a/src/Interpreters/ProcessList.cpp +++ b/src/Interpreters/ProcessList.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -15,6 +16,11 @@ #include +namespace CurrentMetrics +{ + extern const Metric Query; +} + namespace DB { diff --git a/src/Interpreters/RewriteFunctionToSubcolumnVisitor.cpp b/src/Interpreters/RewriteFunctionToSubcolumnVisitor.cpp index debbd9ae8fe..5128bb136c3 100644 --- a/src/Interpreters/RewriteFunctionToSubcolumnVisitor.cpp +++ b/src/Interpreters/RewriteFunctionToSubcolumnVisitor.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include @@ -79,7 +80,8 @@ void RewriteFunctionToSubcolumnData::visit(ASTFunction & function, ASTPtr & ast) if (!columns.has(name_in_storage)) return; - TypeIndex 
column_type_id = columns.get(name_in_storage).type->getTypeId(); + const auto & column_type = columns.get(name_in_storage).type; + TypeIndex column_type_id = column_type->getTypeId(); if (arguments.size() == 1) { @@ -93,12 +95,36 @@ void RewriteFunctionToSubcolumnData::visit(ASTFunction & function, ASTPtr & ast) } else { - auto it = binary_function_to_subcolumn.find(function.name); - if (it != binary_function_to_subcolumn.end()) + if (function.name == "tupleElement" && column_type_id == TypeIndex::Tuple) { - const auto & [type_id, subcolumn_name, transformer] = it->second; - if (column_type_id == type_id) - ast = transformer(name_in_storage, subcolumn_name, arguments[1]); + const auto * literal = arguments[1]->as(); + if (!literal) + return; + + String subcolumn_name; + auto value_type = literal->value.getType(); + if (value_type == Field::Types::UInt64) + { + const auto & type_tuple = assert_cast(*column_type); + auto index = get(literal->value); + subcolumn_name = type_tuple.getNameByPosition(index); + } + else if (value_type == Field::Types::String) + subcolumn_name = get(literal->value); + else + return; + + ast = transformToSubcolumn(name_in_storage, subcolumn_name); + } + else + { + auto it = binary_function_to_subcolumn.find(function.name); + if (it != binary_function_to_subcolumn.end()) + { + const auto & [type_id, subcolumn_name, transformer] = it->second; + if (column_type_id == type_id) + ast = transformer(name_in_storage, subcolumn_name, arguments[1]); + } } } } diff --git a/src/Interpreters/Session.cpp b/src/Interpreters/Session.cpp index 39d2abc9b43..c26d8b52049 100644 --- a/src/Interpreters/Session.cpp +++ b/src/Interpreters/Session.cpp @@ -468,8 +468,8 @@ ContextMutablePtr Session::makeQueryContextImpl(const ClientInfo * client_info_t res_client_info.initial_address = res_client_info.current_address; } - /// Sets that row policies from the initial user should be used too. - query_context->setInitialRowPolicy(); + /// Sets that row policies of the initial user should be used too. + query_context->enableRowPoliciesOfInitialUser(); /// Set user information for the new context: current profiles, roles, access rights. 
if (user_id && !query_context->getUser()) diff --git a/src/Interpreters/StorageID.cpp b/src/Interpreters/StorageID.cpp index 2e76618e4c0..8811adc087b 100644 --- a/src/Interpreters/StorageID.cpp +++ b/src/Interpreters/StorageID.cpp @@ -18,8 +18,8 @@ namespace ErrorCodes StorageID::StorageID(const ASTQueryWithTableAndOutput & query) { - database_name = query.database; - table_name = query.table; + database_name = query.getDatabase(); + table_name = query.getTable(); uuid = query.uuid; assertNotEmpty(); } diff --git a/src/Interpreters/SubstituteColumnOptimizer.cpp b/src/Interpreters/SubstituteColumnOptimizer.cpp new file mode 100644 index 00000000000..da738d3db1e --- /dev/null +++ b/src/Interpreters/SubstituteColumnOptimizer.cpp @@ -0,0 +1,331 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +namespace +{ + +constexpr auto COMPONENT_PART = "__component_"; +constexpr UInt64 COLUMN_PENALTY = 10 * 1024 * 1024; +constexpr Int64 INDEX_PRICE = -1'000'000'000'000'000'000; + +class ComponentMatcher +{ +public: + using Visitor = InDepthNodeVisitor; + + struct Data + { + const ComparisonGraph & graph; + std::set & components; + std::unordered_map & old_name; + std::unordered_map & component; + UInt64 & current_id; + + Data(const ComparisonGraph & graph_, + std::set & components_, + std::unordered_map & old_name_, + std::unordered_map & component_, + UInt64 & current_id_) + : graph(graph_) + , components(components_) + , old_name(old_name_) + , component(component_) + , current_id(current_id_) + { + } + }; + + static void visit(ASTPtr & ast, Data & data) + { + if (auto id = data.graph.getComponentId(ast)) + { + const String name = COMPONENT_PART + std::to_string(*id) + "_" + std::to_string(++data.current_id); + data.old_name[name] = ast->getAliasOrColumnName(); + data.component[name] = *id; + data.components.insert(*id); + ast = std::make_shared(name); + } + } + + static bool needChildVisit(const ASTPtr &, const ASTPtr &) + { + return true; + } +}; + +using ComponentVisitor = ComponentMatcher::Visitor; + +struct ColumnPrice +{ + Int64 compressed_size; + Int64 uncompressed_size; + + ColumnPrice(const Int64 compressed_size_, const Int64 uncompressed_size_) + : compressed_size(compressed_size_) + , uncompressed_size(uncompressed_size_) + { + } + + ColumnPrice() : ColumnPrice(0, 0) {} + + bool operator<(const ColumnPrice & that) const + { + return std::tie(compressed_size, uncompressed_size) < std::tie(that.compressed_size, that.uncompressed_size); + } + + ColumnPrice & operator+=(const ColumnPrice & that) + { + compressed_size += that.compressed_size; + uncompressed_size += that.uncompressed_size; + return *this; + } + + ColumnPrice & operator-=(const ColumnPrice & that) + { + compressed_size -= that.compressed_size; + uncompressed_size -= that.uncompressed_size; + return *this; + } +}; + +using ColumnPriceByName = std::unordered_map; + +class SubstituteColumnMatcher +{ +public: + using Visitor = InDepthNodeVisitor; + + struct Data + { + std::unordered_map id_to_expression_map; + std::unordered_map name_to_component_id; + std::unordered_map old_name; + bool is_select; + }; + + static void visit(ASTPtr & ast, Data & data) + { + const auto * identifier = ast->as(); + if (identifier && data.name_to_component_id.contains(identifier->name())) + { + const String & name = identifier->name(); + const auto component_id = data.name_to_component_id.at(name); + auto 
new_ast = data.id_to_expression_map.at(component_id)->clone(); + + if (data.is_select) + new_ast->setAlias(data.old_name.at(name)); + + ast = new_ast; + } + } + + static bool needChildVisit(const ASTPtr &, const ASTPtr &) + { + return true; + } +}; + +using SubstituteColumnVisitor = SubstituteColumnMatcher::Visitor; + +ColumnPrice calculatePrice( + const ColumnPriceByName & column_prices, + const IdentifierNameSet & identifiers) +{ + ColumnPrice result(0, 0); + for (const auto & ident : identifiers) + { + auto it = column_prices.find(ident); + if (it != column_prices.end()) + result += it->second; + } + + return result; +} + +/// We need to choose one expression in each component, +/// so that total price of all read columns will be minimal. +/// Bruteforce equal ASTs in each component and calculate +/// price of all columns on which ast depends. +/// TODO: branch-and-bound +void bruteforce( + const ComparisonGraph & graph, + const std::vector & components, + size_t current_component, + const ColumnPriceByName & column_prices, + ColumnPrice current_price, + std::vector & expressions_stack, + ColumnPrice & min_price, + std::vector & min_expressions) +{ + if (current_component == components.size()) + { + if (current_price < min_price) + { + min_price = current_price; + min_expressions = expressions_stack; + } + } + else + { + for (const auto & ast : graph.getComponent(components[current_component])) + { + IdentifierNameSet identifiers; + ast->collectIdentifierNames(identifiers); + ColumnPrice expression_price = calculatePrice(column_prices, identifiers); + + expressions_stack.push_back(ast); + current_price += expression_price; + + ColumnPriceByName new_prices(column_prices); + /// Update prices of already counted columns. + for (const auto & identifier : identifiers) + new_prices[identifier] = ColumnPrice(0, 0); + + bruteforce(graph, + components, + current_component + 1, + new_prices, + current_price, + expressions_stack, + min_price, + min_expressions); + + current_price -= expression_price; + expressions_stack.pop_back(); + } + } +} + +} + + +SubstituteColumnOptimizer::SubstituteColumnOptimizer( + ASTSelectQuery * select_query_, + const StorageMetadataPtr & metadata_snapshot_, + const ConstStoragePtr & storage_) + : select_query(select_query_) + , metadata_snapshot(metadata_snapshot_) + , storage(storage_) +{ +} + +void SubstituteColumnOptimizer::perform() +{ + if (!storage) + return; + + const auto column_sizes = storage->getColumnSizes(); + if (column_sizes.empty()) + return; + + const auto & compare_graph = metadata_snapshot->getConstraints().getGraph(); + + // Fill aliases + if (select_query->select()) + { + auto * list = select_query->refSelect()->as(); + if (!list) + throw Exception("List of selected columns must be ASTExpressionList", ErrorCodes::LOGICAL_ERROR); + + for (ASTPtr & ast : list->children) + ast->setAlias(ast->getAliasOrColumnName()); + } + + auto run_for_all = [&](const auto func) + { + if (select_query->where()) + func(select_query->refWhere(), false); + if (select_query->prewhere()) + func(select_query->refPrewhere(), false); + if (select_query->select()) + func(select_query->refSelect(), true); + if (select_query->having()) + func(select_query->refHaving(), false); + }; + + std::set components; + std::unordered_map old_name; + std::unordered_map name_to_component; + + UInt64 counter_id = 0; + + ComponentVisitor::Data component_data( + compare_graph, components, old_name, name_to_component, counter_id); + + IdentifierNameSet identifiers; + auto preprocess = 
[&](ASTPtr & ast, bool) + { + ComponentVisitor(component_data).visit(ast); + ast->collectIdentifierNames(identifiers); + }; + + run_for_all(preprocess); + + const auto primary_key = metadata_snapshot->getColumnsRequiredForPrimaryKey(); + const std::unordered_set primary_key_set(std::begin(primary_key), std::end(primary_key)); + ColumnPriceByName column_prices; + + for (const auto & [column_name, column_size] : column_sizes) + column_prices[column_name] = ColumnPrice(column_size.data_compressed + COLUMN_PENALTY, column_size.data_uncompressed); + + for (const auto & column_name : primary_key) + column_prices[column_name] = ColumnPrice(INDEX_PRICE, INDEX_PRICE); + + for (const auto & column_name : identifiers) + column_prices[column_name] = ColumnPrice(0, 0); + + std::unordered_map id_to_expression_map; + std::vector components_list; + + for (const UInt64 component_id : components) + { + auto component = compare_graph.getComponent(component_id); + if (component.size() == 1) + id_to_expression_map[component_id] = component.front(); + else + components_list.push_back(component_id); + } + + std::vector expressions_stack; + ColumnPrice min_price(std::numeric_limits::max(), std::numeric_limits::max()); + std::vector min_expressions; + + bruteforce(compare_graph, + components_list, + 0, + column_prices, + ColumnPrice(0, 0), + expressions_stack, + min_price, + min_expressions); + + for (size_t i = 0; i < components_list.size(); ++i) + id_to_expression_map[components_list[i]] = min_expressions[i]; + + auto process = [&](ASTPtr & ast, bool is_select) + { + SubstituteColumnVisitor::Data substitute_data{id_to_expression_map, name_to_component, old_name, is_select}; + SubstituteColumnVisitor(substitute_data).visit(ast); + }; + + run_for_all(process); +} + +} diff --git a/src/Interpreters/SubstituteColumnOptimizer.h b/src/Interpreters/SubstituteColumnOptimizer.h new file mode 100644 index 00000000000..63867e80386 --- /dev/null +++ b/src/Interpreters/SubstituteColumnOptimizer.h @@ -0,0 +1,35 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +struct StorageInMemoryMetadata; +using StorageMetadataPtr = std::shared_ptr; + +/// Optimizer that tries to replace columns with equal columns (according to constraints) +/// of lower size (according to compressed and uncompressed size).
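To make the cost model above concrete, here is a minimal, self-contained sketch of the same brute-force idea (an editorial illustration, not part of the patch; all names and prices are invented): pick one representative expression per equivalence class so that the total price of the distinct columns read is minimal, zeroing a column's price once it has been paid for, exactly as the new_prices copy does in bruteforce() above.

#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

/// One candidate expression inside an equivalence class ("component"):
/// its source text and the set of columns it reads.
struct Candidate
{
    std::string text;
    std::set<std::string> columns;
};

/// Exhaustive search: pick one candidate per component. A column counts
/// towards the price only the first time it is read, so prices are passed
/// by value and zeroed once paid.
void search(
    const std::vector<std::vector<Candidate>> & components,
    size_t depth,
    std::map<std::string, int64_t> prices,
    int64_t price,
    std::vector<size_t> & stack,
    int64_t & best_price,
    std::vector<size_t> & best_choice)
{
    if (depth == components.size())
    {
        if (price < best_price)
        {
            best_price = price;
            best_choice = stack;
        }
        return;
    }

    for (size_t i = 0; i < components[depth].size(); ++i)
    {
        auto new_prices = prices;
        int64_t delta = 0;
        for (const auto & column : components[depth][i].columns)
        {
            delta += new_prices[column];
            new_prices[column] = 0;
        }

        stack.push_back(i);
        search(components, depth + 1, new_prices, price + delta, stack, best_price, best_choice);
        stack.pop_back();
    }
}

int main()
{
    /// Hypothetical constraint b = a + 1 puts "b" and "a + 1" in one component;
    /// reading the cheap column "a" should win over the expensive column "b".
    std::vector<std::vector<Candidate>> components = {{{"b", {"b"}}, {"a + 1", {"a"}}}};
    std::map<std::string, int64_t> prices = {{"a", 100}, {"b", 1'000'000}};

    std::vector<size_t> stack, best_choice;
    int64_t best_price = INT64_MAX;
    search(components, 0, prices, 0, stack, best_price, best_choice);

    std::cout << components[0][best_choice[0]].text << '\n'; /// prints "a + 1"
}

The real optimizer additionally assigns primary-key columns the huge negative INDEX_PRICE, so representatives over indexed columns always win the comparison.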
+class SubstituteColumnOptimizer +{ +public: + SubstituteColumnOptimizer( + ASTSelectQuery * select_query, + const StorageMetadataPtr & metadata_snapshot, + const ConstStoragePtr & storage); + + void perform(); + +private: + ASTSelectQuery * select_query; + const StorageMetadataPtr & metadata_snapshot; + ConstStoragePtr storage; +}; + +} diff --git a/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h index ce0062e8c77..ee3e3062e13 100644 --- a/src/Interpreters/SystemLog.h +++ b/src/Interpreters/SystemLog.h @@ -595,8 +595,8 @@ ASTPtr SystemLog::getCreateTableQuery() { auto create = std::make_shared(); - create->database = table_id.database_name; - create->table = table_id.table_name; + create->setDatabase(table_id.database_name); + create->setTable(table_id.table_name); auto ordinary_columns = LogElement::getNamesAndTypes(); auto alias_columns = LogElement::getNamesAndAliases(); diff --git a/src/Interpreters/TableJoin.cpp b/src/Interpreters/TableJoin.cpp index e657bf38e49..42424b81192 100644 --- a/src/Interpreters/TableJoin.cpp +++ b/src/Interpreters/TableJoin.cpp @@ -99,7 +99,6 @@ TableJoin::TableJoin(const Settings & settings, VolumePtr tmp_volume_) , join_use_nulls(settings.join_use_nulls) , max_joined_block_rows(settings.max_joined_block_size_rows) , join_algorithm(settings.join_algorithm) - , partial_merge_join_optimizations(settings.partial_merge_join_optimizations) , partial_merge_join_rows_in_right_blocks(settings.partial_merge_join_rows_in_right_blocks) , partial_merge_join_left_table_buffer_bytes(settings.partial_merge_join_left_table_buffer_bytes) , max_files_to_merge(settings.join_on_disk_max_files_to_merge) diff --git a/src/Interpreters/TableJoin.h b/src/Interpreters/TableJoin.h index 956fed99fb8..b3e5748fb2f 100644 --- a/src/Interpreters/TableJoin.h +++ b/src/Interpreters/TableJoin.h @@ -108,7 +108,6 @@ private: const bool join_use_nulls = false; const size_t max_joined_block_rows = 0; JoinAlgorithm join_algorithm = JoinAlgorithm::AUTO; - const bool partial_merge_join_optimizations = false; const size_t partial_merge_join_rows_in_right_blocks = 0; const size_t partial_merge_join_left_table_buffer_bytes = 0; const size_t max_files_to_merge = 0; @@ -205,7 +204,6 @@ public: size_t maxBytesInLeftBuffer() const { return partial_merge_join_left_table_buffer_bytes; } size_t maxFilesToMerge() const { return max_files_to_merge; } const String & temporaryFilesCodec() const { return temporary_files_codec; } - bool enablePartialMergeJoinOptimizations() const { return partial_merge_join_optimizations; } bool needStreamWithNonJoinedRows() const; bool oneDisjunct() const; diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp index fff803fa559..e796c2b85a1 100644 --- a/src/Interpreters/ThreadStatusExt.cpp +++ b/src/Interpreters/ThreadStatusExt.cpp @@ -26,10 +26,8 @@ namespace ProfileEvents { -extern const Event SelectedRows; -extern const Event SelectedBytes; -extern const Event InsertedRows; -extern const Event InsertedBytes; + extern const Event InsertedRows; + extern const Event InsertedBytes; } diff --git a/src/Interpreters/TreeCNFConverter.cpp b/src/Interpreters/TreeCNFConverter.cpp new file mode 100644 index 00000000000..a6b46c46589 --- /dev/null +++ b/src/Interpreters/TreeCNFConverter.cpp @@ -0,0 +1,469 @@ +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int INCORRECT_QUERY; + extern const int TOO_MANY_TEMPORARY_COLUMNS; +} + +namespace +{ + 
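A note on the NOT push-down performed by traversePushNot below: negation is moved to the leaves using De Morgan's laws, flipping "and"/"or" on the way down and cancelling double negation. The following standalone sketch shows the same transformation on a toy expression type (an editorial illustration only; nothing in it is ClickHouse API):

#include <iostream>
#include <memory>
#include <string>
#include <vector>

/// Toy expression tree: a variable, or not/and/or over children.
struct Expr
{
    std::string op;                               /// "var", "not", "and", "or"
    std::string name;                             /// used when op == "var"
    std::vector<std::shared_ptr<Expr>> children;
};

using ExprPtr = std::shared_ptr<Expr>;

ExprPtr var(std::string n) { return std::make_shared<Expr>(Expr{"var", std::move(n), {}}); }

ExprPtr mk(std::string op, ExprPtr a, ExprPtr b = nullptr)
{
    auto e = std::make_shared<Expr>(Expr{std::move(op), "", {std::move(a)}});
    if (b)
        e->children.push_back(std::move(b));
    return e;
}

/// Push negation to the leaves, flipping and/or (De Morgan)
/// and cancelling NOT NOT on the way.
ExprPtr pushNot(const ExprPtr & node, bool negate)
{
    if (node->op == "not")
        return pushNot(node->children[0], !negate);

    if (node->op == "and" || node->op == "or")
    {
        /// De Morgan: NOT (a AND b) == NOT a OR NOT b, and vice versa.
        std::string op = negate ? (node->op == "and" ? "or" : "and") : node->op;
        auto res = std::make_shared<Expr>(Expr{op, "", {}});
        for (const auto & child : node->children)
            res->children.push_back(pushNot(child, negate));
        return res;
    }

    return negate ? mk("not", node) : node;
}

std::string dump(const ExprPtr & e)
{
    if (e->op == "var")
        return e->name;
    if (e->op == "not")
        return "NOT " + dump(e->children[0]);
    return "(" + dump(e->children[0]) + (e->op == "and" ? " AND " : " OR ") + dump(e->children[1]) + ")";
}

int main()
{
    /// NOT (a AND NOT b)  becomes  (NOT a OR b)
    auto e = mk("not", mk("and", var("a"), mk("not", var("b"))));
    std::cout << dump(e) << "  =>  " << dump(pushNot(e, false)) << '\n';
}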
+bool isLogicalFunction(const ASTFunction & func) +{ + return func.name == "and" || func.name == "or" || func.name == "not"; +} + +size_t countAtoms(const ASTPtr & node) +{ + checkStackSize(); + if (node->as()) + return 1; + + const auto * func = node->as(); + if (func && !isLogicalFunction(*func)) + return 1; + + size_t num_atoms = 0; + for (const auto & child : node->children) + num_atoms += countAtoms(child); + return num_atoms; +} + +/// Splits AND(a, b, c) to AND(a, AND(b, c)) for AND/OR +void splitMultiLogic(ASTPtr & node) +{ + checkStackSize(); + auto * func = node->as(); + + if (func && (func->name == "and" || func->name == "or")) + { + if (func->arguments->children.size() < 2) + throw Exception("Bad AND or OR function. Expected at least 2 arguments", ErrorCodes::INCORRECT_QUERY); + + if (func->arguments->children.size() > 2) + { + ASTPtr res = func->arguments->children[0]->clone(); + for (size_t i = 1; i < func->arguments->children.size(); ++i) + res = makeASTFunction(func->name, res, func->arguments->children[i]->clone()); + + node = res; + } + + auto * new_func = node->as(); + for (auto & child : new_func->arguments->children) + splitMultiLogic(child); + } + else if (func && func->name == "not") + { + for (auto & child : func->arguments->children) + splitMultiLogic(child); + } +} + +/// Push NOT to leaves, remove NOT NOT ... +void traversePushNot(ASTPtr & node, bool add_negation) +{ + checkStackSize(); + auto * func = node->as(); + + if (func && (func->name == "and" || func->name == "or")) + { + if (add_negation) + { + if (func->arguments->children.size() != 2) + throw Exception("Bad AND or OR function. Expected exactly 2 arguments", ErrorCodes::LOGICAL_ERROR); + + /// apply De Morgan's Law + node = makeASTFunction( + (func->name == "and" ? "or" : "and"), + func->arguments->children[0]->clone(), + func->arguments->children[1]->clone()); + } + + auto * new_func = node->as(); + for (auto & child : new_func->arguments->children) + traversePushNot(child, add_negation); + } + else if (func && func->name == "not") + { + if (func->arguments->children.size() != 1) + throw Exception("Bad NOT function.
Expected 1 argument", ErrorCodes::INCORRECT_QUERY); + /// delete NOT + node = func->arguments->children[0]->clone(); + + traversePushNot(node, !add_negation); + } + else + { + if (add_negation) + node = makeASTFunction("not", node->clone()); + } +} + +/// Push Or inside And (actually pull AND to top) +bool traversePushOr(ASTPtr & node, size_t num_atoms, size_t max_atoms) +{ + if (max_atoms && num_atoms > max_atoms) + return false; + + checkStackSize(); + auto * func = node->as(); + + if (func && (func->name == "or" || func->name == "and")) + { + for (auto & child : func->arguments->children) + if (!traversePushOr(child, num_atoms, max_atoms)) + return false; + } + + if (func && func->name == "or") + { + assert(func->arguments->children.size() == 2); + size_t and_node_id = func->arguments->children.size(); + for (size_t i = 0; i < func->arguments->children.size(); ++i) + { + auto & child = func->arguments->children[i]; + auto * and_func = child->as(); + if (and_func && and_func->name == "and") + and_node_id = i; + } + + if (and_node_id == func->arguments->children.size()) + return true; + + const size_t other_node_id = 1 - and_node_id; + const auto * and_func = func->arguments->children[and_node_id]->as(); + + auto a = func->arguments->children[other_node_id]; + auto b = and_func->arguments->children[0]; + auto c = and_func->arguments->children[1]; + + /// apply the distributive law ( a or (b and c) -> (a or b) and (a or c) ) + node = makeASTFunction( + "and", + makeASTFunction("or", a->clone(), b), + makeASTFunction("or", a, c)); + + /// Count all atoms from 'a', because it was cloned. + num_atoms += countAtoms(a); + return traversePushOr(node, num_atoms, max_atoms); + } + + return true; +} + +/// transform ast into cnf groups +void traverseCNF(const ASTPtr & node, CNFQuery::AndGroup & and_group, CNFQuery::OrGroup & or_group) +{ + checkStackSize(); + + auto * func = node->as(); + if (func && func->name == "and") + { + for (auto & child : func->arguments->children) + { + CNFQuery::OrGroup group; + traverseCNF(child, and_group, group); + if (!group.empty()) + and_group.insert(std::move(group)); + } + } + else if (func && func->name == "or") + { + for (auto & child : func->arguments->children) + { + traverseCNF(child, and_group, or_group); + } + } + else if (func && func->name == "not") + { + if (func->arguments->children.size() != 1) + throw Exception("Bad NOT function. Expected 1 argument", ErrorCodes::INCORRECT_QUERY); + or_group.insert(CNFQuery::AtomicFormula{true, func->arguments->children.front()}); + } + else + { + or_group.insert(CNFQuery::AtomicFormula{false, node}); + } +} + +void traverseCNF(const ASTPtr & node, CNFQuery::AndGroup & result) +{ + CNFQuery::OrGroup or_group; + traverseCNF(node, result, or_group); + if (!or_group.empty()) + result.insert(or_group); +} + +} + +std::optional TreeCNFConverter::tryConvertToCNF( + const ASTPtr & query, size_t max_growth_multipler) +{ + auto cnf = query->clone(); + size_t num_atoms = countAtoms(cnf); + + splitMultiLogic(cnf); + traversePushNot(cnf, false); + + size_t max_atoms = max_growth_multipler + ? 
std::max(MAX_ATOMS_WITHOUT_CHECK, num_atoms * max_growth_multipler) + : 0; + + if (!traversePushOr(cnf, num_atoms, max_atoms)) + return {}; + + CNFQuery::AndGroup and_group; + traverseCNF(cnf, and_group); + + CNFQuery result{std::move(and_group)}; + + return result; +} + +CNFQuery TreeCNFConverter::toCNF( + const ASTPtr & query, size_t max_growth_multipler) +{ + auto cnf = tryConvertToCNF(query, max_growth_multipler); + if (!cnf) + throw Exception(ErrorCodes::TOO_MANY_TEMPORARY_COLUMNS, + "Cannot convert expression '{}' to CNF, because it produces too many clauses. " + "Size of formula in CNF can be exponential in the size of the source formula.", + query->formatForErrorMessage()); + + return *cnf; +} + +ASTPtr TreeCNFConverter::fromCNF(const CNFQuery & cnf) +{ + const auto & groups = cnf.getStatements(); + if (groups.empty()) + return nullptr; + + ASTs or_groups; + for (const auto & group : groups) + { + if (group.size() == 1) + { + if ((*group.begin()).negative) + or_groups.push_back(makeASTFunction("not", (*group.begin()).ast->clone())); + else + or_groups.push_back((*group.begin()).ast->clone()); + } + else if (group.size() > 1) + { + or_groups.push_back(makeASTFunction("or")); + auto * func = or_groups.back()->as(); + for (const auto & atom : group) + { + if (atom.negative) + func->arguments->children.push_back(makeASTFunction("not", atom.ast->clone())); + else + func->arguments->children.push_back(atom.ast->clone()); + } + } + } + + if (or_groups.size() == 1) + return or_groups.front(); + + ASTPtr res = makeASTFunction("and"); + auto * func = res->as(); + for (const auto & group : or_groups) + func->arguments->children.push_back(group); + + return res; +} + +static void pushPullNotInAtom(CNFQuery::AtomicFormula & atom, const std::unordered_map & inverse_relations) +{ + auto * func = atom.ast->as(); + if (!func) + return; + if (auto it = inverse_relations.find(func->name); it != std::end(inverse_relations)) + { + /// inverse func + atom.ast = atom.ast->clone(); + auto * new_func = atom.ast->as(); + new_func->name = it->second; + /// add not + atom.negative = !atom.negative; + } +} + +static void pullNotOut(CNFQuery::AtomicFormula & atom) +{ + static const std::unordered_map inverse_relations = { + {"notEquals", "equals"}, + {"greaterOrEquals", "less"}, + {"greater", "lessOrEquals"}, + {"notIn", "in"}, + {"notLike", "like"}, + {"notEmpty", "empty"}, + }; + + pushPullNotInAtom(atom, inverse_relations); +} + +void pushNotIn(CNFQuery::AtomicFormula & atom) +{ + if (!atom.negative) + return; + + static const std::unordered_map inverse_relations = { + {"equals", "notEquals"}, + {"less", "greaterOrEquals"}, + {"lessOrEquals", "greater"}, + {"in", "notIn"}, + {"like", "notLike"}, + {"empty", "notEmpty"}, + {"notEquals", "equals"}, + {"greaterOrEquals", "less"}, + {"greater", "lessOrEquals"}, + {"notIn", "in"}, + {"notLike", "like"}, + {"notEmpty", "empty"}, + }; + + pushPullNotInAtom(atom, inverse_relations); +} + +CNFQuery & CNFQuery::pullNotOutFunctions() +{ + transformAtoms([](const AtomicFormula & atom) -> AtomicFormula + { + AtomicFormula result{atom.negative, atom.ast->clone()}; + pullNotOut(result); + return result; + }); + return *this; +} + +CNFQuery & CNFQuery::pushNotInFuntions() +{ + transformAtoms([](const AtomicFormula & atom) -> AtomicFormula + { + AtomicFormula result{atom.negative, atom.ast->clone()}; + pushNotIn(result); + return result; + }); + return *this; +} + +namespace +{ + CNFQuery::AndGroup reduceOnce(const CNFQuery::AndGroup & groups) + { + CNFQuery::AndGroup result; + for (const CNFQuery::OrGroup & group : groups) +
{ + CNFQuery::OrGroup copy(group); + bool inserted = false; + for (const CNFQuery::AtomicFormula & atom : group) + { + copy.erase(atom); + CNFQuery::AtomicFormula negative_atom(atom); + negative_atom.negative = !atom.negative; + copy.insert(negative_atom); + + if (groups.contains(copy)) + { + copy.erase(negative_atom); + result.insert(copy); + inserted = true; + break; + } + + copy.erase(negative_atom); + copy.insert(atom); + } + if (!inserted) + result.insert(group); + } + return result; + } + + bool isSubset(const CNFQuery::OrGroup & left, const CNFQuery::OrGroup & right) + { + if (left.size() > right.size()) + return false; + for (const auto & elem : left) + if (!right.contains(elem)) + return false; + return true; + } + + CNFQuery::AndGroup filterSubsets(const CNFQuery::AndGroup & groups) + { + CNFQuery::AndGroup result; + for (const CNFQuery::OrGroup & group : groups) + { + bool insert = true; + + for (const CNFQuery::OrGroup & other_group : groups) + { + if (isSubset(other_group, group) && group != other_group) + { + insert = false; + break; + } + } + + if (insert) + result.insert(group); + } + return result; + } +} + +CNFQuery & CNFQuery::reduce() +{ + while (true) + { + AndGroup new_statements = reduceOnce(statements); + if (statements == new_statements) + { + statements = filterSubsets(statements); + return *this; + } + else + statements = new_statements; + } +} + +std::string CNFQuery::dump() const +{ + WriteBufferFromOwnString res; + bool first = true; + for (const auto & group : statements) + { + if (!first) + res << " AND "; + first = false; + res << "("; + bool first_in_group = true; + for (const auto & atom : group) + { + if (!first_in_group) + res << " OR "; + first_in_group = false; + if (atom.negative) + res << " NOT "; + res << atom.ast->getColumnName(); + } + res << ")"; + } + + return res.str(); +} + +} diff --git a/src/Interpreters/TreeCNFConverter.h b/src/Interpreters/TreeCNFConverter.h new file mode 100644 index 00000000000..22ec2969096 --- /dev/null +++ b/src/Interpreters/TreeCNFConverter.h @@ -0,0 +1,167 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace DB +{ + +class CNFQuery +{ +public: + struct AtomicFormula + { + bool negative = false; + ASTPtr ast; + + /// for set + bool operator<(const AtomicFormula & rhs) const + { + return ast->getTreeHash() == rhs.ast->getTreeHash() + ? 
negative < rhs.negative + : ast->getTreeHash() < rhs.ast->getTreeHash(); + } + + bool operator==(const AtomicFormula & rhs) const + { + return negative == rhs.negative && + ast->getTreeHash() == rhs.ast->getTreeHash() && + ast->getColumnName() == rhs.ast->getColumnName(); + } + }; + + using OrGroup = std::set; + using AndGroup = std::set; + + CNFQuery(AndGroup && statements_) : statements(std::move(statements_)) { } + + template + CNFQuery & filterAlwaysTrueGroups(P predicate_is_unknown) /// delete always true groups + { + AndGroup filtered; + for (const auto & or_group : statements) + { + if (predicate_is_unknown(or_group)) + filtered.insert(or_group); + } + std::swap(statements, filtered); + return *this; + } + + template + CNFQuery & filterAlwaysFalseAtoms(P predicate_is_unknown) /// delete always false atoms + { + AndGroup filtered; + for (const auto & or_group : statements) + { + OrGroup filtered_group; + for (auto ast : or_group) + { + if (predicate_is_unknown(ast)) + filtered_group.insert(ast); + } + if (!filtered_group.empty()) + filtered.insert(filtered_group); + else + { + /// all atoms false -> group false -> CNF false + filtered.clear(); + filtered_group.clear(); + filtered_group.insert(AtomicFormula{false, std::make_shared(static_cast(0))}); + filtered.insert(filtered_group); + std::swap(statements, filtered); + return *this; + } + } + std::swap(statements, filtered); + return *this; + } + + template + const CNFQuery & iterateGroups(F func) const + { + for (const auto & group : statements) + func(group); + return *this; + } + + CNFQuery & appendGroup(AndGroup&& and_group) + { + for (auto && or_group : and_group) + statements.emplace(std::move(or_group)); + return *this; + } + + template + CNFQuery & transformGroups(F func) + { + AndGroup result; + for (const auto & group : statements) + { + auto new_group = func(group); + if (!new_group.empty()) + result.insert(std::move(new_group)); + } + std::swap(statements, result); + return *this; + } + + template + CNFQuery & transformAtoms(F func) + { + transformGroups([func](const OrGroup & group) -> OrGroup + { + OrGroup result; + for (const auto & atom : group) + { + auto new_atom = func(atom); + if (new_atom.ast) + result.insert(std::move(new_atom)); + } + return result; + }); + return *this; + } + + const AndGroup & getStatements() const { return statements; } + + std::string dump() const; + + /// Converts != -> NOT =; <,>= -> (NOT) <; >,<= -> (NOT) <= for simpler matching + CNFQuery & pullNotOutFunctions(); + /// Revert pullNotOutFunctions actions + CNFQuery & pushNotInFuntions(); + + /// (a OR b OR ...) AND (NOT a OR b OR ...) -> (b OR ...) + CNFQuery & reduce(); + +private: + AndGroup statements; +}; + +class TreeCNFConverter +{ +public: + static constexpr size_t DEFAULT_MAX_GROWTH_MULTIPLIER = 20; + static constexpr size_t MAX_ATOMS_WITHOUT_CHECK = 200; + + /// @max_growth_multipler means that it's allowed to grow size of formula only + /// in that amount of times. It's needed to avoid exponential explosion of formula. + /// CNF of boolean formula with N clauses can have 2^N clauses. + /// If amount of atomic formulas will be exceeded nullopt will be returned. + /// 0 - means unlimited. 
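The blow-up this limit guards against is easy to reproduce: a disjunction of N two-atom conjunctions, (a1 AND b1) OR ... OR (aN AND bN), turns into 2^N OR-groups once OR is distributed over AND, since every group picks one atom from each conjunct. A throwaway sketch that enumerates that cross product (editorial illustration only, invented names):

#include <iostream>
#include <string>
#include <vector>

int main()
{
    /// (a1 AND b1) OR (a2 AND b2) OR (a3 AND b3)
    const std::vector<std::vector<std::string>> terms = {{"a1", "b1"}, {"a2", "b2"}, {"a3", "b3"}};

    /// Distribute OR over AND: the CNF groups are the cross product of the terms.
    std::vector<std::string> groups = {""};
    for (const auto & term : terms)
    {
        std::vector<std::string> next;
        for (const auto & prefix : groups)
            for (const auto & atom : term)
                next.push_back(prefix.empty() ? atom : prefix + " OR " + atom);
        groups = std::move(next);
    }

    for (const auto & group : groups)
        std::cout << "(" << group << ")\n"; /// prints 8 == 2^3 OR-groups
}

With the defaults above, tryConvertToCNF gives up (returns nullopt) as soon as the growing formula holds more than max(MAX_ATOMS_WITHOUT_CHECK, num_atoms * DEFAULT_MAX_GROWTH_MULTIPLIER) atoms.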
+ static std::optional tryConvertToCNF( + const ASTPtr & query, size_t max_growth_multipler = DEFAULT_MAX_GROWTH_MULTIPLIER); + + static CNFQuery toCNF( + const ASTPtr & query, size_t max_growth_multipler = DEFAULT_MAX_GROWTH_MULTIPLIER); + + static ASTPtr fromCNF(const CNFQuery & cnf); +}; + +void pushNotIn(CNFQuery::AtomicFormula & atom); + +} diff --git a/src/Interpreters/TreeOptimizer.cpp b/src/Interpreters/TreeOptimizer.cpp index e811299b327..5e355cd52af 100644 --- a/src/Interpreters/TreeOptimizer.cpp +++ b/src/Interpreters/TreeOptimizer.cpp @@ -4,6 +4,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -164,7 +167,7 @@ void optimizeGroupBy(ASTSelectQuery * select_query, ContextPtr context) if (value.getType() == Field::Types::UInt64) { auto pos = value.get(); - if (pos > 0 && pos <= select_query->children.size()) + if (pos > 0 && pos <= select_query->select()->children.size()) keep_position = true; } } @@ -539,6 +542,44 @@ void optimizeLimitBy(const ASTSelectQuery * select_query) elems = std::move(unique_elems); } +/// Use constraints to get rid of useless parts of query +void optimizeWithConstraints(ASTSelectQuery * select_query, + Aliases & /*aliases*/, + const NameSet & /*source_columns_set*/, + const std::vector & /*tables_with_columns*/, + const StorageMetadataPtr & metadata_snapshot, + const bool optimize_append_index) +{ + WhereConstraintsOptimizer(select_query, metadata_snapshot, optimize_append_index).perform(); +} + +void optimizeSubstituteColumn(ASTSelectQuery * select_query, + Aliases & /*aliases*/, + const NameSet & /*source_columns_set*/, + const std::vector & /*tables_with_columns*/, + const StorageMetadataPtr & metadata_snapshot, + const ConstStoragePtr & storage) +{ + SubstituteColumnOptimizer(select_query, metadata_snapshot, storage).perform(); +} + +/// Transform WHERE to CNF for more convenient optimization. +bool convertQueryToCNF(ASTSelectQuery * select_query) +{ + if (select_query->where()) + { + auto cnf_form = TreeCNFConverter::tryConvertToCNF(select_query->where()); + if (!cnf_form) + return false; + + cnf_form->pushNotInFuntions(); + select_query->refWhere() = TreeCNFConverter::fromCNF(*cnf_form); + return true; + } + + return false; +} + /// Remove duplicated columns from USING(...). void optimizeUsing(const ASTSelectQuery * select_query) { @@ -700,6 +741,20 @@ void TreeOptimizer::apply(ASTPtr & query, TreeRewriterResult & result, if (settings.optimize_arithmetic_operations_in_aggregate_functions) optimizeAggregationFunctions(query); + bool converted_to_cnf = false; + if (settings.convert_query_to_cnf) + converted_to_cnf = convertQueryToCNF(select_query); + + if (converted_to_cnf && settings.optimize_using_constraints) + { + optimizeWithConstraints(select_query, result.aliases, result.source_columns_set, + tables_with_columns, result.metadata_snapshot, settings.optimize_append_index); + + if (settings.optimize_substitute_columns) + optimizeSubstituteColumn(select_query, result.aliases, result.source_columns_set, + tables_with_columns, result.metadata_snapshot, result.storage); + } + /// GROUP BY injective function elimination. 
optimizeGroupBy(select_query, context); diff --git a/src/Interpreters/TreeOptimizer.h b/src/Interpreters/TreeOptimizer.h index b8a98a6f9ac..1d9a29bd0ac 100644 --- a/src/Interpreters/TreeOptimizer.h +++ b/src/Interpreters/TreeOptimizer.h @@ -3,6 +3,7 @@ #include #include #include +#include #include namespace DB @@ -15,6 +16,7 @@ struct TreeRewriterResult; class TreeOptimizer { public: + static void apply( ASTPtr & query, TreeRewriterResult & result, diff --git a/src/Interpreters/WhereConstraintsOptimizer.cpp b/src/Interpreters/WhereConstraintsOptimizer.cpp new file mode 100644 index 00000000000..83bdcfeb2e1 --- /dev/null +++ b/src/Interpreters/WhereConstraintsOptimizer.cpp @@ -0,0 +1,182 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace DB +{ + +WhereConstraintsOptimizer::WhereConstraintsOptimizer( + ASTSelectQuery * select_query_, + const StorageMetadataPtr & metadata_snapshot_, + bool optimize_append_index_) + : select_query(select_query_) + , metadata_snapshot(metadata_snapshot_) + , optimize_append_index(optimize_append_index_) +{ +} + +namespace +{ + +enum class MatchState +{ + FULL_MATCH, /// a = b + NOT_MATCH, /// a = not b + NONE, /// other +}; + +MatchState match(CNFQuery::AtomicFormula a, CNFQuery::AtomicFormula b) +{ + bool match_means_ok = (a.negative == b.negative); + if (a.ast->getTreeHash() == b.ast->getTreeHash()) + return match_means_ok ? MatchState::FULL_MATCH : MatchState::NOT_MATCH; + + return MatchState::NONE; +} + +bool checkIfGroupAlwaysTrueFullMatch(const CNFQuery::OrGroup & group, const ConstraintsDescription & constraints_description) +{ + /// We have constraints in CNF. + /// CNF is always true => Each OR group in CNF is always true. + /// So, we try to check whether we have at least one OR group from the CNF as a subset of our group. + /// If we've found one then our group is always true too. + + const auto & constraints_data = constraints_description.getConstraintData(); + std::vector found(constraints_data.size()); + for (size_t i = 0; i < constraints_data.size(); ++i) + found[i] = constraints_data[i].size(); + + for (const auto & atom : group) + { + const auto constraint_atom_ids = constraints_description.getAtomIds(atom.ast); + if (constraint_atom_ids) + { + const auto constraint_atoms = constraints_description.getAtomsById(*constraint_atom_ids); + for (size_t i = 0; i < constraint_atoms.size(); ++i) + { + if (match(constraint_atoms[i], atom) == MatchState::FULL_MATCH) + { + if ((--found[(*constraint_atom_ids)[i].group_id]) == 0) + return true; + } + } + } + } + return false; +} + +bool checkIfGroupAlwaysTrueGraph(const CNFQuery::OrGroup & group, const ComparisonGraph & graph) +{ + /// We try to find at least one atom that is always true by using comparison graph.
+ for (const auto & atom : group) + { + const auto * func = atom.ast->as(); + if (func && func->arguments->children.size() == 2) + { + const auto expected = ComparisonGraph::atomToCompareResult(atom); + if (graph.isAlwaysCompare(expected, func->arguments->children[0], func->arguments->children[1])) + return true; + } + } + + return false; +} + + +bool checkIfAtomAlwaysFalseFullMatch(const CNFQuery::AtomicFormula & atom, const ConstraintsDescription & constraints_description) +{ + const auto constraint_atom_ids = constraints_description.getAtomIds(atom.ast); + if (constraint_atom_ids) + { + for (const auto & constraint_atom : constraints_description.getAtomsById(*constraint_atom_ids)) + { + const auto match_result = match(constraint_atom, atom); + if (match_result == MatchState::NOT_MATCH) + return true; + } + } + + return false; +} + +bool checkIfAtomAlwaysFalseGraph(const CNFQuery::AtomicFormula & atom, const ComparisonGraph & graph) +{ + const auto * func = atom.ast->as(); + if (func && func->arguments->children.size() == 2) + { + /// TODO: special support for != + const auto expected = ComparisonGraph::atomToCompareResult(atom); + return !graph.isPossibleCompare(expected, func->arguments->children[0], func->arguments->children[1]); + } + + return false; +} + +void replaceToConstants(ASTPtr & term, const ComparisonGraph & graph) +{ + const auto equal_constant = graph.getEqualConst(term); + if (equal_constant) + { + term = (*equal_constant)->clone(); + } + else + { + for (auto & child : term->children) + replaceToConstants(child, graph); + } +} + +CNFQuery::AtomicFormula replaceTermsToConstants(const CNFQuery::AtomicFormula & atom, const ComparisonGraph & graph) +{ + CNFQuery::AtomicFormula result; + result.negative = atom.negative; + result.ast = atom.ast->clone(); + + replaceToConstants(result.ast, graph); + + return result; +} + +} + +void WhereConstraintsOptimizer::perform() +{ + if (select_query->where() && metadata_snapshot) + { + const auto & compare_graph = metadata_snapshot->getConstraints().getGraph(); + auto cnf = TreeCNFConverter::toCNF(select_query->where()); + cnf.pullNotOutFunctions() + .filterAlwaysTrueGroups([&compare_graph, this](const auto & group) + { + /// remove always true groups from CNF + return !checkIfGroupAlwaysTrueFullMatch(group, metadata_snapshot->getConstraints()) && !checkIfGroupAlwaysTrueGraph(group, compare_graph); + }) + .filterAlwaysFalseAtoms([&compare_graph, this](const auto & atom) + { + /// remove always false atoms from CNF + return !checkIfAtomAlwaysFalseFullMatch(atom, metadata_snapshot->getConstraints()) && !checkIfAtomAlwaysFalseGraph(atom, compare_graph); + }) + .transformAtoms([&compare_graph](const auto & atom) + { + return replaceTermsToConstants(atom, compare_graph); + }) + .reduce() + .pushNotInFuntions(); + + if (optimize_append_index) + AddIndexConstraintsOptimizer(metadata_snapshot).perform(cnf); + + select_query->setExpression(ASTSelectQuery::Expression::WHERE, TreeCNFConverter::fromCNF(cnf)); + } +} + +} diff --git a/src/Interpreters/WhereConstraintsOptimizer.h b/src/Interpreters/WhereConstraintsOptimizer.h new file mode 100644 index 00000000000..01cee91fca5 --- /dev/null +++ b/src/Interpreters/WhereConstraintsOptimizer.h @@ -0,0 +1,30 @@ +#pragma once + +#include +#include + +namespace DB +{ + +struct StorageInMemoryMetadata; +using StorageMetadataPtr = std::shared_ptr; + +/// Optimizer that can remove useless parts of conditions +/// in WHERE clause according to table constraints. 
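To make the always-true test in checkIfGroupAlwaysTrueFullMatch above concrete: constraints are themselves stored in CNF, so every constraint OR-group is always true, and a WHERE group that contains some whole constraint group as a subset is implied by it and can be dropped. A minimal sketch of that subset test over plain strings (editorial illustration only; the real code matches atoms by id via getAtomIds/getAtomsById instead of by text):

#include <iostream>
#include <set>
#include <string>
#include <vector>

using OrGroup = std::set<std::string>; /// atoms by normalized text

/// A WHERE group is always true if some constraint group is a subset of it.
bool impliedByConstraints(const OrGroup & group, const std::vector<OrGroup> & constraint_groups)
{
    for (const auto & constraint : constraint_groups)
    {
        bool subset = true;
        for (const auto & atom : constraint)
        {
            if (!group.count(atom))
            {
                subset = false;
                break;
            }
        }
        if (subset)
            return true;
    }
    return false;
}

int main()
{
    /// Hypothetical table constraint: CHECK b > 10 (a single one-atom group).
    std::vector<OrGroup> constraints = {{"b > 10"}};

    /// WHERE (b > 10 OR a < 5) AND (a < 5): the first group is implied,
    /// so only the second one survives.
    std::vector<OrGroup> where = {{"b > 10", "a < 5"}, {"a < 5"}};

    for (const auto & group : where)
        if (!impliedByConstraints(group, constraints))
            std::cout << "keep group of " << group.size() << " atom(s)\n";
}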
+class WhereConstraintsOptimizer final +{ +public: + WhereConstraintsOptimizer( + ASTSelectQuery * select_query, + const StorageMetadataPtr & metadata_snapshot, + bool optimize_append_index_); + + void perform(); + +private: + ASTSelectQuery * select_query; + const StorageMetadataPtr & metadata_snapshot; + bool optimize_append_index; +}; + +} diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index f401f708ab1..2ab4167176f 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -247,7 +247,7 @@ static void onExceptionBeforeStart(const String & query_for_logging, ContextPtr { /// Exception before the query execution. if (auto quota = context->getQuota()) - quota->used(Quota::ERRORS, 1, /* check_exceeded = */ false); + quota->used(QuotaType::ERRORS, 1, /* check_exceeded = */ false); const Settings & settings = context->getSettingsRef(); @@ -431,12 +431,6 @@ static std::tuple executeQueryImpl( InterpreterSetQuery(query_with_output->settings_ast, context).executeForCurrentContext(); } - if (const auto * query_with_table_output = dynamic_cast(ast.get())) - { - query_database = query_with_table_output->database; - query_table = query_with_table_output->table; - } - if (auto * create_query = ast->as()) { if (create_query->select) @@ -510,6 +504,12 @@ static std::tuple executeQueryImpl( query = serializeAST(*ast); } + if (const auto * query_with_table_output = dynamic_cast(ast.get())) + { + query_database = query_with_table_output->getDatabase(); + query_table = query_with_table_output->getTable(); + } + /// MUST go before any modification (except for prepared statements, /// since it substitute parameters and w/o them query does not contain /// parameters), to keep query as-is in query_log and server log. 
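For context on the quota calls rewritten in the next hunk: a query charges QUERIES plus one statement-kind counter (QUERY_SELECTS or QUERY_INSERTS) up front, while ERRORS is only checked at this point and charged later if the query actually fails. A toy model of that accounting pattern (editorial illustration only; this is not the real Quota/QuotaType API):

#include <iostream>
#include <map>
#include <stdexcept>

enum class QuotaType { QUERIES, QUERY_SELECTS, QUERY_INSERTS, ERRORS };

struct Quota
{
    std::map<QuotaType, size_t> used_amount;
    std::map<QuotaType, size_t> max_amount;

    /// Charge a counter; checking is a separate step.
    void used(QuotaType type, size_t amount) { used_amount[type] += amount; }

    /// Throw if a counter went over its configured limit.
    void checkExceeded(QuotaType type)
    {
        auto it = max_amount.find(type);
        if (it != max_amount.end() && used_amount[type] > it->second)
            throw std::runtime_error("quota exceeded");
    }
};

int main()
{
    Quota quota;
    quota.max_amount[QuotaType::ERRORS] = 10;

    bool is_select = true; /// pretend the parsed AST is a SELECT
    quota.used(is_select ? QuotaType::QUERY_SELECTS : QuotaType::QUERY_INSERTS, 1);
    quota.used(QuotaType::QUERIES, 1);
    quota.checkExceeded(QuotaType::ERRORS);

    std::cout << "queries so far: " << quota.used_amount[QuotaType::QUERIES] << '\n';
}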
@@ -607,14 +607,14 @@ static std::tuple executeQueryImpl( { if (ast->as() || ast->as()) { - quota->used(Quota::QUERY_SELECTS, 1); + quota->used(QuotaType::QUERY_SELECTS, 1); } else if (ast->as()) { - quota->used(Quota::QUERY_INSERTS, 1); + quota->used(QuotaType::QUERY_INSERTS, 1); } - quota->used(Quota::QUERIES, 1); - quota->checkExceeded(Quota::ERRORS); + quota->used(QuotaType::QUERIES, 1); + quota->checkExceeded(QuotaType::ERRORS); } } @@ -864,7 +864,7 @@ static std::tuple executeQueryImpl( quota(quota), status_info_to_query_log] () mutable { if (quota) - quota->used(Quota::ERRORS, 1, /* check_exceeded = */ false); + quota->used(QuotaType::ERRORS, 1, /* check_exceeded = */ false); elem.type = QueryLogElementType::EXCEPTION_WHILE_PROCESSING; diff --git a/src/Interpreters/loadMetadata.cpp b/src/Interpreters/loadMetadata.cpp index 65b2065b2ad..c5bef6f7205 100644 --- a/src/Interpreters/loadMetadata.cpp +++ b/src/Interpreters/loadMetadata.cpp @@ -20,6 +20,7 @@ #include #include #include +#include namespace fs = std::filesystem; @@ -38,7 +39,7 @@ static void executeCreateQuery( parser, query.data(), query.data() + query.size(), "in file " + file_name, 0, context->getSettingsRef().max_parser_depth); auto & ast_create_query = ast->as(); - ast_create_query.database = database; + ast_create_query.setDatabase(database); InterpreterCreateQuery interpreter(ast, context); interpreter.setInternal(true); diff --git a/src/Interpreters/tests/gtest_comparison_graph.cpp b/src/Interpreters/tests/gtest_comparison_graph.cpp new file mode 100644 index 00000000000..6348bd13b8e --- /dev/null +++ b/src/Interpreters/tests/gtest_comparison_graph.cpp @@ -0,0 +1,183 @@ +#include +#include +#include +#include +#include +#include + +#include + +using namespace DB; + +static ComparisonGraph getGraph(const String & query) +{ + ParserExpressionList parser(false); + ASTPtr ast = parseQuery(parser, query, 0, 0); + return ComparisonGraph(ast->children); +} + +TEST(ComparisonGraph, Bounds) +{ + String query = "x <= 1, 1 < c, 3 < c, c < d, d < e, e < 7, e < 10, 10 <= y"; + auto graph = getGraph(query); + + auto d = std::make_shared("d"); + + { + auto res = graph.getConstLowerBound(d); + ASSERT_TRUE(res.has_value()); + + const auto & [lower, strict] = *res; + + ASSERT_EQ(get(lower), 3); + ASSERT_TRUE(strict); + } + + { + auto res = graph.getConstUpperBound(d); + ASSERT_TRUE(res.has_value()); + + const auto & [upper, strict] = *res; + + ASSERT_EQ(get(upper), 7); + ASSERT_TRUE(strict); + } + + { + auto x = std::make_shared("x"); + auto y = std::make_shared("y"); + + ASSERT_EQ(graph.compare(x, y), ComparisonGraph::CompareResult::LESS); + ASSERT_EQ(graph.compare(y, x), ComparisonGraph::CompareResult::GREATER); + } +} + +using Components = std::set>; + +static std::set componentToStrings(const ASTs & comp) +{ + std::set res; + for (const auto & ast : comp) + res.insert(ast->getColumnName()); + return res; +} + +static void checkComponents(const String & query, const Components & expected) +{ + auto graph = getGraph(query); + + size_t num_components = graph.getNumOfComponents(); + ASSERT_EQ(num_components, expected.size()); + + Components res; + for (size_t i = 0; i < num_components; ++i) + res.insert(componentToStrings(graph.getComponent(i))); + + ASSERT_EQ(res, expected); +} + +TEST(ComparisonGraph, Components) +{ + { + String query = "a >= b, b >= c, c >= d, d >= b, d >= e, a >= e"; + Components expected = {{"a"}, {"b", "c", "d"}, {"e"}}; + checkComponents(query, expected); + } + + { + String query = "a >= b, b >= a, b >= c, c >= 
d, d >= c"; + Components expected = {{"a", "b"}, {"c", "d"}}; + checkComponents(query, expected); + } +} + +TEST(ComparisonGraph, Compare) +{ + using CompareResult = ComparisonGraph::CompareResult; + + { + String query = "a >= b, c >= b"; + auto graph = getGraph(query); + + auto a = std::make_shared("a"); + auto c = std::make_shared("c"); + + ASSERT_EQ(graph.compare(a, c), CompareResult::UNKNOWN); + } + + { + String query = "a >= b, b > c"; + auto graph = getGraph(query); + + auto a = std::make_shared("a"); + auto b = std::make_shared("b"); + auto c = std::make_shared("c"); + + ASSERT_EQ(graph.compare(a, c), CompareResult::GREATER); + ASSERT_EQ(graph.compare(a, b), CompareResult::GREATER_OR_EQUAL); + ASSERT_EQ(graph.compare(b, c), CompareResult::GREATER); + } + + { + String query = "a != b, c < a"; + auto graph = getGraph(query); + + auto a = std::make_shared("a"); + auto b = std::make_shared("b"); + auto c = std::make_shared("c"); + + ASSERT_EQ(graph.compare(a, b), CompareResult::NOT_EQUAL); + ASSERT_EQ(graph.compare(a, c), CompareResult::GREATER); + ASSERT_EQ(graph.compare(b, c), CompareResult::UNKNOWN); + } + + { + /// These constraints are inconsistent. + String query = "a >= b, b >= a, a != b"; + ASSERT_THROW(getGraph(query), Exception); + } + + { + /// These constraints are inconsistent. + String query = "a > b, b > c, c > a"; + ASSERT_THROW(getGraph(query), Exception); + } + + { + String query = "a >= 3, b > a, c >= 3, d >= c"; + auto graph = getGraph(query); + + auto a = std::make_shared("a"); + auto b = std::make_shared("b"); + auto d = std::make_shared("d"); + auto lit_2 = std::make_shared(2u); + auto lit_3 = std::make_shared(3u); + auto lit_4 = std::make_shared(4u); + + ASSERT_EQ(graph.compare(lit_3, a), CompareResult::LESS_OR_EQUAL); + ASSERT_FALSE(graph.isAlwaysCompare(CompareResult::LESS, lit_3, a)); + ASSERT_TRUE(graph.isAlwaysCompare(CompareResult::LESS, lit_2, a)); + + ASSERT_EQ(graph.compare(b, lit_2), CompareResult::GREATER); + ASSERT_EQ(graph.compare(b, lit_3), CompareResult::GREATER); + ASSERT_EQ(graph.compare(b, lit_4), CompareResult::UNKNOWN); + + ASSERT_EQ(graph.compare(d, lit_2), CompareResult::GREATER); + ASSERT_EQ(graph.compare(d, lit_3), CompareResult::GREATER_OR_EQUAL); + ASSERT_EQ(graph.compare(d, lit_4), CompareResult::UNKNOWN); + } + + { + String query = "a >= 5, a <= 10"; + auto graph = getGraph(query); + + auto a = std::make_shared("a"); + auto lit_8 = std::make_shared(8); + auto lit_3 = std::make_shared(3); + auto lit_15 = std::make_shared(15); + + ASSERT_EQ(graph.compare(a, lit_8), CompareResult::UNKNOWN); + ASSERT_EQ(graph.compare(a, lit_3), CompareResult::GREATER); + ASSERT_EQ(graph.compare(a, lit_15), CompareResult::LESS); + } +} diff --git a/src/Parsers/ASTAlterQuery.cpp b/src/Parsers/ASTAlterQuery.cpp index 50c56c9aeff..2ae494854ec 100644 --- a/src/Parsers/ASTAlterQuery.cpp +++ b/src/Parsers/ASTAlterQuery.cpp @@ -485,7 +485,7 @@ bool ASTAlterQuery::isDropPartitionAlter() const /** Get the text that identifies this element. */ String ASTAlterQuery::getID(char delim) const { - return "AlterQuery" + (delim + database) + delim + table; + return "AlterQuery" + (delim + getDatabase()) + delim + getTable(); } ASTPtr ASTAlterQuery::clone() const @@ -523,18 +523,18 @@ void ASTAlterQuery::formatQueryImpl(const FormatSettings & settings, FormatState settings.ostr << (settings.hilite ? 
hilite_none : ""); - if (!table.empty()) + if (table) { - if (!database.empty()) + if (database) { - settings.ostr << indent_str << backQuoteIfNeed(database); + settings.ostr << indent_str << backQuoteIfNeed(getDatabase()); settings.ostr << "."; } - settings.ostr << indent_str << backQuoteIfNeed(table); + settings.ostr << indent_str << backQuoteIfNeed(getTable()); } - else if (alter_object == AlterObjectType::DATABASE && !database.empty()) + else if (alter_object == AlterObjectType::DATABASE && database) { - settings.ostr << indent_str << backQuoteIfNeed(database); + settings.ostr << indent_str << backQuoteIfNeed(getDatabase()); } formatOnCluster(settings); diff --git a/src/Parsers/ASTCheckQuery.h b/src/Parsers/ASTCheckQuery.h index fdd1179ec90..8b376ef8d60 100644 --- a/src/Parsers/ASTCheckQuery.h +++ b/src/Parsers/ASTCheckQuery.h @@ -12,13 +12,14 @@ struct ASTCheckQuery : public ASTQueryWithTableAndOutput ASTPtr partition; /** Get the text that identifies this element. */ - String getID(char delim) const override { return "CheckQuery" + (delim + database) + delim + table; } + String getID(char delim) const override { return "CheckQuery" + (delim + getDatabase()) + delim + getTable(); } ASTPtr clone() const override { auto res = std::make_shared(*this); res->children.clear(); cloneOutputOptions(*res); + cloneTableOptions(*res); return res; } @@ -32,14 +33,14 @@ protected: settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "CHECK TABLE " << (settings.hilite ? hilite_none : ""); - if (!table.empty()) + if (table) { - if (!database.empty()) + if (database) { - settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << backQuoteIfNeed(database) << (settings.hilite ? hilite_none : ""); + settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << backQuoteIfNeed(getDatabase()) << (settings.hilite ? hilite_none : ""); settings.ostr << "."; } - settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << backQuoteIfNeed(table) << (settings.hilite ? hilite_none : ""); + settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << backQuoteIfNeed(getTable()) << (settings.hilite ? hilite_none : ""); } if (partition) diff --git a/src/Parsers/ASTConstraintDeclaration.cpp b/src/Parsers/ASTConstraintDeclaration.cpp index 7d74837478c..2b895b85996 100644 --- a/src/Parsers/ASTConstraintDeclaration.cpp +++ b/src/Parsers/ASTConstraintDeclaration.cpp @@ -11,6 +11,7 @@ ASTPtr ASTConstraintDeclaration::clone() const auto res = std::make_shared(); res->name = name; + res->type = type; if (expr) res->set(res->expr, expr->clone()); @@ -21,7 +22,7 @@ ASTPtr ASTConstraintDeclaration::clone() const void ASTConstraintDeclaration::formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const { s.ostr << backQuoteIfNeed(name); - s.ostr << (s.hilite ? hilite_keyword : "") << " CHECK " << (s.hilite ? hilite_none : ""); + s.ostr << (s.hilite ? hilite_keyword : "") << (type == Type::CHECK ? " CHECK " : " ASSUME ") << (s.hilite ? 
diff --git a/src/Parsers/ASTConstraintDeclaration.h b/src/Parsers/ASTConstraintDeclaration.h index 3a8ad75f54b..437aab1a82d 100644 --- a/src/Parsers/ASTConstraintDeclaration.h +++ b/src/Parsers/ASTConstraintDeclaration.h @@ -10,7 +10,14 @@ namespace DB class ASTConstraintDeclaration : public IAST { public: + enum class Type : UInt8 + { + CHECK, + ASSUME, + }; + String name; + Type type; IAST * expr; String getID(char) const override { return "Constraint"; }
diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp index 2c048886247..608098b8d95 100644 --- a/src/Parsers/ASTCreateQuery.cpp +++ b/src/Parsers/ASTCreateQuery.cpp @@ -212,6 +212,7 @@ ASTPtr ASTCreateQuery::clone() const res->set(res->comment, comment->clone()); cloneOutputOptions(*res); + cloneTableOptions(*res); return res; } @@ -220,13 +221,13 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat { frame.need_parens = false; - if (!database.empty() && table.empty()) + if (database && !table) { settings.ostr << (settings.hilite ? hilite_keyword : "") << (attach ? "ATTACH DATABASE " : "CREATE DATABASE ") << (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : "") - << backQuoteIfNeed(database); + << backQuoteIfNeed(getDatabase()); if (uuid != UUIDHelpers::Nil) { @@ -275,7 +276,7 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat << what << " " << (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : "") - << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); + << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable()); if (uuid != UUIDHelpers::Nil) settings.ostr << (settings.hilite ? hilite_keyword : "") << " UUID " << (settings.hilite ? hilite_none : "") @@ -316,7 +317,7 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat /// Always DICTIONARY settings.ostr << (settings.hilite ? hilite_keyword : "") << action << " DICTIONARY " << (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : "") - << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); + << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable()); if (uuid != UUIDHelpers::Nil) settings.ostr << (settings.hilite ? hilite_keyword : "") << " UUID " << (settings.hilite ? hilite_none : "") << quoteString(toString(uuid));
diff --git a/src/Parsers/ASTCreateQuery.h b/src/Parsers/ASTCreateQuery.h index 51c4ac15e67..4c7768517b1 100644 --- a/src/Parsers/ASTCreateQuery.h +++ b/src/Parsers/ASTCreateQuery.h @@ -91,7 +91,7 @@ public: bool create_or_replace{false}; /** Get the text that identifies this element. */ - String getID(char delim) const override { return (attach ? "AttachQuery" : "CreateQuery") + (delim + database) + delim + table; } + String getID(char delim) const override { return (attach ? "AttachQuery" : "CreateQuery") + (delim + getDatabase()) + delim + getTable(); } ASTPtr clone() const override;
diff --git a/src/Parsers/ASTDropQuery.cpp b/src/Parsers/ASTDropQuery.cpp index 6ea6c81c3be..9e815ee75de 100644 --- a/src/Parsers/ASTDropQuery.cpp +++ b/src/Parsers/ASTDropQuery.cpp @@ -15,11 +15,11 @@ namespace ErrorCodes String ASTDropQuery::getID(char delim) const { if (kind == ASTDropQuery::Kind::Drop) - return "DropQuery" + (delim + database) + delim + table; + return "DropQuery" + (delim + getDatabase()) + delim + getTable(); else if (kind == ASTDropQuery::Kind::Detach) - return "DetachQuery" + (delim + database) + delim + table; + return "DetachQuery" + (delim + getDatabase()) + delim + getTable(); else if (kind == ASTDropQuery::Kind::Truncate) - return "TruncateQuery" + (delim + database) + delim + table; + return "TruncateQuery" + (delim + getDatabase()) + delim + getTable(); else throw Exception("Not supported kind of drop query.", ErrorCodes::SYNTAX_ERROR); } @@ -28,6 +28,7 @@ ASTPtr ASTDropQuery::clone() const { auto res = std::make_shared<ASTDropQuery>(*this); cloneOutputOptions(*res); + cloneTableOptions(*res); return res; } @@ -46,7 +47,8 @@ void ASTDropQuery::formatQueryImpl(const FormatSettings & settings, FormatState if (temporary) settings.ostr << "TEMPORARY "; - if (table.empty() && !database.empty()) + + if (!table && database) settings.ostr << "DATABASE "; else if (is_dictionary) settings.ostr << "DICTIONARY "; @@ -60,10 +62,10 @@ void ASTDropQuery::formatQueryImpl(const FormatSettings & settings, FormatState settings.ostr << (settings.hilite ? hilite_none : ""); - if (table.empty() && !database.empty()) - settings.ostr << backQuoteIfNeed(database); + if (!table && database) + settings.ostr << backQuoteIfNeed(getDatabase()); else - settings.ostr << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); + settings.ostr << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable()); formatOnCluster(settings);
diff --git a/src/Parsers/ASTIdentifier.cpp b/src/Parsers/ASTIdentifier.cpp index 58052b1419b..a889680c81f 100644 --- a/src/Parsers/ASTIdentifier.cpp +++ b/src/Parsers/ASTIdentifier.cpp @@ -201,6 +201,40 @@ String ASTTableIdentifier::getDatabaseName() const else return {}; } +ASTPtr ASTTableIdentifier::getTable() const +{ + if (name_parts.size() == 2) + { + if (!name_parts[1].empty()) + return std::make_shared<ASTIdentifier>(name_parts[1]); + + if (name_parts[0].empty()) + return std::make_shared<ASTIdentifier>("", children[1]->clone()); + else + return std::make_shared<ASTIdentifier>("", children[0]->clone()); + } + else if (name_parts.size() == 1) + { + if (name_parts[0].empty()) + return std::make_shared<ASTIdentifier>("", children[0]->clone()); + else + return std::make_shared<ASTIdentifier>(name_parts[0]); + } + else return {}; +} + +ASTPtr ASTTableIdentifier::getDatabase() const +{ + if (name_parts.size() == 2) + { + if (name_parts[0].empty()) + return std::make_shared<ASTIdentifier>("", children[0]->clone()); + else + return std::make_shared<ASTIdentifier>(name_parts[0]); + } + else return {}; +} + void ASTTableIdentifier::resetTable(const String & database_name, const String & table_name) { auto identifier = std::make_shared<ASTTableIdentifier>(database_name, table_name);
diff --git a/src/Parsers/ASTIdentifier.h b/src/Parsers/ASTIdentifier.h index 323280e07bc..048b32e438d 100644 --- a/src/Parsers/ASTIdentifier.h +++ b/src/Parsers/ASTIdentifier.h @@ -61,6 +61,7 @@ protected: private: using ASTWithAlias::children; /// ASTIdentifier is child free + friend class ASTTableIdentifier; friend class ReplaceQueryParameterVisitor; friend struct IdentifierSemantic; friend void setIdentifierSpecial(ASTPtr & ast); @@ -83,6 +84,9 @@ public: StorageID getTableId() const; String getDatabaseName() const; + ASTPtr getTable() const; + ASTPtr getDatabase() const; + // FIXME: used only when it's needed to rewrite distributed table name to real remote table name. void resetTable(const String & database_name, const String & table_name); // TODO(ilezhankin): get rid of this
diff --git a/src/Parsers/ASTInsertQuery.cpp b/src/Parsers/ASTInsertQuery.cpp index 3c8c31b85dd..c733398a32b 100644 --- a/src/Parsers/ASTInsertQuery.cpp +++ b/src/Parsers/ASTInsertQuery.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -15,6 +16,35 @@ namespace ErrorCodes extern const int INVALID_USAGE_OF_INPUT; } +String ASTInsertQuery::getDatabase() const +{ + String name; + tryGetIdentifierNameInto(database, name); + return name; +} + +String ASTInsertQuery::getTable() const +{ + String name; + tryGetIdentifierNameInto(table, name); + return name; +} + +void ASTInsertQuery::setDatabase(const String & name) +{ + if (name.empty()) + database.reset(); + else + database = std::make_shared<ASTIdentifier>(name); +} + +void ASTInsertQuery::setTable(const String & name) +{ + if (name.empty()) + table.reset(); + else + table = std::make_shared<ASTIdentifier>(name); +} void ASTInsertQuery::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { @@ -31,9 +61,16 @@ void ASTInsertQuery::formatImpl(const FormatSettings & settings, FormatState & s partition_by->formatImpl(settings, state, frame); } } - else + else if (table_id) + { settings.ostr << (settings.hilite ? hilite_none : "") << (!table_id.database_name.empty() ? backQuoteIfNeed(table_id.database_name) + "." : "") << backQuoteIfNeed(table_id.table_name); + } + else + { + settings.ostr << (settings.hilite ? hilite_none : "") + << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable()); + } if (columns) {
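That closes the ASTInsertQuery.cpp changes: an INSERT now carries its target as identifier nodes rather than plain strings. A short sketch of the accessor semantics under these definitions (relying on tryGetIdentifierNameInto() leaving the output string empty for a null pointer; names are invented for the example):

```cpp
#include <Parsers/ASTInsertQuery.h>
#include <cassert>

// Hypothetical usage, not part of the patch.
void insertTargetAccessors()
{
    auto insert = std::make_shared<DB::ASTInsertQuery>();
    insert->setDatabase("db1");
    insert->setTable("events");
    assert(insert->getDatabase() == "db1" && insert->getTable() == "events");

    insert->setDatabase("");          // an empty name resets the node entirely,
    assert(!insert->database);        // so formatImpl() prints `events`, not `.events`
    assert(insert->getDatabase().empty());
}
```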
diff --git a/src/Parsers/ASTInsertQuery.h b/src/Parsers/ASTInsertQuery.h index 81925ec2e66..d539ad5fdb3 100644 --- a/src/Parsers/ASTInsertQuery.h +++ b/src/Parsers/ASTInsertQuery.h @@ -13,6 +13,10 @@ class ASTInsertQuery : public IAST { public: StorageID table_id = StorageID::createEmpty(); + + ASTPtr database; + ASTPtr table; + ASTPtr columns; String format; ASTPtr table_function; @@ -31,6 +35,12 @@ public: /// Data from buffer to insert after inlined one - may be nullptr. ReadBuffer * tail = nullptr; + String getDatabase() const; + String getTable() const; + + void setDatabase(const String & name); + void setTable(const String & name); + bool hasInlinedData() const { return data || tail; } /// Try to find table function input() in SELECT part @@ -44,6 +54,8 @@ public: auto res = std::make_shared<ASTInsertQuery>(*this); res->children.clear(); + if (database) { res->database = database->clone(); res->children.push_back(res->database); } + if (table) { res->table = table->clone(); res->children.push_back(res->table); } if (columns) { res->columns = columns->clone(); res->children.push_back(res->columns); } if (select) { res->select = select->clone(); res->children.push_back(res->select); } if (watch) { res->watch = watch->clone(); res->children.push_back(res->watch); }
diff --git a/src/Parsers/ASTOptimizeQuery.cpp b/src/Parsers/ASTOptimizeQuery.cpp index 6423e247ecc..720c7699fb6 100644 --- a/src/Parsers/ASTOptimizeQuery.cpp +++ b/src/Parsers/ASTOptimizeQuery.cpp @@ -8,7 +8,7 @@ namespace DB void ASTOptimizeQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { settings.ostr << (settings.hilite ? hilite_keyword : "") << "OPTIMIZE TABLE " << (settings.hilite ? hilite_none : "") - << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); + << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable()); formatOnCluster(settings);
diff --git a/src/Parsers/ASTOptimizeQuery.h b/src/Parsers/ASTOptimizeQuery.h index f4981d156c7..cac2ef1c6d0 100644 --- a/src/Parsers/ASTOptimizeQuery.h +++ b/src/Parsers/ASTOptimizeQuery.h @@ -25,7 +25,7 @@ public: /** Get the text that identifies this element. */ String getID(char delim) const override { - return "OptimizeQuery" + (delim + database) + delim + table + (final ? "_final" : "") + (deduplicate ? "_deduplicate" : ""); + return "OptimizeQuery" + (delim + getDatabase()) + delim + getTable() + (final ? "_final" : "") + (deduplicate ? "_deduplicate" : ""); } ASTPtr clone() const override
diff --git a/src/Parsers/ASTQueryWithOnCluster.h b/src/Parsers/ASTQueryWithOnCluster.h index e053b50ffb2..b309ae5e847 100644 --- a/src/Parsers/ASTQueryWithOnCluster.h +++ b/src/Parsers/ASTQueryWithOnCluster.h @@ -39,8 +39,8 @@ protected: T & query = static_cast<T &>(*query_ptr); query.cluster.clear(); - if (query.database.empty()) - query.database = new_database; + if (!query.database) + query.setDatabase(new_database); return query_ptr; }
diff --git a/src/Parsers/ASTQueryWithTableAndOutput.cpp b/src/Parsers/ASTQueryWithTableAndOutput.cpp index d44ba988d7a..a216aeaa11f 100644 --- a/src/Parsers/ASTQueryWithTableAndOutput.cpp +++ b/src/Parsers/ASTQueryWithTableAndOutput.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -6,10 +7,67 @@ namespace DB { +String ASTQueryWithTableAndOutput::getDatabase() const +{ + String name; + tryGetIdentifierNameInto(database, name); + return name; +} + +String ASTQueryWithTableAndOutput::getTable() const +{ + String name; + tryGetIdentifierNameInto(table, name); + return name; +} + +void ASTQueryWithTableAndOutput::setDatabase(const String & name) +{ + if (database) + { + std::erase(children, database); + database.reset(); + } + + if (!name.empty()) + { + database = std::make_shared<ASTIdentifier>(name); + children.push_back(database); + } +} + +void ASTQueryWithTableAndOutput::setTable(const String & name) +{ + if (table) + { + std::erase(children, table); + table.reset(); + } + + if (!name.empty()) + { + table = std::make_shared<ASTIdentifier>(name); + children.push_back(table); + } +} + +void ASTQueryWithTableAndOutput::cloneTableOptions(ASTQueryWithTableAndOutput & cloned) const +{ + if (database) + { + cloned.database = database->clone(); + cloned.children.push_back(cloned.database); + } + if (table) + { + cloned.table = table->clone(); + cloned.children.push_back(cloned.table); + } +} void ASTQueryWithTableAndOutput::formatHelper(const FormatSettings & settings, const char * name) const { settings.ostr << (settings.hilite ? hilite_keyword : "") << name << " " << (settings.hilite ? hilite_none : ""); - settings.ostr << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); + settings.ostr << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable()); } }
diff --git a/src/Parsers/ASTQueryWithTableAndOutput.h b/src/Parsers/ASTQueryWithTableAndOutput.h index 03f5fa7bf22..233028c5023 100644 --- a/src/Parsers/ASTQueryWithTableAndOutput.h +++ b/src/Parsers/ASTQueryWithTableAndOutput.h @@ -14,11 +14,21 @@ namespace DB class ASTQueryWithTableAndOutput : public ASTQueryWithOutput { public: - String database; - String table; + ASTPtr database; + ASTPtr table; + UUID uuid = UUIDHelpers::Nil; bool temporary{false}; + String getDatabase() const; + String getTable() const; + + // Once database or table are set they cannot be assigned with empty value + void setDatabase(const String & name); + void setTable(const String & name); + + void cloneTableOptions(ASTQueryWithTableAndOutput & cloned) const; + protected: void formatHelper(const FormatSettings & settings, const char * name) const; }; @@ -28,13 +38,14 @@ template <typename AstIDAndQueryNames> class ASTQueryWithTableAndOutputImpl : public ASTQueryWithTableAndOutput { public: - String getID(char delim) const override { return AstIDAndQueryNames::ID + (delim + database) + delim + table; } + String getID(char delim) const override { return AstIDAndQueryNames::ID + (delim + getDatabase()) + delim + getTable(); } ASTPtr clone() const override { auto res = std::make_shared<ASTQueryWithTableAndOutputImpl<AstIDAndQueryNames>>(*this); res->children.clear(); cloneOutputOptions(*res); + cloneTableOptions(*res); return res; }
diff --git a/src/Parsers/ASTRenameQuery.h b/src/Parsers/ASTRenameQuery.h index 4940bf42a3a..dafc166f672 100644 --- a/src/Parsers/ASTRenameQuery.h +++ b/src/Parsers/ASTRenameQuery.h @@ -25,6 +25,7 @@ public: { Table from; Table to; + bool if_exists{false}; /// If this directive is used, one will not get an error if the table/database/dictionary to be renamed/exchanged doesn't exist. }; using Elements = std::vector<Element>; @@ -72,6 +73,10 @@ protected: if (database) { settings.ostr << (settings.hilite ? hilite_keyword : "") << "RENAME DATABASE " << (settings.hilite ? hilite_none : ""); + + if (elements.at(0).if_exists) + settings.ostr << (settings.hilite ? hilite_keyword : "") << "IF EXISTS " << (settings.hilite ? hilite_none : ""); + settings.ostr << backQuoteIfNeed(elements.at(0).from.database); settings.ostr << (settings.hilite ? hilite_keyword : "") << " TO " << (settings.hilite ? hilite_none : ""); settings.ostr << backQuoteIfNeed(elements.at(0).to.database); @@ -96,6 +101,8 @@ protected: if (it != elements.cbegin()) settings.ostr << ", "; + if (it->if_exists) + settings.ostr << (settings.hilite ? hilite_keyword : "") << "IF EXISTS " << (settings.hilite ? hilite_none : ""); settings.ostr << (!it->from.database.empty() ? backQuoteIfNeed(it->from.database) + "." : "") << backQuoteIfNeed(it->from.table) << (settings.hilite ? hilite_keyword : "") << (exchange ? " AND " : " TO ") << (settings.hilite ? hilite_none : "") << (!it->to.database.empty() ? backQuoteIfNeed(it->to.database) + "." : "") << backQuoteIfNeed(it->to.table);
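Unlike the ASTInsertQuery setters earlier (where clone() re-registers the children), ASTQueryWithTableAndOutput::setDatabase()/setTable() keep IAST::children synchronized on every call. A sketch of that invariant, using ASTOptimizeQuery as an arbitrary concrete descendant; the table names are invented:

```cpp
#include <Parsers/ASTOptimizeQuery.h>
#include <cassert>

// Hypothetical check, not part of the patch.
void settersKeepChildrenInSync()
{
    DB::ASTOptimizeQuery query;
    query.setTable("t_old");
    query.setTable("t_new");            // std::erase() drops the old identifier child first
    assert(query.getTable() == "t_new");
    assert(query.children.size() == 1); // no stale identifier accumulates
}
```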
diff --git a/src/Parsers/ASTSelectIntersectExceptQuery.cpp b/src/Parsers/ASTSelectIntersectExceptQuery.cpp index 3b9cb0a2c16..62eeefba385 100644 --- a/src/Parsers/ASTSelectIntersectExceptQuery.cpp +++ b/src/Parsers/ASTSelectIntersectExceptQuery.cpp @@ -15,12 +15,10 @@ ASTPtr ASTSelectIntersectExceptQuery::clone() const res->children.push_back(child->clone()); res->final_operator = final_operator; - - cloneOutputOptions(*res); return res; } -void ASTSelectIntersectExceptQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const +void ASTSelectIntersectExceptQuery::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { std::string indent_str = settings.one_line ? "" : std::string(4 * frame.indent, ' '); @@ -38,4 +36,21 @@ void ASTSelectIntersectExceptQuery::formatQueryImpl(const FormatSettings & setti } } +ASTs ASTSelectIntersectExceptQuery::getListOfSelects() const +{ + /** + * Because of normalization actual number of selects is 2. + * But this is checked in InterpreterSelectIntersectExceptQuery. + */ + ASTs selects; + for (const auto & child : children) + { + if (typeid_cast<ASTSelectQuery *>(child.get()) + || typeid_cast<ASTSelectWithUnionQuery *>(child.get()) + || typeid_cast<ASTSelectIntersectExceptQuery *>(child.get())) + selects.push_back(child); + } + return selects; +} + }
diff --git a/src/Parsers/ASTSelectIntersectExceptQuery.h b/src/Parsers/ASTSelectIntersectExceptQuery.h index 97a8296ce2c..86475fcba5c 100644 --- a/src/Parsers/ASTSelectIntersectExceptQuery.h +++ b/src/Parsers/ASTSelectIntersectExceptQuery.h @@ -1,22 +1,18 @@ #pragma once -#include +#include namespace DB { -class ASTSelectIntersectExceptQuery : public ASTQueryWithOutput +class ASTSelectIntersectExceptQuery : public ASTSelectQuery { public: String getID(char) const override { return "SelectIntersectExceptQuery"; } ASTPtr clone() const override; - void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; - - const char * getQueryKindString() const override { return "SelectIntersectExcept"; } - enum class Operator { UNKNOWN, @@ -24,6 +20,12 @@ public: EXCEPT }; + void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; + + const char * getQueryKindString() const override { return "SelectIntersectExcept"; } + + ASTs getListOfSelects() const; + /// Final operator after applying visitor.
Operator final_operator = Operator::UNKNOWN; };
diff --git a/src/Parsers/ASTSystemQuery.cpp b/src/Parsers/ASTSystemQuery.cpp index ba8e49b98ca..b8056862bfc 100644 --- a/src/Parsers/ASTSystemQuery.cpp +++ b/src/Parsers/ASTSystemQuery.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -39,6 +40,50 @@ const char * ASTSystemQuery::typeToString(Type type) return type_name.data(); } +String ASTSystemQuery::getDatabase() const +{ + String name; + tryGetIdentifierNameInto(database, name); + return name; +} + +String ASTSystemQuery::getTable() const +{ + String name; + tryGetIdentifierNameInto(table, name); + return name; +} + +void ASTSystemQuery::setDatabase(const String & name) +{ + if (database) + { + std::erase(children, database); + database.reset(); + } + + if (!name.empty()) + { + database = std::make_shared<ASTIdentifier>(name); + children.push_back(database); + } +} + +void ASTSystemQuery::setTable(const String & name) +{ + if (table) + { + std::erase(children, table); + table.reset(); + } + + if (!name.empty()) + { + table = std::make_shared<ASTIdentifier>(name); + children.push_back(table); + } +} + void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const { settings.ostr << (settings.hilite ? hilite_keyword : "") << "SYSTEM "; @@ -47,19 +92,19 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &, auto print_database_table = [&] { settings.ostr << " "; - if (!database.empty()) + if (database) { - settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(database) + settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(getDatabase()) << (settings.hilite ? hilite_none : "") << "."; } - settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(table) + settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(getTable()) << (settings.hilite ? hilite_none : ""); }; auto print_drop_replica = [&] { settings.ostr << " " << quoteString(replica); - if (!table.empty()) + if (table) { settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM TABLE" << (settings.hilite ? hilite_none : ""); @@ -70,11 +115,11 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &, settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM ZKPATH " << (settings.hilite ? hilite_none : "") << quoteString(replica_zk_path); } - else if (!database.empty()) + else if (database) { settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM DATABASE " << (settings.hilite ? hilite_none : ""); - settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(database) + settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(getDatabase()) << (settings.hilite ? hilite_none : ""); } }; @@ -107,7 +152,7 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &, || type == Type::STOP_DISTRIBUTED_SENDS || type == Type::START_DISTRIBUTED_SENDS) { - if (!table.empty()) + if (table) print_database_table(); else if (!volume.empty()) print_on_volume();
diff --git a/src/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h index 419c22dd0d5..16f8a3c118a 100644 --- a/src/Parsers/ASTSystemQuery.h +++ b/src/Parsers/ASTSystemQuery.h @@ -70,10 +70,17 @@ public: Type type = Type::UNKNOWN; + ASTPtr database; + ASTPtr table; + + String getDatabase() const; + String getTable() const; + + void setDatabase(const String & name); + void setTable(const String & name); + String target_model; String target_function; - String database; - String table; String replica; String replica_zk_path; bool is_drop_whole_replica{}; @@ -84,7 +91,16 @@ public: String getID(char) const override { return "SYSTEM query"; } - ASTPtr clone() const override { return std::make_shared<ASTSystemQuery>(*this); } + ASTPtr clone() const override + { + auto res = std::make_shared<ASTSystemQuery>(*this); + res->children.clear(); + + if (database) { res->database = database->clone(); res->children.push_back(res->database); } + if (table) { res->table = table->clone(); res->children.push_back(res->table); } + + return res; + } ASTPtr getRewrittenASTWithoutOnCluster(const std::string & new_database) const override {
diff --git a/src/Parsers/ASTWatchQuery.h b/src/Parsers/ASTWatchQuery.h index 7db6443e024..ea02c18a51d 100644 --- a/src/Parsers/ASTWatchQuery.h +++ b/src/Parsers/ASTWatchQuery.h @@ -26,13 +26,14 @@ public: bool is_watch_events; ASTWatchQuery() = default; - String getID(char) const override { return "WatchQuery_" + database + "_" + table; } + String getID(char) const override { return "WatchQuery_" + getDatabase() + "_" + getTable(); } ASTPtr clone() const override { std::shared_ptr<ASTWatchQuery> res = std::make_shared<ASTWatchQuery>(*this); res->children.clear(); cloneOutputOptions(*res); + cloneTableOptions(*res); return res; } @@ -42,7 +43,7 @@ protected: std::string indent_str = s.one_line ? "" : std::string(4 * frame.indent, ' '); s.ostr << (s.hilite ? hilite_keyword : "") << "WATCH " << (s.hilite ? hilite_none : "") - << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); + << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable()); if (is_watch_events) {
diff --git a/src/Parsers/Access/ASTCreateQuotaQuery.cpp b/src/Parsers/Access/ASTCreateQuotaQuery.cpp index 4e4c84f9e93..0bb6872e3af 100644 --- a/src/Parsers/Access/ASTCreateQuotaQuery.cpp +++ b/src/Parsers/Access/ASTCreateQuotaQuery.cpp @@ -10,17 +10,10 @@ namespace DB { namespace { - using KeyType = Quota::KeyType; - using KeyTypeInfo = Quota::KeyTypeInfo; - using ResourceType = Quota::ResourceType; - using ResourceTypeInfo = Quota::ResourceTypeInfo; - using ResourceAmount = Quota::ResourceAmount; - - - void formatKeyType(const KeyType & key_type, const IAST::FormatSettings & settings) + void formatKeyType(const QuotaKeyType & key_type, const IAST::FormatSettings & settings) { - const auto & type_info = KeyTypeInfo::get(key_type); - if (key_type == KeyType::NONE) + const auto & type_info = QuotaKeyTypeInfo::get(key_type); + if (key_type == QuotaKeyType::NONE) { settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " NOT KEYED" << (settings.hilite ? IAST::hilite_none : ""); return; @@ -35,7 +28,7 @@ namespace { if (std::exchange(need_comma, true)) settings.ostr << ", "; - settings.ostr << KeyTypeInfo::get(base_type).name; + settings.ostr << QuotaKeyTypeInfo::get(base_type).name; } return; } @@ -64,10 +57,10 @@ namespace } - void formatLimit(ResourceType resource_type, ResourceAmount max, const IAST::FormatSettings & settings) + void formatLimit(QuotaType quota_type, QuotaValue max_value, const IAST::FormatSettings & settings) { - const auto & type_info = ResourceTypeInfo::get(resource_type); - settings.ostr << " " << type_info.name << " = " << type_info.amountToString(max); + const auto & type_info = QuotaTypeInfo::get(quota_type); + settings.ostr << " " << type_info.name << " = " << type_info.valueToString(max_value); } @@ -93,22 +86,24 @@ namespace else { bool limit_found = false; - for (auto resource_type : collections::range(Quota::MAX_RESOURCE_TYPE)) + for (auto quota_type : collections::range(QuotaType::MAX)) { - if (limits.max[resource_type]) + auto quota_type_i = static_cast<size_t>(quota_type); + if (limits.max[quota_type_i]) limit_found = true; } if (limit_found) { settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " MAX" << (settings.hilite ? IAST::hilite_none : ""); bool need_comma = false; - for (auto resource_type : collections::range(Quota::MAX_RESOURCE_TYPE)) + for (auto quota_type : collections::range(QuotaType::MAX)) { - if (limits.max[resource_type]) + auto quota_type_i = static_cast<size_t>(quota_type); + if (limits.max[quota_type_i]) { if (std::exchange(need_comma, true)) settings.ostr << ","; - formatLimit(resource_type, *limits.max[resource_type], settings); + formatLimit(quota_type, *limits.max[quota_type_i], settings); } } }
diff --git a/src/Parsers/Access/ASTCreateQuotaQuery.h b/src/Parsers/Access/ASTCreateQuotaQuery.h index 00984d4b4c9..154245cbfe3 100644 --- a/src/Parsers/Access/ASTCreateQuotaQuery.h +++ b/src/Parsers/Access/ASTCreateQuotaQuery.h @@ -2,7 +2,7 @@ #include #include -#include +#include namespace DB @@ -35,16 +35,13 @@ public: bool if_not_exists = false; bool or_replace = false; - using KeyType = Quota::KeyType; - using ResourceAmount = Quota::ResourceAmount; - Strings names; String new_name; - std::optional<KeyType> key_type; + std::optional<QuotaKeyType> key_type; struct Limits { - std::optional<ResourceAmount> max[Quota::MAX_RESOURCE_TYPE]; + std::optional<QuotaValue> max[static_cast<size_t>(QuotaType::MAX)]; bool drop = false; std::chrono::seconds duration = std::chrono::seconds::zero(); bool randomize_interval = false;
diff --git a/src/Parsers/Access/ASTCreateRowPolicyQuery.cpp b/src/Parsers/Access/ASTCreateRowPolicyQuery.cpp index 0267379d6e5..d968fdd3250 100644 --- a/src/Parsers/Access/ASTCreateRowPolicyQuery.cpp +++ b/src/Parsers/Access/ASTCreateRowPolicyQuery.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include @@ -12,10 +13,6 @@ namespace DB { namespace { - using ConditionType = RowPolicy::ConditionType; - using ConditionTypeInfo = RowPolicy::ConditionTypeInfo; - - void formatRenameTo(const String & new_short_name, const IAST::FormatSettings & settings) { settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " RENAME TO " << (settings.hilite ?
IAST::hilite_none : "") @@ -30,7 +27,7 @@ namespace } - void formatConditionalExpression(const ASTPtr & expr, const IAST::FormatSettings & settings) + void formatFilterExpression(const ASTPtr & expr, const IAST::FormatSettings & settings) { settings.ostr << " "; if (expr) @@ -59,15 +56,15 @@ namespace } - void formatForClauses(const std::vector> & conditions, bool alter, const IAST::FormatSettings & settings) + void formatForClauses(const std::vector> & filters, bool alter, const IAST::FormatSettings & settings) { - std::vector> conditions_as_strings; + std::vector> filters_as_strings; WriteBufferFromOwnString temp_buf; IAST::FormatSettings temp_settings(temp_buf, settings); - for (const auto & [condition_type, condition] : conditions) + for (const auto & [filter_type, filter] : filters) { - formatConditionalExpression(condition, temp_settings); - conditions_as_strings.emplace_back(condition_type, temp_buf.str()); + formatFilterExpression(filter, temp_settings); + filters_as_strings.emplace_back(filter_type, temp_buf.str()); temp_buf.restart(); } @@ -81,27 +78,27 @@ namespace check.clear(); /// Collect commands using the same filter and check conditions. - for (auto & [condition_type, condition] : conditions_as_strings) + for (auto & [filter_type, str] : filters_as_strings) { - if (condition.empty()) + if (str.empty()) continue; - const auto & type_info = ConditionTypeInfo::get(condition_type); + const auto & type_info = RowPolicyFilterTypeInfo::get(filter_type); if (type_info.is_check) { if (check.empty()) - check = condition; - else if (check != condition) + check = str; + else if (check != str) continue; } else { if (filter.empty()) - filter = condition; - else if (filter != condition) + filter = str; + else if (filter != str) continue; } commands.emplace(type_info.command); - condition.clear(); /// Skip this condition on the next iteration. + str.clear(); /// Skip this condition on the next iteration. } if (!filter.empty() || !check.empty()) @@ -162,7 +159,7 @@ void ASTCreateRowPolicyQuery::formatImpl(const FormatSettings & settings, Format if (is_restrictive) formatAsRestrictiveOrPermissive(*is_restrictive, settings); - formatForClauses(conditions, alter, settings); + formatForClauses(filters, alter, settings); if (roles && (!roles->empty() || alter)) formatToRoles(*roles, settings); diff --git a/src/Parsers/Access/ASTCreateRowPolicyQuery.h b/src/Parsers/Access/ASTCreateRowPolicyQuery.h index 46a7578726e..dc698c25c6d 100644 --- a/src/Parsers/Access/ASTCreateRowPolicyQuery.h +++ b/src/Parsers/Access/ASTCreateRowPolicyQuery.h @@ -2,7 +2,7 @@ #include #include -#include +#include #include @@ -40,7 +40,7 @@ public: String new_short_name; std::optional is_restrictive; - std::vector> conditions; /// `nullptr` means set to NONE. + std::vector> filters; /// `nullptr` means set to NONE. std::shared_ptr roles; diff --git a/src/Parsers/Access/ASTDropAccessEntityQuery.cpp b/src/Parsers/Access/ASTDropAccessEntityQuery.cpp index 19064ad9109..22b30d47ffa 100644 --- a/src/Parsers/Access/ASTDropAccessEntityQuery.cpp +++ b/src/Parsers/Access/ASTDropAccessEntityQuery.cpp @@ -8,8 +8,6 @@ namespace DB { namespace { - using EntityTypeInfo = IAccessEntity::TypeInfo; - void formatNames(const Strings & names, const IAST::FormatSettings & settings) { bool need_comma = false; @@ -38,11 +36,11 @@ ASTPtr ASTDropAccessEntityQuery::clone() const void ASTDropAccessEntityQuery::formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const { settings.ostr << (settings.hilite ? 
hilite_keyword : "") - << "DROP " << EntityTypeInfo::get(type).name + << "DROP " << AccessEntityTypeInfo::get(type).name << (if_exists ? " IF EXISTS" : "") << (settings.hilite ? hilite_none : ""); - if (type == EntityType::ROW_POLICY) + if (type == AccessEntityType::ROW_POLICY) { settings.ostr << " "; row_policy_names->format(settings); diff --git a/src/Parsers/Access/ASTDropAccessEntityQuery.h b/src/Parsers/Access/ASTDropAccessEntityQuery.h index df78acef6f4..b1a6ca58a18 100644 --- a/src/Parsers/Access/ASTDropAccessEntityQuery.h +++ b/src/Parsers/Access/ASTDropAccessEntityQuery.h @@ -1,8 +1,8 @@ #pragma once #include -#include #include +#include namespace DB @@ -18,9 +18,7 @@ class ASTRowPolicyNames; class ASTDropAccessEntityQuery : public IAST, public ASTQueryWithOnCluster { public: - using EntityType = IAccessEntity::Type; - - EntityType type; + AccessEntityType type; bool if_exists = false; Strings names; std::shared_ptr row_policy_names; diff --git a/src/Parsers/Access/ASTRowPolicyName.cpp b/src/Parsers/Access/ASTRowPolicyName.cpp index c8b8107af20..280713fe9d9 100644 --- a/src/Parsers/Access/ASTRowPolicyName.cpp +++ b/src/Parsers/Access/ASTRowPolicyName.cpp @@ -1,4 +1,5 @@ #include +#include #include @@ -12,9 +13,9 @@ namespace ErrorCodes void ASTRowPolicyName::formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const { - const String & database = name_parts.database; - const String & table_name = name_parts.table_name; - const String & short_name = name_parts.short_name; + const String & database = full_name.database; + const String & table_name = full_name.table_name; + const String & short_name = full_name.short_name; settings.ostr << backQuoteIfNeed(short_name) << (settings.hilite ? hilite_keyword : "") << " ON " << (settings.hilite ? hilite_none : "") << (database.empty() ? 
String{} : backQuoteIfNeed(database) + ".") << backQuoteIfNeed(table_name); @@ -25,21 +26,21 @@ void ASTRowPolicyName::formatImpl(const FormatSettings & settings, FormatState & void ASTRowPolicyName::replaceEmptyDatabase(const String & current_database) { - if (name_parts.database.empty()) - name_parts.database = current_database; + if (full_name.database.empty()) + full_name.database = current_database; } void ASTRowPolicyNames::formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const { - if (name_parts.empty()) + if (full_names.empty()) throw Exception("No names of row policies in AST", ErrorCodes::LOGICAL_ERROR); bool same_short_name = true; - if (name_parts.size() > 1) + if (full_names.size() > 1) { - for (size_t i = 1; i != name_parts.size(); ++i) - if (name_parts[i].short_name != name_parts[0].short_name) + for (size_t i = 1; i != full_names.size(); ++i) + if (full_names[i].short_name != full_names[0].short_name) { same_short_name = false; break; @@ -47,10 +48,10 @@ void ASTRowPolicyNames::formatImpl(const FormatSettings & settings, FormatState } bool same_db_and_table_name = true; - if (name_parts.size() > 1) + if (full_names.size() > 1) { - for (size_t i = 1; i != name_parts.size(); ++i) - if ((name_parts[i].database != name_parts[0].database) || (name_parts[i].table_name != name_parts[0].table_name)) + for (size_t i = 1; i != full_names.size(); ++i) + if ((full_names[i].database != full_names[0].database) || (full_names[i].table_name != full_names[0].table_name)) { same_db_and_table_name = false; break; @@ -59,17 +60,17 @@ void ASTRowPolicyNames::formatImpl(const FormatSettings & settings, FormatState if (same_short_name) { - const String & short_name = name_parts[0].short_name; + const String & short_name = full_names[0].short_name; settings.ostr << backQuoteIfNeed(short_name) << (settings.hilite ? hilite_keyword : "") << " ON " << (settings.hilite ? hilite_none : ""); bool need_comma = false; - for (const auto & np : name_parts) + for (const auto & full_name : full_names) { if (std::exchange(need_comma, true)) settings.ostr << ", "; - const String & database = np.database; - const String & table_name = np.table_name; + const String & database = full_name.database; + const String & table_name = full_name.table_name; if (!database.empty()) settings.ostr << backQuoteIfNeed(database) + "."; settings.ostr << backQuoteIfNeed(table_name); @@ -78,16 +79,16 @@ void ASTRowPolicyNames::formatImpl(const FormatSettings & settings, FormatState else if (same_db_and_table_name) { bool need_comma = false; - for (const auto & np : name_parts) + for (const auto & full_name : full_names) { if (std::exchange(need_comma, true)) settings.ostr << ", "; - const String & short_name = np.short_name; + const String & short_name = full_name.short_name; settings.ostr << backQuoteIfNeed(short_name); } - const String & database = name_parts[0].database; - const String & table_name = name_parts[0].table_name; + const String & database = full_names[0].database; + const String & table_name = full_names[0].table_name; settings.ostr << (settings.hilite ? hilite_keyword : "") << " ON " << (settings.hilite ? 
hilite_none : ""); if (!database.empty()) settings.ostr << backQuoteIfNeed(database) + "."; @@ -96,13 +97,13 @@ void ASTRowPolicyNames::formatImpl(const FormatSettings & settings, FormatState else { bool need_comma = false; - for (const auto & np : name_parts) + for (const auto & full_name : full_names) { if (std::exchange(need_comma, true)) settings.ostr << ", "; - const String & short_name = np.short_name; - const String & database = np.database; - const String & table_name = np.table_name; + const String & short_name = full_name.short_name; + const String & database = full_name.database; + const String & table_name = full_name.table_name; settings.ostr << backQuoteIfNeed(short_name) << (settings.hilite ? hilite_keyword : "") << " ON " << (settings.hilite ? hilite_none : ""); if (!database.empty()) @@ -118,18 +119,18 @@ void ASTRowPolicyNames::formatImpl(const FormatSettings & settings, FormatState Strings ASTRowPolicyNames::toStrings() const { Strings res; - res.reserve(name_parts.size()); - for (const auto & np : name_parts) - res.emplace_back(np.toString()); + res.reserve(full_names.size()); + for (const auto & full_name : full_names) + res.emplace_back(full_name.toString()); return res; } void ASTRowPolicyNames::replaceEmptyDatabase(const String & current_database) { - for (auto & np : name_parts) - if (np.database.empty()) - np.database = current_database; + for (auto & full_name : full_names) + if (full_name.database.empty()) + full_name.database = current_database; } } diff --git a/src/Parsers/Access/ASTRowPolicyName.h b/src/Parsers/Access/ASTRowPolicyName.h index b195596225b..43270b0185d 100644 --- a/src/Parsers/Access/ASTRowPolicyName.h +++ b/src/Parsers/Access/ASTRowPolicyName.h @@ -2,7 +2,7 @@ #include #include -#include +#include namespace DB @@ -14,8 +14,8 @@ namespace DB class ASTRowPolicyName : public IAST, public ASTQueryWithOnCluster { public: - RowPolicy::NameParts name_parts; - String toString() const { return name_parts.getName(); } + RowPolicyName full_name; + String toString() const { return full_name.toString(); } String getID(char) const override { return "RowPolicyName"; } ASTPtr clone() const override { return std::make_shared(*this); } @@ -36,7 +36,7 @@ public: class ASTRowPolicyNames : public IAST, public ASTQueryWithOnCluster { public: - std::vector name_parts; + std::vector full_names; Strings toStrings() const; String getID(char) const override { return "RowPolicyNames"; } diff --git a/src/Parsers/Access/ASTShowAccessEntitiesQuery.cpp b/src/Parsers/Access/ASTShowAccessEntitiesQuery.cpp index e2dfe031f53..f6c42171d5b 100644 --- a/src/Parsers/Access/ASTShowAccessEntitiesQuery.cpp +++ b/src/Parsers/Access/ASTShowAccessEntitiesQuery.cpp @@ -5,8 +5,6 @@ namespace DB { -using EntityTypeInfo = IAccessEntity::TypeInfo; - String ASTShowAccessEntitiesQuery::getKeyword() const { @@ -16,7 +14,7 @@ String ASTShowAccessEntitiesQuery::getKeyword() const return "CURRENT ROLES"; if (enabled_roles) return "ENABLED ROLES"; - return EntityTypeInfo::get(type).plural_name; + return AccessEntityTypeInfo::get(type).plural_name; } diff --git a/src/Parsers/Access/ASTShowAccessEntitiesQuery.h b/src/Parsers/Access/ASTShowAccessEntitiesQuery.h index 2be1e0b92f0..e633a4b506a 100644 --- a/src/Parsers/Access/ASTShowAccessEntitiesQuery.h +++ b/src/Parsers/Access/ASTShowAccessEntitiesQuery.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB @@ -16,9 +16,7 @@ namespace DB class ASTShowAccessEntitiesQuery : public ASTQueryWithOutput { public: - using EntityType = 
IAccessEntity::Type; - - EntityType type; + AccessEntityType type; bool all = false; bool current_quota = false; diff --git a/src/Parsers/Access/ASTShowCreateAccessEntityQuery.cpp b/src/Parsers/Access/ASTShowCreateAccessEntityQuery.cpp index db252db968d..e92af22f14f 100644 --- a/src/Parsers/Access/ASTShowCreateAccessEntityQuery.cpp +++ b/src/Parsers/Access/ASTShowCreateAccessEntityQuery.cpp @@ -8,9 +8,6 @@ namespace DB { namespace { - using EntityType = IAccessEntity::Type; - using EntityTypeInfo = IAccessEntity::TypeInfo; - void formatNames(const Strings & names, const IAST::FormatSettings & settings) { bool need_comma = false; @@ -28,7 +25,7 @@ String ASTShowCreateAccessEntityQuery::getKeyword() const { size_t total_count = (names.size()) + (row_policy_names ? row_policy_names->size() : 0) + current_user + current_quota; bool multiple = (total_count != 1) || all || !short_name.empty() || database_and_table_name; - const auto & type_info = EntityTypeInfo::get(type); + const auto & type_info = AccessEntityTypeInfo::get(type); return multiple ? type_info.plural_name : type_info.name; } diff --git a/src/Parsers/Access/ASTShowCreateAccessEntityQuery.h b/src/Parsers/Access/ASTShowCreateAccessEntityQuery.h index e20bb4f022e..27f13587033 100644 --- a/src/Parsers/Access/ASTShowCreateAccessEntityQuery.h +++ b/src/Parsers/Access/ASTShowCreateAccessEntityQuery.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB @@ -24,9 +24,7 @@ using Strings = std::vector; class ASTShowCreateAccessEntityQuery : public ASTQueryWithOutput { public: - using EntityType = IAccessEntity::Type; - - EntityType type; + AccessEntityType type; Strings names; std::shared_ptr row_policy_names; diff --git a/src/Parsers/Access/ParserCreateQuotaQuery.cpp b/src/Parsers/Access/ParserCreateQuotaQuery.cpp index 0c6e1224cce..a06464913b7 100644 --- a/src/Parsers/Access/ParserCreateQuotaQuery.cpp +++ b/src/Parsers/Access/ParserCreateQuotaQuery.cpp @@ -11,7 +11,10 @@ #include #include #include +#include +#include #include +#include namespace DB @@ -24,13 +27,6 @@ namespace ErrorCodes namespace { - using KeyType = Quota::KeyType; - using KeyTypeInfo = Quota::KeyTypeInfo; - using ResourceType = Quota::ResourceType; - using ResourceTypeInfo = Quota::ResourceTypeInfo; - using ResourceAmount = Quota::ResourceAmount; - - bool parseRenameTo(IParserBase::Pos & pos, Expected & expected, String & new_name) { return IParserBase::wrapParseImpl(pos, [&] @@ -42,13 +38,13 @@ namespace }); } - bool parseKeyType(IParserBase::Pos & pos, Expected & expected, KeyType & key_type) + bool parseKeyType(IParserBase::Pos & pos, Expected & expected, QuotaKeyType & key_type) { return IParserBase::wrapParseImpl(pos, [&] { if (ParserKeyword{"NOT KEYED"}.ignore(pos, expected)) { - key_type = KeyType::NONE; + key_type = QuotaKeyType::NONE; return true; } @@ -63,9 +59,9 @@ namespace boost::to_lower(name); boost::replace_all(name, " ", "_"); - for (auto kt : collections::range(Quota::KeyType::MAX)) + for (auto kt : collections::range(QuotaKeyType::MAX)) { - if (KeyTypeInfo::get(kt).name == name) + if (QuotaKeyTypeInfo::get(kt).name == name) { key_type = kt; return true; @@ -73,23 +69,23 @@ namespace } String all_types_str; - for (auto kt : collections::range(Quota::KeyType::MAX)) - all_types_str += String(all_types_str.empty() ? "" : ", ") + "'" + KeyTypeInfo::get(kt).name + "'"; + for (auto kt : collections::range(QuotaKeyType::MAX)) + all_types_str += String(all_types_str.empty() ? 
"" : ", ") + "'" + QuotaKeyTypeInfo::get(kt).name + "'"; String msg = "Quota cannot be keyed by '" + name + "'. Expected one of the following identifiers: " + all_types_str; throw Exception(msg, ErrorCodes::SYNTAX_ERROR); }); } - bool parseResourceType(IParserBase::Pos & pos, Expected & expected, ResourceType & resource_type) + bool parseQuotaType(IParserBase::Pos & pos, Expected & expected, QuotaType & quota_type) { return IParserBase::wrapParseImpl(pos, [&] { - for (auto rt : collections::range(Quota::MAX_RESOURCE_TYPE)) + for (auto qt : collections::range(QuotaType::MAX)) { - if (ParserKeyword{ResourceTypeInfo::get(rt).keyword.c_str()}.ignore(pos, expected)) + if (ParserKeyword{QuotaTypeInfo::get(qt).keyword.c_str()}.ignore(pos, expected)) { - resource_type = rt; + quota_type = qt; return true; } } @@ -99,11 +95,11 @@ namespace return false; String name = getIdentifierName(ast); - for (auto rt : collections::range(Quota::MAX_RESOURCE_TYPE)) + for (auto qt : collections::range(QuotaType::MAX)) { - if (ResourceTypeInfo::get(rt).name == name) + if (QuotaTypeInfo::get(qt).name == name) { - resource_type = rt; + quota_type = qt; return true; } } @@ -113,34 +109,33 @@ namespace } - bool parseMaxAmount(IParserBase::Pos & pos, Expected & expected, ResourceType resource_type, ResourceAmount & max) + bool parseMaxValue(IParserBase::Pos & pos, Expected & expected, QuotaType quota_type, QuotaValue & max_value) { ASTPtr ast; if (!ParserNumber{}.parse(pos, ast, expected)) return false; const Field & max_field = ast->as().value; - const auto & type_info = ResourceTypeInfo::get(resource_type); + const auto & type_info = QuotaTypeInfo::get(quota_type); if (type_info.output_denominator == 1) - max = applyVisitor(FieldVisitorConvertToNumber(), max_field); + max_value = applyVisitor(FieldVisitorConvertToNumber(), max_field); else - max = static_cast( - applyVisitor(FieldVisitorConvertToNumber(), max_field) * type_info.output_denominator); + max_value = static_cast(applyVisitor(FieldVisitorConvertToNumber(), max_field) * type_info.output_denominator); return true; } - bool parseLimits(IParserBase::Pos & pos, Expected & expected, std::vector> & limits) + bool parseLimits(IParserBase::Pos & pos, Expected & expected, std::vector> & limits) { - std::vector> res_limits; + std::vector> res_limits; bool max_prefix_encountered = false; auto parse_limit = [&] { max_prefix_encountered |= ParserKeyword{"MAX"}.ignore(pos, expected); - ResourceType resource_type; - if (!parseResourceType(pos, expected, resource_type)) + QuotaType quota_type; + if (!parseQuotaType(pos, expected, quota_type)) return false; if (max_prefix_encountered) @@ -153,11 +148,11 @@ namespace return false; } - ResourceAmount max; - if (!parseMaxAmount(pos, expected, resource_type, max)) + QuotaValue max_value; + if (!parseMaxValue(pos, expected, quota_type, max_value)) return false; - res_limits.emplace_back(resource_type, max); + res_limits.emplace_back(quota_type, max_value); return true; }; @@ -193,7 +188,7 @@ namespace return false; limits.duration = std::chrono::seconds(static_cast(num_intervals * interval_kind.toAvgSeconds())); - std::vector> maxs; + std::vector> new_limits; if (ParserKeyword{"NO LIMITS"}.ignore(pos, expected)) { @@ -202,10 +197,10 @@ namespace else if (ParserKeyword{"TRACKING ONLY"}.ignore(pos, expected)) { } - else if (parseLimits(pos, expected, maxs)) + else if (parseLimits(pos, expected, new_limits)) { - for (const auto & [resource_type, max] : maxs) - limits.max[resource_type] = max; + for (const auto & [quota_type, 
max_value] : new_limits) + limits.max[static_cast(quota_type)] = max_value; } else return false; @@ -283,7 +278,7 @@ bool ParserCreateQuotaQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe return false; String new_name; - std::optional key_type; + std::optional key_type; std::vector all_limits; String cluster; @@ -294,7 +289,7 @@ bool ParserCreateQuotaQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe if (!key_type) { - KeyType new_key_type; + QuotaKeyType new_key_type; if (parseKeyType(pos, expected, new_key_type)) { key_type = new_key_type; diff --git a/src/Parsers/Access/ParserCreateRowPolicyQuery.cpp b/src/Parsers/Access/ParserCreateRowPolicyQuery.cpp index f6a33ec84a3..dd01f7d024d 100644 --- a/src/Parsers/Access/ParserCreateRowPolicyQuery.cpp +++ b/src/Parsers/Access/ParserCreateRowPolicyQuery.cpp @@ -9,8 +9,9 @@ #include #include #include -#include +#include #include +#include #include @@ -18,11 +19,6 @@ namespace DB { namespace { - using ConditionType = RowPolicy::ConditionType; - using ConditionTypeInfo = RowPolicy::ConditionTypeInfo; - constexpr auto MAX_CONDITION_TYPE = RowPolicy::MAX_CONDITION_TYPE; - - bool parseRenameTo(IParserBase::Pos & pos, Expected & expected, String & new_short_name) { return IParserBase::wrapParseImpl(pos, [&] @@ -55,7 +51,7 @@ namespace }); } - bool parseConditionalExpression(IParserBase::Pos & pos, Expected & expected, ASTPtr & expr) + bool parseFilterExpression(IParserBase::Pos & pos, Expected & expected, ASTPtr & expr) { return IParserBase::wrapParseImpl(pos, [&] { @@ -78,9 +74,9 @@ namespace void addAllCommands(boost::container::flat_set & commands) { - for (auto condition_type : collections::range(MAX_CONDITION_TYPE)) + for (auto filter_type : collections::range(RowPolicyFilterType::MAX)) { - const std::string_view & command = ConditionTypeInfo::get(condition_type).command; + const std::string_view & command = RowPolicyFilterTypeInfo::get(filter_type).command; commands.emplace(command); } } @@ -99,9 +95,9 @@ namespace return true; } - for (auto condition_type : collections::range(MAX_CONDITION_TYPE)) + for (auto filter_type : collections::range(RowPolicyFilterType::MAX)) { - const std::string_view & command = ConditionTypeInfo::get(condition_type).command; + const std::string_view & command = RowPolicyFilterTypeInfo::get(filter_type).command; if (ParserKeyword{command.data()}.ignore(pos, expected)) { res_commands.emplace(command); @@ -120,10 +116,10 @@ namespace } - bool - parseForClauses(IParserBase::Pos & pos, Expected & expected, bool alter, std::vector> & conditions) + bool parseForClauses( + IParserBase::Pos & pos, Expected & expected, bool alter, std::vector> & filters) { - std::vector> res_conditions; + std::vector> res_filters; auto parse_for_clause = [&] { @@ -141,12 +137,12 @@ namespace std::optional check; if (ParserKeyword{"USING"}.ignore(pos, expected)) { - if (!parseConditionalExpression(pos, expected, filter.emplace())) + if (!parseFilterExpression(pos, expected, filter.emplace())) return false; } if (ParserKeyword{"WITH CHECK"}.ignore(pos, expected)) { - if (!parseConditionalExpression(pos, expected, check.emplace())) + if (!parseFilterExpression(pos, expected, check.emplace())) return false; } @@ -156,15 +152,15 @@ namespace if (!check && !alter) check = filter; - for (auto condition_type : collections::range(MAX_CONDITION_TYPE)) + for (auto filter_type : collections::range(RowPolicyFilterType::MAX)) { - const auto & type_info = ConditionTypeInfo::get(condition_type); + const auto & type_info = 
RowPolicyFilterTypeInfo::get(filter_type); if (commands.count(type_info.command)) { if (type_info.is_check && check) - res_conditions.emplace_back(condition_type, *check); + res_filters.emplace_back(filter_type, *check); else if (filter) - res_conditions.emplace_back(condition_type, *filter); + res_filters.emplace_back(filter_type, *filter); } @@ -174,7 +170,7 @@ namespace if (!ParserList::parseUtil(pos, expected, parse_for_clause, false)) return false; - conditions = std::move(res_conditions); + filters = std::move(res_filters); return true; } @@ -249,11 +245,11 @@ bool ParserCreateRowPolicyQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & String new_short_name; std::optional<bool> is_restrictive; - std::vector<std::pair<ConditionType, ASTPtr>> conditions; + std::vector<std::pair<RowPolicyFilterType, ASTPtr>> filters; while (true) { - if (alter && new_short_name.empty() && (names->name_parts.size() == 1) && parseRenameTo(pos, expected, new_short_name)) + if (alter && (names->full_names.size() == 1) && new_short_name.empty() && parseRenameTo(pos, expected, new_short_name)) continue; if (!is_restrictive) @@ -266,10 +262,10 @@ bool ParserCreateRowPolicyQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & } } - std::vector<std::pair<ConditionType, ASTPtr>> new_conditions; - if (parseForClauses(pos, expected, alter, new_conditions)) + std::vector<std::pair<RowPolicyFilterType, ASTPtr>> new_filters; + if (parseForClauses(pos, expected, alter, new_filters)) { - boost::range::push_back(conditions, std::move(new_conditions)); + boost::range::push_back(filters, std::move(new_filters)); continue; } @@ -297,7 +293,7 @@ bool ParserCreateRowPolicyQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & query->names = std::move(names); query->new_short_name = std::move(new_short_name); query->is_restrictive = is_restrictive; - query->conditions = std::move(conditions); + query->filters = std::move(filters); query->roles = std::move(roles); return true;
diff --git a/src/Parsers/Access/ParserDropAccessEntityQuery.cpp b/src/Parsers/Access/ParserDropAccessEntityQuery.cpp index d91cd8280a7..1a6784d2d3c 100644 --- a/src/Parsers/Access/ParserDropAccessEntityQuery.cpp +++ b/src/Parsers/Access/ParserDropAccessEntityQuery.cpp @@ -12,15 +12,11 @@ namespace DB { namespace { - using EntityType = IAccessEntity::Type; - using EntityTypeInfo = IAccessEntity::TypeInfo; - - - bool parseEntityType(IParserBase::Pos & pos, Expected & expected, EntityType & type) + bool parseEntityType(IParserBase::Pos & pos, Expected & expected, AccessEntityType & type) { - for (auto i : collections::range(EntityType::MAX)) + for (auto i : collections::range(AccessEntityType::MAX)) { - const auto & type_info = EntityTypeInfo::get(i); + const auto & type_info = AccessEntityTypeInfo::get(i); if (ParserKeyword{type_info.name.c_str()}.ignore(pos, expected) || (!type_info.alias.empty() && ParserKeyword{type_info.alias.c_str()}.ignore(pos, expected))) { @@ -47,7 +43,7 @@ bool ParserDropAccessEntityQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & if (!ParserKeyword{"DROP"}.ignore(pos, expected)) return false; - EntityType type; + AccessEntityType type; if (!parseEntityType(pos, expected, type)) return false; @@ -59,12 +55,12 @@ bool ParserDropAccessEntityQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & std::shared_ptr<ASTRowPolicyNames> row_policy_names; String cluster; - if ((type == EntityType::USER) || (type == EntityType::ROLE)) + if ((type == AccessEntityType::USER) || (type == AccessEntityType::ROLE)) { if (!parseUserNames(pos, expected, names)) return false; } - else if (type == EntityType::ROW_POLICY) + else if (type == AccessEntityType::ROW_POLICY) { ParserRowPolicyNames parser; ASTPtr ast;
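The row-policy refactoring replaces the (ConditionType, ASTPtr) pairs with (RowPolicyFilterType, ASTPtr) throughout. A consumer-side sketch of the renamed API, assuming the new definitions live in Access/Common/RowPolicyDefs.h as the include swaps in these hunks suggest; the helper function is invented:

```cpp
#include <Parsers/Access/ASTCreateRowPolicyQuery.h>
#include <Access/Common/RowPolicyDefs.h>
#include <iostream>

// Hypothetical consumer, not part of the patch.
void dumpFilters(const DB::ASTCreateRowPolicyQuery & query)
{
    for (const auto & [filter_type, filter_ast] : query.filters)
    {
        const auto & info = DB::RowPolicyFilterTypeInfo::get(filter_type);
        // A null ASTPtr still means the filter was explicitly set to NONE.
        std::cout << info.command << (filter_ast ? " has an expression" : " is NONE") << '\n';
    }
}
```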
diff --git a/src/Parsers/Access/ParserRowPolicyName.cpp b/src/Parsers/Access/ParserRowPolicyName.cpp index aa159532754..7df4e5a36dc 100644 --- a/src/Parsers/Access/ParserRowPolicyName.cpp +++ b/src/Parsers/Access/ParserRowPolicyName.cpp @@ -91,7 +91,7 @@ namespace bool allow_multiple_short_names, bool allow_multiple_tables, bool allow_on_cluster, - std::vector & name_parts, + std::vector & full_names, String & cluster) { return IParserBase::wrapParseImpl(pos, [&] @@ -132,10 +132,10 @@ namespace assert(!short_names.empty()); assert(!database_and_table_names.empty()); - name_parts.clear(); + full_names.clear(); for (const String & short_name : short_names) for (const auto & [database, table_name] : database_and_table_names) - name_parts.push_back({short_name, database, table_name}); + full_names.push_back({short_name, database, table_name}); cluster = std::move(res_cluster); return true; @@ -146,14 +146,14 @@ namespace bool ParserRowPolicyName::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { - std::vector name_parts; + std::vector full_names; String cluster; - if (!parseRowPolicyNamesAroundON(pos, expected, false, false, allow_on_cluster, name_parts, cluster)) + if (!parseRowPolicyNamesAroundON(pos, expected, false, false, allow_on_cluster, full_names, cluster)) return false; - assert(name_parts.size() == 1); + assert(full_names.size() == 1); auto result = std::make_shared(); - result->name_parts = std::move(name_parts.front()); + result->full_name = std::move(full_names.front()); result->cluster = std::move(cluster); node = result; return true; @@ -162,24 +162,24 @@ bool ParserRowPolicyName::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte bool ParserRowPolicyNames::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { - std::vector name_parts; + std::vector full_names; size_t num_added_names_last_time = 0; String cluster; auto parse_around_on = [&] { - if (!name_parts.empty()) + if (!full_names.empty()) { if ((num_added_names_last_time != 1) || !cluster.empty()) return false; } - std::vector new_name_parts; - if (!parseRowPolicyNamesAroundON(pos, expected, name_parts.empty(), name_parts.empty(), allow_on_cluster, new_name_parts, cluster)) + std::vector new_full_names; + if (!parseRowPolicyNamesAroundON(pos, expected, full_names.empty(), full_names.empty(), allow_on_cluster, new_full_names, cluster)) return false; - num_added_names_last_time = new_name_parts.size(); - boost::range::push_back(name_parts, std::move(new_name_parts)); + num_added_names_last_time = new_full_names.size(); + boost::range::push_back(full_names, std::move(new_full_names)); return true; }; @@ -187,7 +187,7 @@ bool ParserRowPolicyNames::parseImpl(Pos & pos, ASTPtr & node, Expected & expect return false; auto result = std::make_shared(); - result->name_parts = std::move(name_parts); + result->full_names = std::move(full_names); result->cluster = std::move(cluster); node = result; return true; diff --git a/src/Parsers/Access/ParserRowPolicyName.h b/src/Parsers/Access/ParserRowPolicyName.h index 6af0519d161..d311469faa5 100644 --- a/src/Parsers/Access/ParserRowPolicyName.h +++ b/src/Parsers/Access/ParserRowPolicyName.h @@ -1,7 +1,6 @@ #pragma once #include -#include namespace DB diff --git a/src/Parsers/Access/ParserShowAccessEntitiesQuery.cpp b/src/Parsers/Access/ParserShowAccessEntitiesQuery.cpp index b1329735b64..3953c28c356 100644 --- a/src/Parsers/Access/ParserShowAccessEntitiesQuery.cpp +++ b/src/Parsers/Access/ParserShowAccessEntitiesQuery.cpp @@ -10,14 +10,11 @@ namespace DB { namespace { - 
using EntityType = IAccessEntity::Type;
-    using EntityTypeInfo = IAccessEntity::TypeInfo;
-
-    bool parseEntityType(IParserBase::Pos & pos, Expected & expected, EntityType & type)
+    bool parseEntityType(IParserBase::Pos & pos, Expected & expected, AccessEntityType & type)
     {
-        for (auto i : collections::range(EntityType::MAX))
+        for (auto i : collections::range(AccessEntityType::MAX))
         {
-            const auto & type_info = EntityTypeInfo::get(i);
+            const auto & type_info = AccessEntityTypeInfo::get(i);
             if (ParserKeyword{type_info.plural_name.c_str()}.ignore(pos, expected)
                 || (!type_info.plural_alias.empty() && ParserKeyword{type_info.plural_alias.c_str()}.ignore(pos, expected)))
             {
@@ -44,7 +41,7 @@ bool ParserShowAccessEntitiesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected
     if (!ParserKeyword{"SHOW"}.ignore(pos, expected))
         return false;
 
-    EntityType type;
+    AccessEntityType type;
     bool all = false;
     bool current_quota = false;
     bool current_roles = false;
@@ -56,17 +53,17 @@ bool ParserShowAccessEntitiesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected
     }
     else if (ParserKeyword{"CURRENT ROLES"}.ignore(pos, expected))
     {
-        type = EntityType::ROLE;
+        type = AccessEntityType::ROLE;
         current_roles = true;
     }
     else if (ParserKeyword{"ENABLED ROLES"}.ignore(pos, expected))
     {
-        type = EntityType::ROLE;
+        type = AccessEntityType::ROLE;
         enabled_roles = true;
     }
     else if (ParserKeyword{"CURRENT QUOTA"}.ignore(pos, expected) || ParserKeyword{"QUOTA"}.ignore(pos, expected))
     {
-        type = EntityType::QUOTA;
+        type = AccessEntityType::QUOTA;
         current_quota = true;
     }
     else
@@ -74,7 +71,7 @@ bool ParserShowAccessEntitiesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected
     String short_name;
     std::optional<std::pair<String, String>> database_and_table_name;
 
-    if (type == EntityType::ROW_POLICY)
+    if (type == AccessEntityType::ROW_POLICY)
     {
         String database, table_name;
         bool any_database, any_table;
diff --git a/src/Parsers/Access/ParserShowCreateAccessEntityQuery.cpp b/src/Parsers/Access/ParserShowCreateAccessEntityQuery.cpp
index 2df04513361..88fafcaaf22 100644
--- a/src/Parsers/Access/ParserShowCreateAccessEntityQuery.cpp
+++ b/src/Parsers/Access/ParserShowCreateAccessEntityQuery.cpp
@@ -20,14 +20,11 @@ namespace ErrorCodes
 namespace
 {
-    using EntityType = IAccessEntity::Type;
-    using EntityTypeInfo = IAccessEntity::TypeInfo;
-
-    bool parseEntityType(IParserBase::Pos & pos, Expected & expected, EntityType & type, bool & plural)
+    bool parseEntityType(IParserBase::Pos & pos, Expected & expected, AccessEntityType & type, bool & plural)
     {
-        for (auto i : collections::range(EntityType::MAX))
+        for (auto i : collections::range(AccessEntityType::MAX))
         {
-            const auto & type_info = EntityTypeInfo::get(i);
+            const auto & type_info = AccessEntityTypeInfo::get(i);
             if (ParserKeyword{type_info.name.c_str()}.ignore(pos, expected)
                 || (!type_info.alias.empty() && ParserKeyword{type_info.alias.c_str()}.ignore(pos, expected)))
             {
@@ -37,9 +34,9 @@ namespace
             }
         }
 
-        for (auto i : collections::range(EntityType::MAX))
+        for (auto i : collections::range(AccessEntityType::MAX))
         {
-            const auto & type_info = EntityTypeInfo::get(i);
+            const auto & type_info = AccessEntityTypeInfo::get(i);
             if (ParserKeyword{type_info.plural_name.c_str()}.ignore(pos, expected)
                 || (!type_info.plural_alias.empty() && ParserKeyword{type_info.plural_alias.c_str()}.ignore(pos, expected)))
             {
@@ -68,7 +65,7 @@ bool ParserShowCreateAccessEntityQuery::parseImpl(Pos & pos, ASTPtr & node, Expe
     if (!ParserKeyword{"SHOW CREATE"}.ignore(pos, expected))
         return false;
 
-    EntityType type;
+    AccessEntityType type;
     bool plural;
     if (!parseEntityType(pos, expected, type, plural))
         return false;
@@ -83,7 +80,7 @@ bool ParserShowCreateAccessEntityQuery::parseImpl(Pos & pos, ASTPtr & node, Expe
     switch (type)
     {
-        case EntityType::USER:
+        case AccessEntityType::USER:
         {
             if (parseCurrentUserTag(pos, expected))
                 current_user = true;
@@ -96,7 +93,7 @@ bool ParserShowCreateAccessEntityQuery::parseImpl(Pos & pos, ASTPtr & node, Expe
                 current_user = true;
             break;
         }
-        case EntityType::ROLE:
+        case AccessEntityType::ROLE:
         {
             if (parseRoleNames(pos, expected, names))
             {
@@ -107,7 +104,7 @@ bool ParserShowCreateAccessEntityQuery::parseImpl(Pos & pos, ASTPtr & node, Expe
                 return false;
             break;
         }
-        case EntityType::ROW_POLICY:
+        case AccessEntityType::ROW_POLICY:
         {
             ASTPtr ast;
             String database, table_name;
@@ -130,7 +127,7 @@ bool ParserShowCreateAccessEntityQuery::parseImpl(Pos & pos, ASTPtr & node, Expe
                 return false;
             break;
         }
-        case EntityType::SETTINGS_PROFILE:
+        case AccessEntityType::SETTINGS_PROFILE:
        {
             if (parseIdentifiersOrStringLiterals(pos, expected, names))
             {
@@ -141,7 +138,7 @@ bool ParserShowCreateAccessEntityQuery::parseImpl(Pos & pos, ASTPtr & node, Expe
                 return false;
             break;
         }
-        case EntityType::QUOTA:
+        case AccessEntityType::QUOTA:
         {
             if (parseIdentifiersOrStringLiterals(pos, expected, names))
             {
@@ -152,7 +149,7 @@ bool ParserShowCreateAccessEntityQuery::parseImpl(Pos & pos, ASTPtr & node, Expe
                 current_quota = true;
             break;
         }
-        case EntityType::MAX:
+        case AccessEntityType::MAX:
             throw Exception("Type " + toString(type) + " is not implemented in SHOW CREATE query", ErrorCodes::NOT_IMPLEMENTED);
     }
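For readers following the rename: EntityType/EntityTypeInfo were nested aliases of IAccessEntity, and the hunks above switch every call site to the free-standing AccessEntityType/AccessEntityTypeInfo. The parsing idiom itself is unchanged: probe each entity type's spelling(s) until a keyword matches. A minimal, self-contained sketch of that idiom (the enum values and singular/plural names mirror the diff; the table and helper below are hypothetical stand-ins for AccessEntityTypeInfo::get):

    #include <array>
    #include <optional>
    #include <string_view>

    enum class AccessEntityType { USER, ROLE, QUOTA, ROW_POLICY, SETTINGS_PROFILE, MAX };

    struct TypeInfo { std::string_view name; std::string_view plural_name; };

    static constexpr std::array<TypeInfo, 5> type_infos{{
        {"USER", "USERS"}, {"ROLE", "ROLES"}, {"QUOTA", "QUOTAS"},
        {"ROW POLICY", "ROW POLICIES"}, {"SETTINGS PROFILE", "SETTINGS PROFILES"},
    }};

    // Map a keyword such as "ROW POLICIES" back to its enum value, the way the
    // parseEntityType() loops above do with ParserKeyword::ignore().
    std::optional<AccessEntityType> parseEntityType(std::string_view keyword)
    {
        for (size_t i = 0; i < type_infos.size(); ++i)
            if (keyword == type_infos[i].name || keyword == type_infos[i].plural_name)
                return static_cast<AccessEntityType>(i);
        return std::nullopt; // keyword did not match any entity type
    }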
diff --git a/src/Parsers/ParserAlterQuery.cpp b/src/Parsers/ParserAlterQuery.cpp
index 1ea64d94fe7..66645ccaf0e 100644
--- a/src/Parsers/ParserAlterQuery.cpp
+++ b/src/Parsers/ParserAlterQuery.cpp
@@ -861,12 +861,12 @@ bool ParserAlterQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
 
     if (alter_object_type == ASTAlterQuery::AlterObjectType::DATABASE)
     {
-        if (!parseDatabase(pos, expected, query->database))
+        if (!parseDatabaseAsAST(pos, expected, query->database))
             return false;
     }
     else
     {
-        if (!parseDatabaseAndTableName(pos, expected, query->database, query->table))
+        if (!parseDatabaseAndTableAsAST(pos, expected, query->database, query->table))
             return false;
 
         String cluster_str;
@@ -886,6 +886,12 @@ bool ParserAlterQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
     query->set(query->command_list, command_list);
     query->alter_object = alter_object_type;
 
+    if (query->database)
+        query->children.push_back(query->database);
+
+    if (query->table)
+        query->children.push_back(query->table);
+
     return true;
 }
 
diff --git a/src/Parsers/ParserCheckQuery.cpp b/src/Parsers/ParserCheckQuery.cpp
index c397e1c33c5..a89416ab253 100644
--- a/src/Parsers/ParserCheckQuery.cpp
+++ b/src/Parsers/ParserCheckQuery.cpp
@@ -4,6 +4,7 @@
 #include
 #include
 #include
+#include <Parsers/parseDatabaseAndTableName.h>
 
 namespace DB
@@ -15,31 +16,15 @@ bool ParserCheckQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
     ParserKeyword s_partition("PARTITION");
     ParserToken s_dot(TokenType::Dot);
 
-    ParserIdentifier table_parser;
     ParserPartition partition_parser;
 
-    ASTPtr table;
-    ASTPtr database;
-
     if (!s_check_table.ignore(pos, expected))
         return false;
-    if (!table_parser.parse(pos, database, expected))
-        return false;
 
     auto query = std::make_shared<ASTCheckQuery>();
-    if (s_dot.ignore(pos))
-    {
-        if (!table_parser.parse(pos, table, expected))
-            return false;
-        tryGetIdentifierNameInto(database, query->database);
-        tryGetIdentifierNameInto(table, query->table);
-    }
-    else
-    {
-        table = database;
-        tryGetIdentifierNameInto(table, query->table);
-    }
+    if (!parseDatabaseAndTableAsAST(pos, expected, query->database, query->table))
+        return false;
 
     if (s_partition.ignore(pos, expected))
     {
@@ -47,6 +32,12 @@ bool ParserCheckQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
             return false;
     }
 
+    if (query->database)
+        query->children.push_back(query->database);
+
+    if (query->table)
+        query->children.push_back(query->table);
+
     node = query;
     return true;
 }
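Both ALTER and CHECK now parse [db.]table into identifier AST nodes instead of flattening them to strings (parseDatabaseAndTableAsAST itself is defined later in this diff, in src/Parsers/parseDatabaseAndTableName.cpp). The invariant repeated at every call site is that the identifiers must also be registered as children of the query AST, or tree visitors will never see them. A reduced sketch of that contract, with a hypothetical ASTNode standing in for ClickHouse's IAST/ASTPtr:

    #include <memory>
    #include <vector>

    // Hypothetical, stripped-down AST node; the real type is DB::IAST.
    struct ASTNode
    {
        std::vector<std::shared_ptr<ASTNode>> children;
    };

    struct ASTCheckQuery : ASTNode
    {
        std::shared_ptr<ASTNode> database; // stays null for plain "CHECK TABLE t"
        std::shared_ptr<ASTNode> table;
    };

    void attachNames(ASTCheckQuery & query,
                     std::shared_ptr<ASTNode> database,
                     std::shared_ptr<ASTNode> table)
    {
        query.database = database;
        query.table = table;

        // Without these pushes the identifiers would be unreachable for AST
        // visitors, e.g. query-parameter substitution could never rewrite them.
        if (query.database)
            query.children.push_back(query.database);
        if (query.table)
            query.children.push_back(query.table);
    }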
ParserKeyword s_create("CREATE"); ParserKeyword s_attach("ATTACH"); ParserKeyword s_if_not_exists("IF NOT EXISTS"); - ParserCompoundIdentifier table_name_p(true); + ParserCompoundIdentifier table_name_p(true, true); ParserKeyword s_as("AS"); ParserKeyword s_view("VIEW"); ParserKeyword s_live("LIVE"); @@ -735,12 +753,17 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e query->if_not_exists = if_not_exists; query->is_live_view = true; - auto table_id = table->as()->getTableId(); - query->database = table_id.database_name; - query->table = table_id.table_name; - query->uuid = table_id.uuid; + auto * table_id = table->as(); + query->database = table_id->getDatabase(); + query->table = table_id->getTable(); + query->uuid = table_id->uuid; query->cluster = cluster_str; + if (query->database) + query->children.push_back(query->database); + if (query->table) + query->children.push_back(query->table); + if (to_table) query->to_table_id = to_table->as()->getTableId(); @@ -766,7 +789,7 @@ bool ParserCreateDatabaseQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e ParserKeyword s_database("DATABASE"); ParserKeyword s_if_not_exists("IF NOT EXISTS"); ParserStorage storage_p; - ParserIdentifier name_p; + ParserIdentifier name_p(true); ASTPtr database; ASTPtr storage; @@ -817,9 +840,12 @@ bool ParserCreateDatabaseQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e query->attach = attach; query->if_not_exists = if_not_exists; - tryGetIdentifierNameInto(database, query->database); query->uuid = uuid; query->cluster = cluster_str; + query->database = database; + + if (database) + query->children.push_back(database); query->set(query->storage, storage); if (comment) @@ -833,7 +859,7 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec ParserKeyword s_create("CREATE"); ParserKeyword s_attach("ATTACH"); ParserKeyword s_if_not_exists("IF NOT EXISTS"); - ParserCompoundIdentifier table_name_p(true); + ParserCompoundIdentifier table_name_p(true, true); ParserKeyword s_as("AS"); ParserKeyword s_view("VIEW"); ParserKeyword s_materialized("MATERIALIZED"); @@ -954,12 +980,17 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec query->is_populate = is_populate; query->replace_view = replace_view; - auto table_id = table->as()->getTableId(); - query->database = table_id.database_name; - query->table = table_id.table_name; - query->uuid = table_id.uuid; + auto * table_id = table->as(); + query->database = table_id->getDatabase(); + query->table = table_id->getTable(); + query->uuid = table_id->uuid; query->cluster = cluster_str; + if (query->database) + query->children.push_back(query->database); + if (query->table) + query->children.push_back(query->table); + if (to_table) query->to_table_id = to_table->as()->getTableId(); if (to_inner_uuid) @@ -987,7 +1018,7 @@ bool ParserCreateDictionaryQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, E ParserKeyword s_dictionary("DICTIONARY"); ParserKeyword s_if_not_exists("IF NOT EXISTS"); ParserKeyword s_on("ON"); - ParserCompoundIdentifier dict_name_p(true); + ParserCompoundIdentifier dict_name_p(true, true); ParserToken s_left_paren(TokenType::OpeningRoundBracket); ParserToken s_right_paren(TokenType::ClosingRoundBracket); ParserToken s_dot(TokenType::Dot); @@ -1059,10 +1090,15 @@ bool ParserCreateDictionaryQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, E query->create_or_replace = or_replace; query->replace_table = replace; - auto dict_id = name->as()->getTableId(); - 
query->database = dict_id.database_name; - query->table = dict_id.table_name; - query->uuid = dict_id.uuid; + auto * dict_id = name->as(); + query->database = dict_id->getDatabase(); + query->table = dict_id->getTable(); + query->uuid = dict_id->uuid; + + if (query->database) + query->children.push_back(query->database); + if (query->table) + query->children.push_back(query->table); query->if_not_exists = if_not_exists; query->set(query->dictionary_attributes_list, attributes); diff --git a/src/Parsers/ParserDropQuery.cpp b/src/Parsers/ParserDropQuery.cpp index 5400f33fbd9..211c7699723 100644 --- a/src/Parsers/ParserDropQuery.cpp +++ b/src/Parsers/ParserDropQuery.cpp @@ -20,7 +20,7 @@ bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, cons ParserKeyword s_database("DATABASE"); ParserToken s_dot(TokenType::Dot); ParserKeyword s_if_exists("IF EXISTS"); - ParserIdentifier name_p; + ParserIdentifier name_p(true); ParserKeyword s_permanently("PERMANENTLY"); ParserKeyword s_no_delay("NO DELAY"); ParserKeyword s_sync("SYNC"); @@ -96,9 +96,14 @@ bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, cons query->is_view = is_view; query->no_delay = no_delay; query->permanently = permanently; + query->database = database; + query->table = table; - tryGetIdentifierNameInto(database, query->database); - tryGetIdentifierNameInto(table, query->table); + if (database) + query->children.push_back(database); + + if (table) + query->children.push_back(table); query->cluster = cluster_str; diff --git a/src/Parsers/ParserExternalDDLQuery.cpp b/src/Parsers/ParserExternalDDLQuery.cpp index c64c50cd99f..9a87de9c2b1 100644 --- a/src/Parsers/ParserExternalDDLQuery.cpp +++ b/src/Parsers/ParserExternalDDLQuery.cpp @@ -7,7 +7,7 @@ #include #include -#ifdef USE_MYSQL +#if USE_MYSQL # include # include #endif @@ -15,7 +15,7 @@ namespace DB { -#ifdef USE_MYSQL +#if USE_MYSQL namespace ErrorCodes { extern const int MYSQL_SYNTAX_ERROR; @@ -41,7 +41,7 @@ bool ParserExternalDDLQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expect bool res = false; if (external_ddl_query->from->name == "MySQL") { -#ifdef USE_MYSQL +#if USE_MYSQL ParserDropQuery p_drop_query; ParserRenameQuery p_rename_query; MySQLParser::ParserAlterQuery p_alter_query; diff --git a/src/Parsers/ParserInsertQuery.cpp b/src/Parsers/ParserInsertQuery.cpp index 1bc552af384..8925de4cf55 100644 --- a/src/Parsers/ParserInsertQuery.cpp +++ b/src/Parsers/ParserInsertQuery.cpp @@ -41,7 +41,7 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ParserKeyword s_with("WITH"); ParserToken s_lparen(TokenType::OpeningRoundBracket); ParserToken s_rparen(TokenType::ClosingRoundBracket); - ParserIdentifier name_p; + ParserIdentifier name_p(true); ParserList columns_p(std::make_unique(), std::make_unique(TokenType::Comma), false); ParserFunction table_function_p{false}; ParserStringLiteral infile_name_p; @@ -244,8 +244,13 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) } else { - tryGetIdentifierNameInto(database, query->table_id.database_name); - tryGetIdentifierNameInto(table, query->table_id.table_name); + query->database = database; + query->table = table; + + if (database) + query->children.push_back(database); + if (table) + query->children.push_back(table); } query->columns = columns; diff --git a/src/Parsers/ParserOptimizeQuery.cpp b/src/Parsers/ParserOptimizeQuery.cpp index 441cec1465e..b1bfd43936b 100644 --- a/src/Parsers/ParserOptimizeQuery.cpp +++ 
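The ParserConstraintDeclaration hunk above makes ASSUME an alternative to CHECK: the keyword selects ASTConstraintDeclaration::Type::ASSUME, while name and expression parse exactly as before. A gtest-style sketch of what should now parse, modeled on the dictionary-parser tests further down in this diff (the columns_list->constraints field access is an assumption about ASTCreateQuery's layout, not taken from the hunks above):

    // Sketch: parse a CREATE TABLE with one CHECK and one ASSUME constraint
    // and verify the type ParserConstraintDeclaration assigned to each.
    TEST(ParserCreateQuery, AssumeConstraint)
    {
        String input = "CREATE TABLE t (x UInt32, "
                       "CONSTRAINT c0 CHECK x > 0, "
                       "CONSTRAINT c1 ASSUME x < 100) ENGINE = Memory";

        ParserCreateQuery parser;
        ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);

        auto * create = ast->as<ASTCreateQuery>();
        const auto & constraints = create->columns_list->constraints->children; // assumed field path

        EXPECT_EQ(constraints[0]->as<ASTConstraintDeclaration>()->type,
                  ASTConstraintDeclaration::Type::CHECK);
        EXPECT_EQ(constraints[1]->as<ASTConstraintDeclaration>()->type,
                  ASTConstraintDeclaration::Type::ASSUME);
    }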
diff --git a/src/Parsers/ParserDropQuery.cpp b/src/Parsers/ParserDropQuery.cpp
index 5400f33fbd9..211c7699723 100644
--- a/src/Parsers/ParserDropQuery.cpp
+++ b/src/Parsers/ParserDropQuery.cpp
@@ -20,7 +20,7 @@ bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, cons
     ParserKeyword s_database("DATABASE");
     ParserToken s_dot(TokenType::Dot);
     ParserKeyword s_if_exists("IF EXISTS");
-    ParserIdentifier name_p;
+    ParserIdentifier name_p(true);
     ParserKeyword s_permanently("PERMANENTLY");
     ParserKeyword s_no_delay("NO DELAY");
     ParserKeyword s_sync("SYNC");
@@ -96,9 +96,14 @@ bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, cons
     query->is_view = is_view;
     query->no_delay = no_delay;
     query->permanently = permanently;
+    query->database = database;
+    query->table = table;
 
-    tryGetIdentifierNameInto(database, query->database);
-    tryGetIdentifierNameInto(table, query->table);
+    if (database)
+        query->children.push_back(database);
+
+    if (table)
+        query->children.push_back(table);
 
     query->cluster = cluster_str;
 
diff --git a/src/Parsers/ParserExternalDDLQuery.cpp b/src/Parsers/ParserExternalDDLQuery.cpp
index c64c50cd99f..9a87de9c2b1 100644
--- a/src/Parsers/ParserExternalDDLQuery.cpp
+++ b/src/Parsers/ParserExternalDDLQuery.cpp
@@ -7,7 +7,7 @@
 #include
 #include
 
-#ifdef USE_MYSQL
+#if USE_MYSQL
 #    include
 #    include
 #endif
@@ -15,7 +15,7 @@
 namespace DB
 {
 
-#ifdef USE_MYSQL
+#if USE_MYSQL
 namespace ErrorCodes
 {
     extern const int MYSQL_SYNTAX_ERROR;
@@ -41,7 +41,7 @@ bool ParserExternalDDLQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expect
     bool res = false;
     if (external_ddl_query->from->name == "MySQL")
     {
-#ifdef USE_MYSQL
+#if USE_MYSQL
         ParserDropQuery p_drop_query;
         ParserRenameQuery p_rename_query;
         MySQLParser::ParserAlterQuery p_alter_query;
diff --git a/src/Parsers/ParserInsertQuery.cpp b/src/Parsers/ParserInsertQuery.cpp
index 1bc552af384..8925de4cf55 100644
--- a/src/Parsers/ParserInsertQuery.cpp
+++ b/src/Parsers/ParserInsertQuery.cpp
@@ -41,7 +41,7 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
     ParserKeyword s_with("WITH");
     ParserToken s_lparen(TokenType::OpeningRoundBracket);
     ParserToken s_rparen(TokenType::ClosingRoundBracket);
-    ParserIdentifier name_p;
+    ParserIdentifier name_p(true);
     ParserList columns_p(std::make_unique<ParserInsertElement>(), std::make_unique<ParserToken>(TokenType::Comma), false);
     ParserFunction table_function_p{false};
     ParserStringLiteral infile_name_p;
@@ -244,8 +244,13 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
     }
     else
     {
-        tryGetIdentifierNameInto(database, query->table_id.database_name);
-        tryGetIdentifierNameInto(table, query->table_id.table_name);
+        query->database = database;
+        query->table = table;
+
+        if (database)
+            query->children.push_back(database);
+        if (table)
+            query->children.push_back(table);
     }
 
     query->columns = columns;
diff --git a/src/Parsers/ParserOptimizeQuery.cpp b/src/Parsers/ParserOptimizeQuery.cpp
index 441cec1465e..b1bfd43936b 100644
--- a/src/Parsers/ParserOptimizeQuery.cpp
+++ b/src/Parsers/ParserOptimizeQuery.cpp
@@ -31,7 +31,7 @@ bool ParserOptimizeQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte
     ParserKeyword s_deduplicate("DEDUPLICATE");
     ParserKeyword s_by("BY");
     ParserToken s_dot(TokenType::Dot);
-    ParserIdentifier name_p;
+    ParserIdentifier name_p(true);
     ParserPartition partition_p;
 
     ASTPtr database;
@@ -80,15 +80,20 @@ bool ParserOptimizeQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte
     auto query = std::make_shared<ASTOptimizeQuery>();
     node = query;
 
-    tryGetIdentifierNameInto(database, query->database);
-    tryGetIdentifierNameInto(table, query->table);
-
     query->cluster = cluster_str;
     if ((query->partition = partition))
         query->children.push_back(partition);
     query->final = final;
     query->deduplicate = deduplicate;
     query->deduplicate_by_columns = deduplicate_by_columns;
+    query->database = database;
+    query->table = table;
+
+    if (database)
+        query->children.push_back(database);
+
+    if (table)
+        query->children.push_back(table);
 
     return true;
 }
diff --git a/src/Parsers/ParserRenameQuery.cpp b/src/Parsers/ParserRenameQuery.cpp
index c42a0af88b2..59813140b7c 100644
--- a/src/Parsers/ParserRenameQuery.cpp
+++ b/src/Parsers/ParserRenameQuery.cpp
@@ -44,6 +44,7 @@ bool ParserRenameQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
     ParserKeyword s_rename_dictionary("RENAME DICTIONARY");
     ParserKeyword s_exchange_dictionaries("EXCHANGE DICTIONARIES");
     ParserKeyword s_rename_database("RENAME DATABASE");
+    ParserKeyword s_if_exists("IF EXISTS");
     ParserKeyword s_to("TO");
     ParserKeyword s_and("AND");
     ParserToken s_comma(TokenType::Comma);
@@ -67,6 +68,7 @@ bool ParserRenameQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
         ASTPtr from_db;
         ASTPtr to_db;
         ParserIdentifier db_name_p;
+        bool if_exists = s_if_exists.ignore(pos, expected);
         if (!db_name_p.parse(pos, from_db, expected))
             return false;
         if (!s_to.ignore(pos, expected))
@@ -84,6 +86,7 @@ bool ParserRenameQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
         auto query = std::make_shared<ASTRenameQuery>();
         query->database = true;
         query->elements.emplace({});
+        query->elements.front().if_exists = if_exists;
         tryGetIdentifierNameInto(from_db, query->elements.front().from.database);
         tryGetIdentifierNameInto(to_db, query->elements.front().to.database);
         query->cluster = cluster_str;
@@ -104,6 +107,9 @@ bool ParserRenameQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
 
         ASTRenameQuery::Element& ref = elements.emplace_back();
 
+        if (!exchange)
+            ref.if_exists = s_if_exists.ignore(pos, expected);
+
         if (!parseDatabaseAndTable(ref.from, pos, expected)
             || !ignore_delim()
             || !parseDatabaseAndTable(ref.to, pos, expected))
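The two ParserRenameQuery hunks above thread an optional IF EXISTS through both forms of RENAME: for RENAME DATABASE it is parsed once before the source name; for table renames it is parsed per element, and deliberately skipped for EXCHANGE, where "skip if missing" has no sensible meaning. A gtest-style sketch of the resulting behaviour (the elements member and its Element::if_exists field are named in the hunks; the assertions themselves are assumed, not part of the diff):

    // Sketch: per-element IF EXISTS lands in ASTRenameQuery::Element::if_exists.
    String input = "RENAME TABLE IF EXISTS db.t1 TO db.t2, t3 TO t4";
    ParserRenameQuery parser;
    ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);

    auto * rename = ast->as<ASTRenameQuery>();
    EXPECT_TRUE(rename->elements[0].if_exists);   // first pair carried IF EXISTS
    EXPECT_FALSE(rename->elements[1].if_exists);  // second pair did not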
diff --git a/src/Parsers/ParserSelectQuery.cpp b/src/Parsers/ParserSelectQuery.cpp
index b1f7570878f..90ab5911d6b 100644
--- a/src/Parsers/ParserSelectQuery.cpp
+++ b/src/Parsers/ParserSelectQuery.cpp
@@ -23,6 +23,7 @@ namespace ErrorCodes
     extern const int SYNTAX_ERROR;
     extern const int TOP_AND_LIMIT_TOGETHER;
     extern const int WITH_TIES_WITHOUT_ORDER_BY;
+    extern const int OFFSET_FETCH_WITHOUT_ORDER_BY;
 }
 
 
@@ -323,7 +324,7 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
         {
             /// OFFSET FETCH clause must exists with "ORDER BY"
             if (!order_expression_list)
-                return false;
+                throw Exception("Can not use OFFSET FETCH clause without ORDER BY", ErrorCodes::OFFSET_FETCH_WITHOUT_ORDER_BY);
 
             if (s_first.ignore(pos, expected))
             {
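The ParserSelectQuery hunk above converts a silent parse failure into a diagnosable error: OFFSET ... FETCH requires ORDER BY, and previously returning false just made the parser backtrack, typically surfacing as an unrelated generic syntax error. A sketch of the observable difference (the error-code name comes from the hunk; the test harness usage mirrors the gtest files later in this diff and is otherwise assumed):

    // Before: silent backtrack. After: a targeted exception from parseQuery().
    String input = "SELECT * FROM t OFFSET 10 ROWS FETCH FIRST 5 ROWS ONLY";
    ParserSelectQuery parser;
    EXPECT_THROW(
        parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0),
        DB::Exception); // OFFSET_FETCH_WITHOUT_ORDER_BY; add ORDER BY x to make it parse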
diff --git a/src/Parsers/ParserSystemQuery.cpp b/src/Parsers/ParserSystemQuery.cpp
index b861cbd2a5a..5a61929bdb3 100644
--- a/src/Parsers/ParserSystemQuery.cpp
+++ b/src/Parsers/ParserSystemQuery.cpp
@@ -39,14 +39,13 @@ static bool parseQueryWithOnClusterAndMaybeTable(std::shared_ptr<ASTSystemQuery>
         ASTPtr ast;
         if (ParserStringLiteral{}.parse(pos, ast, expected))
         {
-            res->database = {};
-            res->table = ast->as<ASTLiteral &>().value.safeGet<String>();
+            res->setTable(ast->as<ASTLiteral &>().value.safeGet<String>());
             parsed_table = true;
         }
     }
 
     if (!parsed_table)
-        parsed_table = parseDatabaseAndTableName(pos, expected, res->database, res->table);
+        parsed_table = parseDatabaseAndTableAsAST(pos, expected, res->database, res->table);
 
     if (!parsed_table && require_table)
         return false;
@@ -56,6 +55,12 @@ static bool parseQueryWithOnClusterAndMaybeTable(std::shared_ptr<ASTSystemQuery>
         return false;
 
     res->cluster = cluster;
+
+    if (res->database)
+        res->children.push_back(res->database);
+    if (res->table)
+        res->children.push_back(res->table);
+
     return true;
 }
 
@@ -163,14 +168,12 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
             if (ParserKeyword{"DATABASE"}.ignore(pos, expected))
             {
                 ParserIdentifier database_parser;
-                ASTPtr database;
-                if (!database_parser.parse(pos, database, expected))
+                if (!database_parser.parse(pos, res->database, expected))
                     return false;
-                tryGetIdentifierNameInto(database, res->database);
             }
             else if (ParserKeyword{"TABLE"}.ignore(pos, expected))
             {
-                parseDatabaseAndTableName(pos, expected, res->database, res->table);
+                parseDatabaseAndTableAsAST(pos, expected, res->database, res->table);
             }
             else if (ParserKeyword{"ZKPATH"}.ignore(pos, expected))
             {
@@ -193,7 +196,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
 
         case Type::RESTART_REPLICA:
         case Type::SYNC_REPLICA:
-            if (!parseDatabaseAndTableName(pos, expected, res->database, res->table))
+            if (!parseDatabaseAndTableAsAST(pos, expected, res->database, res->table))
                 return false;
             break;
 
@@ -251,7 +254,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
             res->storage_policy = storage_policy_str;
             res->volume = volume_str;
             if (res->volume.empty() && res->storage_policy.empty())
-                parseDatabaseAndTableName(pos, expected, res->database, res->table);
+                parseDatabaseAndTableAsAST(pos, expected, res->database, res->table);
             break;
         }
 
@@ -265,7 +268,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
         case Type::START_REPLICATED_SENDS:
         case Type::STOP_REPLICATION_QUEUES:
         case Type::START_REPLICATION_QUEUES:
-            parseDatabaseAndTableName(pos, expected, res->database, res->table);
+            parseDatabaseAndTableAsAST(pos, expected, res->database, res->table);
             break;
 
         case Type::SUSPEND:
@@ -287,6 +290,11 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
             break;
     }
 
+    if (res->database)
+        res->children.push_back(res->database);
+    if (res->table)
+        res->children.push_back(res->table);
+
     node = std::move(res);
     return true;
 }
diff --git a/src/Parsers/ParserTablePropertiesQuery.cpp b/src/Parsers/ParserTablePropertiesQuery.cpp
index 30be37bc4a1..bf3ff399f61 100644
--- a/src/Parsers/ParserTablePropertiesQuery.cpp
+++ b/src/Parsers/ParserTablePropertiesQuery.cpp
@@ -24,7 +24,7 @@ bool ParserTablePropertiesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected &
     ParserKeyword s_view("VIEW");
     ParserKeyword s_dictionary("DICTIONARY");
     ParserToken s_dot(TokenType::Dot);
-    ParserIdentifier name_p;
+    ParserIdentifier name_p(true);
 
     ASTPtr database;
     ASTPtr table;
@@ -110,8 +110,14 @@ bool ParserTablePropertiesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected &
         }
     }
 
-    tryGetIdentifierNameInto(database, query->database);
-    tryGetIdentifierNameInto(table, query->table);
+    query->database = database;
+    query->table = table;
+
+    if (database)
+        query->children.push_back(database);
+
+    if (table)
+        query->children.push_back(table);
 
     node = query;
 
diff --git a/src/Parsers/ParserWatchQuery.cpp b/src/Parsers/ParserWatchQuery.cpp
index 5171f4f2536..40d2c47bd4f 100644
--- a/src/Parsers/ParserWatchQuery.cpp
+++ b/src/Parsers/ParserWatchQuery.cpp
@@ -24,7 +24,7 @@ bool ParserWatchQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
 {
     ParserKeyword s_watch("WATCH");
     ParserToken s_dot(TokenType::Dot);
-    ParserIdentifier name_p;
+    ParserIdentifier name_p(true);
     ParserKeyword s_events("EVENTS");
     ParserKeyword s_limit("LIMIT");
 
@@ -62,11 +62,14 @@ bool ParserWatchQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
             return false;
     }
 
+    query->database = database;
+    query->table = table;
+
     if (database)
-        query->database = getIdentifierName(database);
+        query->children.push_back(database);
 
     if (table)
-        query->table = getIdentifierName(table);
+        query->children.push_back(table);
 
     node = query;
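A pattern worth calling out across the last several files: ParserIdentifier name_p(true). The new boolean enables identifiers supplied as query parameters, which is exactly why DROP/OPTIMIZE/WATCH/INSERT/EXISTS now keep database and table as AST children instead of eagerly flattening them with tryGetIdentifierNameInto. A hypothetical mini-visitor showing why the identifiers must stay reachable in the tree (the real mechanism is ClickHouse's parameter-substitution visitor; everything below is an illustrative stand-in):

    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    struct Ident
    {
        std::string name; // "{db:Identifier}" placeholder, or a concrete name
        std::vector<std::shared_ptr<Ident>> children;
    };

    // Rewrites placeholders in place, e.g. for DROP TABLE {db:Identifier}.{tb:Identifier}.
    // tryGetIdentifierNameInto() forced a String at parse time, before parameter
    // values are known, so parameterized identifiers could not be supported.
    void substitute(Ident & node, const std::map<std::string, std::string> & params)
    {
        if (auto it = params.find(node.name); it != params.end())
            node.name = it->second;        // resolve the placeholder
        for (auto & child : node.children) // reachable only via children!
            substitute(*child, params);
    }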
diff --git a/src/Parsers/TablePropertiesQueriesASTs.h b/src/Parsers/TablePropertiesQueriesASTs.h
index edb040d72d9..b5baddcf268 100644
--- a/src/Parsers/TablePropertiesQueriesASTs.h
+++ b/src/Parsers/TablePropertiesQueriesASTs.h
@@ -89,7 +89,7 @@ protected:
     void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override
     {
         settings.ostr << (settings.hilite ? hilite_keyword : "") << ASTExistsDatabaseQueryIDAndQueryNames::Query
-            << " " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(database);
+            << " " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(getDatabase());
     }
 };
 
@@ -99,7 +99,7 @@ protected:
     void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override
     {
         settings.ostr << (settings.hilite ? hilite_keyword : "") << ASTShowCreateDatabaseQueryIDAndQueryNames::Query
-            << " " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(database);
+            << " " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(getDatabase());
     }
 };
 
diff --git a/src/Parsers/parseDatabaseAndTableName.cpp b/src/Parsers/parseDatabaseAndTableName.cpp
index c071f1b6eb4..48f47dbb68c 100644
--- a/src/Parsers/parseDatabaseAndTableName.cpp
+++ b/src/Parsers/parseDatabaseAndTableName.cpp
@@ -41,6 +41,24 @@ bool parseDatabaseAndTableName(IParser::Pos & pos, Expected & expected, String &
     return true;
 }
 
+bool parseDatabaseAndTableAsAST(IParser::Pos & pos, Expected & expected, ASTPtr & database, ASTPtr & table)
+{
+    ParserToken s_dot(TokenType::Dot);
+    ParserIdentifier table_parser(true);
+
+    if (!table_parser.parse(pos, table, expected))
+        return false;
+
+    if (s_dot.ignore(pos))
+    {
+        database = table;
+        if (!table_parser.parse(pos, table, expected))
+            return false;
+    }
+
+    return true;
+}
+
 
 bool parseDatabase(IParser::Pos & pos, Expected & expected, String & database_str)
 {
@@ -57,6 +75,12 @@ bool parseDatabase(IParser::Pos & pos, Expected & expected, String & database_st
     return true;
 }
 
+bool parseDatabaseAsAST(IParser::Pos & pos, Expected & expected, ASTPtr & database)
+{
+    ParserIdentifier identifier_parser(/* allow_query_parameter */true);
+    return identifier_parser.parse(pos, database, expected);
+}
+
 
 bool parseDatabaseAndTableNameOrAsterisks(IParser::Pos & pos, Expected & expected, String & database, bool & any_database, String & table, bool & any_table)
 {
diff --git a/src/Parsers/parseDatabaseAndTableName.h b/src/Parsers/parseDatabaseAndTableName.h
index dc435ca047e..6ec354d68b4 100644
--- a/src/Parsers/parseDatabaseAndTableName.h
+++ b/src/Parsers/parseDatabaseAndTableName.h
@@ -7,9 +7,13 @@ namespace DB
 /// Parses [db.]name
 bool parseDatabaseAndTableName(IParser::Pos & pos, Expected & expected, String & database_str, String & table_str);
 
+bool parseDatabaseAndTableAsAST(IParser::Pos & pos, Expected & expected, ASTPtr & database, ASTPtr & table);
+
 /// Parses [db.]name or [db.]* or [*.]*
 bool parseDatabaseAndTableNameOrAsterisks(IParser::Pos & pos, Expected & expected, String & database, bool & any_database, String & table, bool & any_table);
 
 bool parseDatabase(IParser::Pos & pos, Expected & expected, String & database_str);
 
+bool parseDatabaseAsAST(IParser::Pos & pos, Expected & expected, ASTPtr & database);
+
 }
diff --git a/src/Parsers/tests/gtest_dictionary_parser.cpp b/src/Parsers/tests/gtest_dictionary_parser.cpp
index 967f6848404..25f48fc27c8 100644
--- a/src/Parsers/tests/gtest_dictionary_parser.cpp
+++ b/src/Parsers/tests/gtest_dictionary_parser.cpp
@@ -42,8 +42,8 @@ TEST(ParserDictionaryDDL, SimpleDictionary)
     ParserCreateDictionaryQuery parser;
     ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);
     ASTCreateQuery * create = ast->as<ASTCreateQuery>();
-    EXPECT_EQ(create->table, "dict1");
-    EXPECT_EQ(create->database, "test");
+    EXPECT_EQ(create->getTable(), "dict1");
+    EXPECT_EQ(create->getDatabase(), "test");
     EXPECT_EQ(create->is_dictionary, true);
     EXPECT_NE(create->dictionary, nullptr);
     EXPECT_NE(create->dictionary->lifetime, nullptr);
@@ -138,8 +138,8 @@ TEST(ParserDictionaryDDL, AttributesWithMultipleProperties)
     ParserCreateDictionaryQuery parser;
     ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);
     ASTCreateQuery * create = ast->as<ASTCreateQuery>();
-    EXPECT_EQ(create->table, "dict2");
-    EXPECT_EQ(create->database, "");
+    EXPECT_EQ(create->getTable(), "dict2");
+    EXPECT_EQ(create->getDatabase(), "");
 
     /// test attributes
     EXPECT_NE(create->dictionary_attributes_list, nullptr);
@@ -240,8 +240,8 @@ TEST(ParserDictionaryDDL, NestedSource)
     ParserCreateDictionaryQuery parser;
     ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);
     ASTCreateQuery * create = ast->as<ASTCreateQuery>();
-    EXPECT_EQ(create->table, "dict4");
-    EXPECT_EQ(create->database, "");
+    EXPECT_EQ(create->getTable(), "dict4");
+    EXPECT_EQ(create->getDatabase(), "");
 
     /// source test
     EXPECT_EQ(create->dictionary->source->name, "mysql");
@@ -301,8 +301,8 @@ TEST(ParserDictionaryDDL, ParseDropQuery)
 
     ASTDropQuery * drop1 = ast1->as<ASTDropQuery>();
     EXPECT_TRUE(drop1->is_dictionary);
-    EXPECT_EQ(drop1->database, "test");
-    EXPECT_EQ(drop1->table, "dict1");
+    EXPECT_EQ(drop1->getDatabase(), "test");
+    EXPECT_EQ(drop1->getTable(), "dict1");
     auto str1 = serializeAST(*drop1, true);
     EXPECT_EQ(input1, str1);
 
@@ -312,8 +312,8 @@ TEST(ParserDictionaryDDL, ParseDropQuery)
 
     ASTDropQuery * drop2 = ast2->as<ASTDropQuery>();
     EXPECT_TRUE(drop2->is_dictionary);
-    EXPECT_EQ(drop2->database, "");
-    EXPECT_EQ(drop2->table, "dict2");
+    EXPECT_EQ(drop2->getDatabase(), "");
+    EXPECT_EQ(drop2->getTable(), "dict2");
     auto str2 = serializeAST(*drop2, true);
     EXPECT_EQ(input2, str2);
 }
@@ -326,8 +326,8 @@ TEST(ParserDictionaryDDL, ParsePropertiesQueries)
     ASTPtr ast1 = parseQuery(parser, input1.data(), input1.data() + input1.size(), "", 0, 0);
 
     ASTShowCreateDictionaryQuery * show1 = ast1->as<ASTShowCreateDictionaryQuery>();
-    EXPECT_EQ(show1->table, "dict1");
-    EXPECT_EQ(show1->database, "test");
+    EXPECT_EQ(show1->getTable(), "dict1");
+    EXPECT_EQ(show1->getDatabase(), "test");
     EXPECT_EQ(serializeAST(*show1), input1);
 
     String input2 = "EXISTS DICTIONARY dict2";
@@ -335,7 +335,7 @@ TEST(ParserDictionaryDDL, ParsePropertiesQueries)
     ASTPtr ast2 = parseQuery(parser, input2.data(), input2.data() + input2.size(), "", 0, 0);
 
     ASTExistsDictionaryQuery * show2 = ast2->as<ASTExistsDictionaryQuery>();
-    EXPECT_EQ(show2->table, "dict2");
-    EXPECT_EQ(show2->database, "");
+    EXPECT_EQ(show2->getTable(), "dict2");
+    EXPECT_EQ(show2->getDatabase(), "");
     EXPECT_EQ(serializeAST(*show2), input2);
 }
diff --git a/src/Processors/Executors/CompletedPipelineExecutor.cpp b/src/Processors/Executors/CompletedPipelineExecutor.cpp
index a4e3dea89fa..45b02cba298 100644
--- a/src/Processors/Executors/CompletedPipelineExecutor.cpp
+++ b/src/Processors/Executors/CompletedPipelineExecutor.cpp
@@ -1,7 +1,9 @@
 #include
 #include
 #include
+#include
 #include
+#include
 #include
 #include
 
diff --git a/src/Processors/Executors/ExecutingGraph.cpp b/src/Processors/Executors/ExecutingGraph.cpp
index c8d00303cc0..738cbfd2176 100644
--- a/src/Processors/Executors/ExecutingGraph.cpp
+++ b/src/Processors/Executors/ExecutingGraph.cpp
@@ -1,4 +1,6 @@
 #include
+#include
+#include
 
 namespace DB
 {
@@ -8,7 +10,7 @@ namespace ErrorCodes
     extern const int LOGICAL_ERROR;
 }
 
-ExecutingGraph::ExecutingGraph(const Processors & processors)
+ExecutingGraph::ExecutingGraph(Processors & processors_) : processors(processors_)
 {
     uint64_t num_processors = processors.size();
     nodes.reserve(num_processors);
@@ -88,9 +90,36 @@ bool ExecutingGraph::addEdges(uint64_t node)
     return was_edge_added;
 }
 
-std::vector<uint64_t> ExecutingGraph::expandPipeline(const Processors & processors)
+bool ExecutingGraph::expandPipeline(std::stack<uint64_t> & stack, uint64_t pid)
 {
+    auto & cur_node = *nodes[pid];
+    Processors new_processors;
+
+    try
+    {
+        new_processors = cur_node.processor->expandPipeline();
+    }
+    catch (...)
+    {
+        cur_node.exception = std::current_exception();
+        return false;
+    }
+
+    {
+        std::lock_guard guard(processors_mutex);
+        processors.insert(processors.end(), new_processors.begin(), new_processors.end());
+    }
+
+    uint64_t num_processors = processors.size();
+    std::vector<uint64_t> back_edges_sizes(num_processors, 0);
+    std::vector<uint64_t> direct_edge_sizes(num_processors, 0);
+
+    for (uint64_t node = 0; node < nodes.size(); ++node)
+    {
+        direct_edge_sizes[node] = nodes[node]->direct_edges.size();
+        back_edges_sizes[node] = nodes[node]->back_edges.size();
+    }
+
     nodes.reserve(num_processors);
 
     while (nodes.size() < num_processors)
@@ -112,7 +141,226 @@ bool ExecutingGraph::expandPipeline(std::stack<uint64_t> & stack, uint64_t pid)
             updated_nodes.push_back(node);
     }
 
-    return updated_nodes;
+    for (auto updated_node : updated_nodes)
+    {
+        auto & node = *nodes[updated_node];
+
+        size_t num_direct_edges = node.direct_edges.size();
+        size_t num_back_edges = node.back_edges.size();
+
+        std::lock_guard guard(node.status_mutex);
+
+        for (uint64_t edge = back_edges_sizes[updated_node]; edge < num_back_edges; ++edge)
+            node.updated_input_ports.emplace_back(edge);
+
+        for (uint64_t edge = direct_edge_sizes[updated_node]; edge < num_direct_edges; ++edge)
+            node.updated_output_ports.emplace_back(edge);
+
+        if (node.status == ExecutingGraph::ExecStatus::Idle)
+        {
+            node.status = ExecutingGraph::ExecStatus::Preparing;
+            stack.push(updated_node);
+        }
+    }
+
+    return true;
+}
+
+void ExecutingGraph::initializeExecution(Queue & queue)
+{
+    std::stack<uint64_t> stack;
+
+    /// Add childless processors to stack.
+    uint64_t num_processors = nodes.size();
+    for (uint64_t proc = 0; proc < num_processors; ++proc)
+    {
+        if (nodes[proc]->direct_edges.empty())
+        {
+            stack.push(proc);
+            /// do not lock mutex, as this function is executed in single thread
+            nodes[proc]->status = ExecutingGraph::ExecStatus::Preparing;
+        }
+    }
+
+    Queue async_queue;
+
+    while (!stack.empty())
+    {
+        uint64_t proc = stack.top();
+        stack.pop();
+
+        updateNode(proc, queue, async_queue);
+
+        if (!async_queue.empty())
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "Async is only possible after work() call. Processor {}",
+                            async_queue.front()->processor->getName());
+    }
+}
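initializeExecution() seeds the traversal with childless nodes, i.e. processors with no direct (output) edges: the sinks. Their prepare() then reports NeedData, and updateNode() pulls upstream processors into the stacks. A toy illustration of just the seeding rule, under the same edge terminology as the code above (direct = consumer side; graph shape is a made-up example):

    #include <cstdint>
    #include <stack>
    #include <vector>

    int main()
    {
        // Toy graph: source(0) -> transform(1) -> sink(2). Only the sink has
        // no direct edges, so only it seeds the stack; its prepare() would
        // then propagate need-data back to node 1 and finally node 0.
        std::vector<std::vector<uint64_t>> direct_edges = {
            {1}, // node 0 feeds node 1
            {2}, // node 1 feeds node 2
            {},  // node 2: childless sink
        };

        std::stack<uint64_t> stack;
        for (uint64_t node = 0; node < direct_edges.size(); ++node)
            if (direct_edges[node].empty())
                stack.push(node); // here: just node 2
    }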
+
+bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue)
+{
+    std::stack<Edge *> updated_edges;
+    std::stack<uint64_t> updated_processors;
+    updated_processors.push(pid);
+
+    UpgradableMutex::ReadGuard read_lock(nodes_mutex);
+
+    while (!updated_processors.empty() || !updated_edges.empty())
+    {
+        std::optional<std::unique_lock<std::mutex>> stack_top_lock;
+
+        if (updated_processors.empty())
+        {
+            auto * edge = updated_edges.top();
+            updated_edges.pop();
+
+            /// Here we have ownership on edge, but node can be concurrently accessed.
+
+            auto & node = *nodes[edge->to];
+
+            std::unique_lock lock(node.status_mutex);
+
+            ExecutingGraph::ExecStatus status = node.status;
+
+            if (status != ExecutingGraph::ExecStatus::Finished)
+            {
+                if (edge->backward)
+                    node.updated_output_ports.push_back(edge->output_port_number);
+                else
+                    node.updated_input_ports.push_back(edge->input_port_number);
+
+                if (status == ExecutingGraph::ExecStatus::Idle)
+                {
+                    node.status = ExecutingGraph::ExecStatus::Preparing;
+                    updated_processors.push(edge->to);
+                    stack_top_lock = std::move(lock);
+                }
+                else
+                    nodes[edge->to]->processor->onUpdatePorts();
+            }
+        }
+
+        if (!updated_processors.empty())
+        {
+            pid = updated_processors.top();
+            updated_processors.pop();
+
+            /// In this method we have ownership on node.
+            auto & node = *nodes[pid];
+
+            bool need_expand_pipeline = false;
+
+            if (!stack_top_lock)
+                stack_top_lock.emplace(node.status_mutex);
+
+            {
+#ifndef NDEBUG
+                Stopwatch watch;
+#endif
+
+                std::unique_lock lock(std::move(*stack_top_lock));
+
+                try
+                {
+                    node.last_processor_status = node.processor->prepare(node.updated_input_ports, node.updated_output_ports);
+                }
+                catch (...)
+                {
+                    node.exception = std::current_exception();
+                    return false;
+                }
+
+#ifndef NDEBUG
+                node.preparation_time_ns += watch.elapsed();
+#endif
+
+                node.updated_input_ports.clear();
+                node.updated_output_ports.clear();
+
+                switch (node.last_processor_status)
+                {
+                    case IProcessor::Status::NeedData:
+                    case IProcessor::Status::PortFull:
+                    {
+                        node.status = ExecutingGraph::ExecStatus::Idle;
+                        break;
+                    }
+                    case IProcessor::Status::Finished:
+                    {
+                        node.status = ExecutingGraph::ExecStatus::Finished;
+                        break;
+                    }
+                    case IProcessor::Status::Ready:
+                    {
+                        node.status = ExecutingGraph::ExecStatus::Executing;
+                        queue.push(&node);
+                        break;
+                    }
+                    case IProcessor::Status::Async:
+                    {
+                        node.status = ExecutingGraph::ExecStatus::Executing;
+                        async_queue.push(&node);
+                        break;
+                    }
+                    case IProcessor::Status::ExpandPipeline:
+                    {
+                        need_expand_pipeline = true;
+                        break;
+                    }
+                }
+
+                if (!need_expand_pipeline)
+                {
+                    /// If you wonder why edges are pushed in reverse order,
+                    /// it is because updated_edges is a stack, and we prefer to get from stack
+                    /// input ports firstly, and then outputs, both in-order.
+                    ///
+                    /// Actually, there should be no difference in which order we process edges.
+                    /// However, some tests are sensitive to it (e.g. something like SELECT 1 UNION ALL 2).
+                    /// Let's not break this behaviour so far.
+
+                    for (auto it = node.post_updated_output_ports.rbegin(); it != node.post_updated_output_ports.rend(); ++it)
+                    {
+                        auto * edge = static_cast<Edge *>(*it);
+                        updated_edges.push(edge);
+                        edge->update_info.trigger();
+                    }
+
+                    for (auto it = node.post_updated_input_ports.rbegin(); it != node.post_updated_input_ports.rend(); ++it)
+                    {
+                        auto * edge = static_cast<Edge *>(*it);
+                        updated_edges.push(edge);
+                        edge->update_info.trigger();
+                    }
+
+                    node.post_updated_input_ports.clear();
+                    node.post_updated_output_ports.clear();
+                }
+            }
+
+            if (need_expand_pipeline)
+            {
+                {
+                    UpgradableMutex::WriteGuard lock(read_lock);
+                    if (!expandPipeline(updated_processors, pid))
+                        return false;
+                }
+
+                /// Add itself back to be prepared again.
+                updated_processors.push(pid);
+            }
+        }
+    }
+
+    return true;
+}
+
+void ExecutingGraph::cancel()
+{
+    std::lock_guard guard(processors_mutex);
+    for (auto & processor : processors)
+        processor->cancel();
 }
 
 }
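updateNode() holds a shared (read) lock on nodes_mutex for the whole traversal and upgrades to exclusive only for the rare ExpandPipeline case, via UpgradableMutex::WriteGuard lock(read_lock). UpgradableMutex is a ClickHouse-internal primitive; a rough equivalent of the usage pattern with std::shared_mutex, which cannot upgrade in place and must drop and reacquire, is sketched below - that gap is one reason a dedicated primitive is used, since the real guard upgrades without ever releasing the read side:

    #include <mutex>
    #include <shared_mutex>

    std::shared_mutex nodes_mutex;

    void update_with_occasional_expand(bool need_expand)
    {
        std::shared_lock<std::shared_mutex> read_lock(nodes_mutex); // many concurrent readers

        if (need_expand)
        {
            // std::shared_mutex cannot atomically upgrade: release, re-lock
            // exclusively, and revalidate state afterwards. ClickHouse's
            // UpgradableMutex::WriteGuard instead upgrades the held read lock,
            // so no other writer can slip in between.
            read_lock.unlock();
            std::unique_lock<std::shared_mutex> write_lock(nodes_mutex);
            // ... mutate the node list (expandPipeline) ...
        }
    }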
diff --git a/src/Processors/Executors/ExecutingGraph.h b/src/Processors/Executors/ExecutingGraph.h
index 4f61241a726..32841a470f8 100644
--- a/src/Processors/Executors/ExecutingGraph.h
+++ b/src/Processors/Executors/ExecutingGraph.h
@@ -1,7 +1,10 @@
 #pragma once
 
 #include
 #include
+#include
 #include
+#include
+#include
 
 namespace DB
 {
@@ -81,8 +84,7 @@ public:
         ExecStatus status = ExecStatus::Idle;
         std::mutex status_mutex;
 
-        /// Job and exception. Job calls processor->work() inside and catch exception.
-        std::function<void()> job;
+        /// Exception which happened after processor execution.
         std::exception_ptr exception;
 
         /// Last state for profiling.
@@ -112,6 +114,7 @@ public:
         }
     };
 
+    using Queue = std::queue<Node *>;
     using NodePtr = std::unique_ptr<Node>;
     using Nodes = std::vector<NodePtr>;
     Nodes nodes;
@@ -120,12 +123,19 @@ public:
     using ProcessorsMap = std::unordered_map<const IProcessor *, uint64_t>;
     ProcessorsMap processors_map;
 
-    explicit ExecutingGraph(const Processors & processors);
+    explicit ExecutingGraph(Processors & processors_);
 
-    /// Update graph after processor returned ExpandPipeline status.
-    /// Processors should already contain newly-added processors.
-    /// Returns newly-added nodes and nodes which edges were modified.
-    std::vector<uint64_t> expandPipeline(const Processors & processors);
+    const Processors & getProcessors() const { return processors; }
+
+    /// Traverse graph the first time to update all the childless nodes.
+    void initializeExecution(Queue & queue);
+
+    /// Update processor with pid number (call IProcessor::prepare).
+    /// Check parents and children of current processor and push them to stacks if they also need to be updated.
+    /// If processor wants to be expanded, lock will be upgraded to get write access to pipeline.
+    bool updateNode(uint64_t pid, Queue & queue, Queue & async_queue);
+
+    void cancel();
 
 private:
     /// Add single edge to edges list. Check processor is known.
@@ -134,6 +144,15 @@ private:
     /// Append new edges for node. It is called for new node or when new port were added after ExpandPipeline.
     /// Returns true if new edge was added.
     bool addEdges(uint64_t node);
+
+    /// Update graph after processor (pid) returned ExpandPipeline status.
+    /// All new nodes and nodes with updated ports are pushed into stack.
+    bool expandPipeline(std::stack<uint64_t> & stack, uint64_t pid);
+
+    Processors & processors;
+    std::mutex processors_mutex;
+
+    UpgradableMutex nodes_mutex;
 };
 
 }
diff --git a/src/Processors/Executors/ExecutionThreadContext.cpp b/src/Processors/Executors/ExecutionThreadContext.cpp
new file mode 100644
index 00000000000..acf702a5c7e
--- /dev/null
+++ b/src/Processors/Executors/ExecutionThreadContext.cpp
@@ -0,0 +1,107 @@
+#include
+#include
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int TOO_MANY_ROWS_OR_BYTES;
+    extern const int QUOTA_EXPIRED;
+    extern const int QUERY_WAS_CANCELLED;
+}
+
+void ExecutionThreadContext::wait(std::atomic_bool & finished)
+{
+    std::unique_lock lock(mutex);
+
+    condvar.wait(lock, [&]
+    {
+        return finished || wake_flag;
+    });
+
+    wake_flag = false;
+}
+
+void ExecutionThreadContext::wakeUp()
+{
+    std::lock_guard guard(mutex);
+    wake_flag = true;
+    condvar.notify_one();
+}
+
+static bool checkCanAddAdditionalInfoToException(const DB::Exception & exception)
+{
+    /// Don't add additional info to limits and quota exceptions, and in case of kill query (to pass tests).
+    return exception.code() != ErrorCodes::TOO_MANY_ROWS_OR_BYTES
+           && exception.code() != ErrorCodes::QUOTA_EXPIRED
+           && exception.code() != ErrorCodes::QUERY_WAS_CANCELLED;
+}
+
+static void executeJob(IProcessor * processor)
+{
+    try
+    {
+        processor->work();
+    }
+    catch (Exception & exception)
+    {
+        if (checkCanAddAdditionalInfoToException(exception))
+            exception.addMessage("While executing " + processor->getName());
+        throw;
+    }
+}
+
+bool ExecutionThreadContext::executeTask()
+{
+#ifndef NDEBUG
+    Stopwatch execution_time_watch;
+#endif
+
+    try
+    {
+        executeJob(node->processor);
+
+        ++node->num_executed_jobs;
+    }
+    catch (...)
+    {
+        node->exception = std::current_exception();
+    }
+
+#ifndef NDEBUG
+    execution_time_ns += execution_time_watch.elapsed();
+#endif
+
+    return node->exception == nullptr;
+}
+
+void ExecutionThreadContext::rethrowExceptionIfHas()
+{
+    if (exception)
+        std::rethrow_exception(exception);
+}
+
+ExecutingGraph::Node * ExecutionThreadContext::tryPopAsyncTask()
+{
+    ExecutingGraph::Node * task = nullptr;
+
+    if (!async_tasks.empty())
+    {
+        task = async_tasks.front();
+        async_tasks.pop();
+
+        if (async_tasks.empty())
+            has_async_tasks = false;
+    }
+
+    return task;
+}
+
+void ExecutionThreadContext::pushAsyncTask(ExecutingGraph::Node * async_task)
+{
+    async_tasks.push(async_task);
+    has_async_tasks = true;
+}
+
+}
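wait()/wakeUp() above are the textbook condition-variable handshake: the flag is written under the same mutex the waiter holds, the predicate re-checks both wake_flag and the shared finished flag (guarding against lost and spurious wakeups alike), and notify_one() pairs with exactly one waiter per context. A self-contained version of the same pattern (names mirror the diff; types simplified):

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    struct ThreadWaiter
    {
        std::condition_variable condvar;
        std::mutex mutex;
        bool wake_flag = false;

        void wait(std::atomic_bool & finished)
        {
            std::unique_lock lock(mutex);
            // Predicate re-checked on every wakeup, spurious or not.
            condvar.wait(lock, [&] { return finished || wake_flag; });
            wake_flag = false;
        }

        void wakeUp()
        {
            std::lock_guard guard(mutex); // write the flag under the same mutex
            wake_flag = true;
            condvar.notify_one();
        }
    };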
diff --git a/src/Processors/Executors/ExecutionThreadContext.h b/src/Processors/Executors/ExecutionThreadContext.h
new file mode 100644
index 00000000000..e86e8c2a452
--- /dev/null
+++ b/src/Processors/Executors/ExecutionThreadContext.h
@@ -0,0 +1,61 @@
+#pragma once
+#include
+#include
+#include
+
+namespace DB
+{
+
+/// Context for each executing thread of PipelineExecutor.
+class ExecutionThreadContext
+{
+private:
+    /// A queue of async tasks. Task is added to queue when waited.
+    std::queue<ExecutingGraph::Node *> async_tasks;
+    std::atomic_bool has_async_tasks = false;
+
+    /// This objects are used to wait for next available task.
+    std::condition_variable condvar;
+    std::mutex mutex;
+    bool wake_flag = false;
+
+    /// Currently processing node.
+    ExecutingGraph::Node * node = nullptr;
+
+    /// Exception from executing thread itself.
+    std::exception_ptr exception;
+
+public:
+#ifndef NDEBUG
+    /// Time for different processing stages.
+    UInt64 total_time_ns = 0;
+    UInt64 execution_time_ns = 0;
+    UInt64 processing_time_ns = 0;
+    UInt64 wait_time_ns = 0;
+#endif
+
+    const size_t thread_number;
+
+    void wait(std::atomic_bool & finished);
+    void wakeUp();
+
+    /// Methods to access/change currently executing task.
+    bool hasTask() const { return node != nullptr; }
+    void setTask(ExecutingGraph::Node * task) { node = task; }
+    bool executeTask();
+    uint64_t getProcessorID() const { return node->processors_id; }
+
+    /// Methods to manage async tasks.
+    ExecutingGraph::Node * tryPopAsyncTask();
+    void pushAsyncTask(ExecutingGraph::Node * async_task);
+    bool hasAsyncTasks() const { return has_async_tasks; }
+
+    std::unique_lock<std::mutex> lockStatus() const { return std::unique_lock(node->status_mutex); }
+
+    void setException(std::exception_ptr exception_) { exception = std::move(exception_); }
+    void rethrowExceptionIfHas();
+
+    explicit ExecutionThreadContext(size_t thread_number_) : thread_number(thread_number_) {}
+};
+
+}
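The header above is the per-thread slice of executor state: one current node, a private async-task queue, and a parking spot (the condvar). The loop a thread runs over this context appears in full in PipelineExecutor::executeStepImpl at the end of this diff; a self-contained miniature of the same shape, with hypothetical types in place of ExecutorTasks/ExecutionThreadContext:

    #include <functional>
    #include <optional>
    #include <queue>

    // Minimal analogue: fetch a task, run it, repeat. The real loop parks in
    // ExecutionThreadContext::wait() instead of returning when idle, and runs
    // graph->updateNode()/pushTasks() after each executed task.
    struct MiniScheduler
    {
        std::queue<std::function<bool()>> tasks; // bool = "finished without error"

        std::optional<std::function<bool()>> tryGetTask()
        {
            if (tasks.empty())
                return std::nullopt;
            auto task = std::move(tasks.front());
            tasks.pop();
            return task;
        }
    };

    void executeSingleThread(MiniScheduler & scheduler)
    {
        while (auto task = scheduler.tryGetTask())
            if (!(*task)())
                break; // real code: cancel() and record the exception on the node
    }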
diff --git a/src/Processors/Executors/ExecutorTasks.cpp b/src/Processors/Executors/ExecutorTasks.cpp
new file mode 100644
index 00000000000..57681a41d20
--- /dev/null
+++ b/src/Processors/Executors/ExecutorTasks.cpp
@@ -0,0 +1,193 @@
+#include
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int LOGICAL_ERROR;
+}
+
+void ExecutorTasks::finish()
+{
+    {
+        std::lock_guard lock(mutex);
+        finished = true;
+        async_task_queue.finish();
+    }
+
+    std::lock_guard guard(executor_contexts_mutex);
+
+    for (auto & context : executor_contexts)
+        context->wakeUp();
+}
+
+void ExecutorTasks::rethrowFirstThreadException()
+{
+    for (auto & executor_context : executor_contexts)
+        executor_context->rethrowExceptionIfHas();
+}
+
+void ExecutorTasks::tryGetTask(ExecutionThreadContext & context)
+{
+    {
+        std::unique_lock lock(mutex);
+
+        if (auto * async_task = context.tryPopAsyncTask())
+        {
+            context.setTask(async_task);
+            --num_waiting_async_tasks;
+        }
+        else if (!task_queue.empty())
+            context.setTask(task_queue.pop(context.thread_number));
+
+        if (context.hasTask())
+        {
+            if (!task_queue.empty() && !threads_queue.empty())
+            {
+                size_t next_thread = context.thread_number + 1 == num_threads ? 0 : (context.thread_number + 1);
+                auto thread_to_wake = task_queue.getAnyThreadWithTasks(next_thread);
+
+                if (threads_queue.has(thread_to_wake))
+                    threads_queue.pop(thread_to_wake);
+                else
+                    thread_to_wake = threads_queue.popAny();
+
+                lock.unlock();
+                executor_contexts[thread_to_wake]->wakeUp();
+            }
+
+            return;
+        }
+
+        if (threads_queue.size() + 1 == num_threads && async_task_queue.empty() && num_waiting_async_tasks == 0)
+        {
+            lock.unlock();
+            finish();
+            return;
+        }
+
+#if defined(OS_LINUX)
+        if (num_threads == 1)
+        {
+            /// If we execute in single thread, wait for async tasks here.
+            auto res = async_task_queue.wait(lock);
+            if (!res)
+            {
+                if (finished)
+                    return;
+                throw Exception("Empty task was returned from async task queue", ErrorCodes::LOGICAL_ERROR);
+            }
+
+            context.setTask(static_cast<ExecutingGraph::Node *>(res.data));
+            return;
+        }
+#endif
+
+        threads_queue.push(context.thread_number);
+    }
+
+    context.wait(finished);
+}
+
+void ExecutorTasks::pushTasks(Queue & queue, Queue & async_queue, ExecutionThreadContext & context)
+{
+    context.setTask(nullptr);
+
+    /// Take local task from queue if has one.
+    if (!queue.empty() && !context.hasAsyncTasks())
+    {
+        context.setTask(queue.front());
+        queue.pop();
+    }
+
+    if (!queue.empty() || !async_queue.empty())
+    {
+        std::unique_lock lock(mutex);
+
+#if defined(OS_LINUX)
+        while (!async_queue.empty() && !finished)
+        {
+            int fd = async_queue.front()->processor->schedule();
+            async_task_queue.addTask(context.thread_number, async_queue.front(), fd);
+            async_queue.pop();
+        }
+#endif
+
+        while (!queue.empty() && !finished)
+        {
+            task_queue.push(queue.front(), context.thread_number);
+            queue.pop();
+        }
+
+        if (!threads_queue.empty() && !task_queue.empty() && !finished)
+        {
+            size_t next_thread = context.thread_number + 1 == num_threads ? 0 : (context.thread_number + 1);
+            auto thread_to_wake = task_queue.getAnyThreadWithTasks(next_thread);
+
+            if (threads_queue.has(thread_to_wake))
+                threads_queue.pop(thread_to_wake);
+            else
+                thread_to_wake = threads_queue.popAny();
+
+            lock.unlock();
+
+            executor_contexts[thread_to_wake]->wakeUp();
+        }
+    }
+}
+
+void ExecutorTasks::init(size_t num_threads_)
+{
+    num_threads = num_threads_;
+    threads_queue.init(num_threads);
+    task_queue.init(num_threads);
+
+    {
+        std::lock_guard guard(executor_contexts_mutex);
+
+        executor_contexts.reserve(num_threads);
+        for (size_t i = 0; i < num_threads; ++i)
+            executor_contexts.emplace_back(std::make_unique<ExecutionThreadContext>(i));
+    }
+}
+
+void ExecutorTasks::fill(Queue & queue)
+{
+    std::lock_guard lock(mutex);
+
+    size_t next_thread = 0;
+    while (!queue.empty())
+    {
+        task_queue.push(queue.front(), next_thread);
+        queue.pop();
+
+        ++next_thread;
+        if (next_thread >= num_threads)
+            next_thread = 0;
+    }
+}
+
+void ExecutorTasks::processAsyncTasks()
+{
+#if defined(OS_LINUX)
+    {
+        /// Wait for async tasks.
+        std::unique_lock lock(mutex);
+        while (auto task = async_task_queue.wait(lock))
+        {
+            auto * node = static_cast<ExecutingGraph::Node *>(task.data);
+            executor_contexts[task.thread_num]->pushAsyncTask(node);
+            ++num_waiting_async_tasks;
+
+            if (threads_queue.has(task.thread_num))
+            {
+                threads_queue.pop(task.thread_num);
+                executor_contexts[task.thread_num]->wakeUp();
+            }
+        }
+    }
+#endif
+}
+
+}
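tryGetTask() and pushTasks() share one wake heuristic: start scanning from the round-robin successor of the current thread, prefer a parked thread that already has queued tasks (getAnyThreadWithTasks), and otherwise wake any parked thread. The wrap-around itself is just modular increment, written branchy to avoid a division:

    #include <cstddef>

    // next_thread as computed in tryGetTask()/pushTasks() above: the
    // round-robin successor of the current thread, wrapping at num_threads.
    size_t nextThread(size_t thread_number, size_t num_threads)
    {
        return thread_number + 1 == num_threads ? 0 : thread_number + 1;
    }
    // nextThread(6, 8) == 7, nextThread(7, 8) == 0 - equivalent to
    // (thread_number + 1) % num_threads.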
diff --git a/src/Processors/Executors/ExecutorTasks.h b/src/Processors/Executors/ExecutorTasks.h
new file mode 100644
index 00000000000..f80df82b0e2
--- /dev/null
+++ b/src/Processors/Executors/ExecutorTasks.h
@@ -0,0 +1,64 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+namespace DB
+{
+
+/// Manage tasks which are ready for execution. Used in PipelineExecutor.
+class ExecutorTasks
+{
+    /// If query is finished (or cancelled).
+    std::atomic_bool finished = false;
+
+    /// Contexts for every executing thread.
+    std::vector<std::unique_ptr<ExecutionThreadContext>> executor_contexts;
+    /// This mutex protects only executor_contexts vector. Needed to avoid race between init() and finish().
+    std::mutex executor_contexts_mutex;
+
+    /// Common mutex for all the following fields.
+    std::mutex mutex;
+
+    /// Queue with pointers to tasks. Each thread will concurrently read from it until finished flag is set.
+    /// Stores processors need to be prepared. Preparing status is already set for them.
+    TaskQueue<ExecutingGraph::Node> task_queue;
+
+    /// Queue which stores tasks where processors returned Async status after prepare.
+    /// If multiple threads are using, main thread will wait for async tasks.
+    /// For single thread, will wait for async tasks only when task_queue is empty.
+    PollingQueue async_task_queue;
+
+    size_t num_threads = 0;
+
+    /// This is the total number of waited async tasks which are not executed yet.
+    /// sum(executor_contexts[i].async_tasks.size())
+    size_t num_waiting_async_tasks = 0;
+
+    /// A set of currently waiting threads.
+    ThreadsQueue threads_queue;
+
+public:
+    using Stack = std::stack<UInt64>;
+    using Queue = std::queue<ExecutingGraph::Node *>;
+
+    void finish();
+    bool isFinished() const { return finished; }
+
+    void rethrowFirstThreadException();
+
+    void tryGetTask(ExecutionThreadContext & context);
+    void pushTasks(Queue & queue, Queue & async_queue, ExecutionThreadContext & context);
+
+    void init(size_t num_threads_);
+    void fill(Queue & queue);
+
+    void processAsyncTasks();
+
+    ExecutionThreadContext & getThreadContext(size_t thread_num) { return *executor_contexts[thread_num]; }
+};
+
+}
- { - cur_node.exception = std::current_exception(); - return false; - } - - { - std::lock_guard guard(processors_mutex); - processors.insert(processors.end(), new_processors.begin(), new_processors.end()); - } - - uint64_t num_processors = processors.size(); - std::vector back_edges_sizes(num_processors, 0); - std::vector direct_edge_sizes(num_processors, 0); - - for (uint64_t node = 0; node < graph->nodes.size(); ++node) - { - direct_edge_sizes[node] = graph->nodes[node]->direct_edges.size(); - back_edges_sizes[node] = graph->nodes[node]->back_edges.size(); - } - - auto updated_nodes = graph->expandPipeline(processors); - - for (auto updated_node : updated_nodes) - { - auto & node = *graph->nodes[updated_node]; - - size_t num_direct_edges = node.direct_edges.size(); - size_t num_back_edges = node.back_edges.size(); - - std::lock_guard guard(node.status_mutex); - - for (uint64_t edge = back_edges_sizes[updated_node]; edge < num_back_edges; ++edge) - node.updated_input_ports.emplace_back(edge); - - for (uint64_t edge = direct_edge_sizes[updated_node]; edge < num_direct_edges; ++edge) - node.updated_output_ports.emplace_back(edge); - - if (node.status == ExecutingGraph::ExecStatus::Idle) - { - node.status = ExecutingGraph::ExecStatus::Preparing; - stack.push(updated_node); - } - } - - return true; -} - -bool PipelineExecutor::tryAddProcessorToStackIfUpdated(ExecutingGraph::Edge & edge, Queue & queue, Queue & async_queue, size_t thread_number) -{ - /// In this method we have ownership on edge, but node can be concurrently accessed. - - auto & node = *graph->nodes[edge.to]; - - std::unique_lock lock(node.status_mutex); - - ExecutingGraph::ExecStatus status = node.status; - - if (status == ExecutingGraph::ExecStatus::Finished) - return true; - - if (edge.backward) - node.updated_output_ports.push_back(edge.output_port_number); - else - node.updated_input_ports.push_back(edge.input_port_number); - - if (status == ExecutingGraph::ExecStatus::Idle) - { - node.status = ExecutingGraph::ExecStatus::Preparing; - return prepareProcessor(edge.to, thread_number, queue, async_queue, std::move(lock)); - } - else - graph->nodes[edge.to]->processor->onUpdatePorts(); - - return true; -} - -bool PipelineExecutor::prepareProcessor(UInt64 pid, size_t thread_number, Queue & queue, Queue & async_queue, std::unique_lock node_lock) -{ - /// In this method we have ownership on node. - auto & node = *graph->nodes[pid]; - - bool need_expand_pipeline = false; - - std::vector updated_back_edges; - std::vector updated_direct_edges; - - { -#ifndef NDEBUG - Stopwatch watch; -#endif - - std::unique_lock lock(std::move(node_lock)); - - try - { - node.last_processor_status = node.processor->prepare(node.updated_input_ports, node.updated_output_ports); - } - catch (...) 
- { - node.exception = std::current_exception(); - return false; - } - -#ifndef NDEBUG - node.preparation_time_ns += watch.elapsed(); -#endif - - node.updated_input_ports.clear(); - node.updated_output_ports.clear(); - - switch (node.last_processor_status) - { - case IProcessor::Status::NeedData: - case IProcessor::Status::PortFull: - { - node.status = ExecutingGraph::ExecStatus::Idle; - break; - } - case IProcessor::Status::Finished: - { - node.status = ExecutingGraph::ExecStatus::Finished; - break; - } - case IProcessor::Status::Ready: - { - node.status = ExecutingGraph::ExecStatus::Executing; - queue.push(&node); - break; - } - case IProcessor::Status::Async: - { - node.status = ExecutingGraph::ExecStatus::Executing; - async_queue.push(&node); - break; - } - case IProcessor::Status::ExpandPipeline: - { - need_expand_pipeline = true; - break; - } - } - - { - for (auto & edge_id : node.post_updated_input_ports) - { - auto * edge = static_cast(edge_id); - updated_back_edges.emplace_back(edge); - edge->update_info.trigger(); - } - - for (auto & edge_id : node.post_updated_output_ports) - { - auto * edge = static_cast(edge_id); - updated_direct_edges.emplace_back(edge); - edge->update_info.trigger(); - } - - node.post_updated_input_ports.clear(); - node.post_updated_output_ports.clear(); - } - } - - { - for (auto & edge : updated_direct_edges) - { - if (!tryAddProcessorToStackIfUpdated(*edge, queue, async_queue, thread_number)) - return false; - } - - for (auto & edge : updated_back_edges) - { - if (!tryAddProcessorToStackIfUpdated(*edge, queue, async_queue, thread_number)) - return false; - } - } - - if (need_expand_pipeline) - { - Stack stack; - - executor_contexts[thread_number]->task_list.emplace_back(&node, &stack); - - ExpandPipelineTask * desired = &executor_contexts[thread_number]->task_list.back(); - ExpandPipelineTask * expected = nullptr; - - while (!expand_pipeline_task.compare_exchange_strong(expected, desired)) - { - if (!doExpandPipeline(expected, true)) - return false; - - expected = nullptr; - } - - if (!doExpandPipeline(desired, true)) - return false; - - /// Add itself back to be prepared again. - stack.push(pid); - - while (!stack.empty()) - { - auto item = stack.top(); - if (!prepareProcessor(item, thread_number, queue, async_queue, std::unique_lock(graph->nodes[item]->status_mutex))) - return false; - - stack.pop(); - } - } - - return true; -} - -bool PipelineExecutor::doExpandPipeline(ExpandPipelineTask * task, bool processing) -{ - std::unique_lock lock(task->mutex); - - if (processing) - ++task->num_waiting_processing_threads; - - task->condvar.wait(lock, [&]() - { - return task->num_waiting_processing_threads >= num_processing_executors || expand_pipeline_task != task; - }); - - bool result = true; - - /// After condvar.wait() task may point to trash. Can change it only if it is still in expand_pipeline_task. 
- if (expand_pipeline_task == task) - { - result = expandPipeline(*task->stack, task->node_to_expand->processors_id); - - expand_pipeline_task = nullptr; - - lock.unlock(); - task->condvar.notify_all(); - } - - return result; + return graph->getProcessors(); } void PipelineExecutor::cancel() { cancelled = true; finish(); - - std::lock_guard guard(processors_mutex); - for (auto & processor : processors) - processor->cancel(); + graph->cancel(); } void PipelineExecutor::finish() { - { - std::lock_guard lock(task_queue_mutex); - finished = true; - async_task_queue.finish(); - } - - std::lock_guard guard(executor_contexts_mutex); - - for (auto & context : executor_contexts) - { - { - std::lock_guard lock(context->mutex); - context->wake_flag = true; - } - - context->condvar.notify_one(); - } + tasks.finish(); } void PipelineExecutor::execute(size_t num_threads) @@ -412,9 +86,7 @@ void PipelineExecutor::execute(size_t num_threads) std::rethrow_exception(node->exception); /// Exception which happened in executing thread, but not at processor. - for (auto & executor_context : executor_contexts) - if (executor_context->exception) - std::rethrow_exception(executor_context->exception); + tasks.rethrowFirstThreadException(); } catch (...) { @@ -437,9 +109,9 @@ bool PipelineExecutor::executeStep(std::atomic_bool * yield_flag) return true; } - executeStepImpl(0, 1, yield_flag); + executeStepImpl(0, yield_flag); - if (!finished) + if (!tasks.isFinished()) return true; /// Execution can be stopped because of exception. Check and rethrow if any. @@ -475,138 +147,47 @@ void PipelineExecutor::finalizeExecution() throw Exception("Pipeline stuck. Current state:\n" + dumpPipeline(), ErrorCodes::LOGICAL_ERROR); } -void PipelineExecutor::wakeUpExecutor(size_t thread_num) +void PipelineExecutor::executeSingleThread(size_t thread_num) { - std::lock_guard guard(executor_contexts[thread_num]->mutex); - executor_contexts[thread_num]->wake_flag = true; - executor_contexts[thread_num]->condvar.notify_one(); -} - -void PipelineExecutor::executeSingleThread(size_t thread_num, size_t num_threads) -{ - executeStepImpl(thread_num, num_threads); + executeStepImpl(thread_num); #ifndef NDEBUG - auto & context = executor_contexts[thread_num]; - LOG_TRACE(log, "Thread finished. Total time: {} sec. Execution time: {} sec. Processing time: {} sec. Wait time: {} sec.", (context->total_time_ns / 1e9), (context->execution_time_ns / 1e9), (context->processing_time_ns / 1e9), (context->wait_time_ns / 1e9)); + auto & context = tasks.getThreadContext(thread_num); + LOG_TRACE(log, + "Thread finished. Total time: {} sec. Execution time: {} sec. Processing time: {} sec. Wait time: {} sec.", + (context.total_time_ns / 1e9), + (context.execution_time_ns / 1e9), + (context.processing_time_ns / 1e9), + (context.wait_time_ns / 1e9)); #endif } -void PipelineExecutor::executeStepImpl(size_t thread_num, size_t num_threads, std::atomic_bool * yield_flag) +void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yield_flag) { #ifndef NDEBUG Stopwatch total_time_watch; #endif - auto & context = executor_contexts[thread_num]; - auto & node = context->node; + // auto & node = tasks.getNode(thread_num); + auto & context = tasks.getThreadContext(thread_num); bool yield = false; - while (!finished && !yield) + while (!tasks.isFinished() && !yield) { /// First, find any processor to execute. /// Just traverse graph and prepare any processor. 
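/// The deleted acquisition loop below parks idle threads on a per-thread
/// condition variable and wakes them one by one as work appears. The parking
/// idiom it relies on, reduced to its two halves (a sketch with assumed names,
/// not the real ExecutorContext):
///
///     std::mutex mutex;
///     std::condition_variable condvar;
///     bool wake_flag = false;
///
///     void sleep()    /// consumer side: park until woken
///     {
///         std::unique_lock lock(mutex);
///         condvar.wait(lock, [&] { return wake_flag; });
///         wake_flag = false;
///     }
///
///     void wake()     /// producer side: hand off new work
///     {
///         {
///             std::lock_guard lock(mutex);
///             wake_flag = true;
///         }
///         condvar.notify_one();
///     }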
- while (!finished && node == nullptr) + while (!tasks.isFinished() && !context.hasTask()) + tasks.tryGetTask(context); + + while (context.hasTask() && !yield) { - { - std::unique_lock lock(task_queue_mutex); - - if (!context->async_tasks.empty()) - { - node = context->async_tasks.front(); - context->async_tasks.pop(); - --num_waiting_async_tasks; - - if (context->async_tasks.empty()) - context->has_async_tasks = false; - } - else if (!task_queue.empty()) - node = task_queue.pop(thread_num); - - if (node) - { - if (!task_queue.empty() && !threads_queue.empty()) - { - auto thread_to_wake = task_queue.getAnyThreadWithTasks(thread_num + 1 == num_threads ? 0 : (thread_num + 1)); - - if (threads_queue.has(thread_to_wake)) - threads_queue.pop(thread_to_wake); - else - thread_to_wake = threads_queue.popAny(); - - lock.unlock(); - wakeUpExecutor(thread_to_wake); - } - - break; - } - - if (threads_queue.size() + 1 == num_threads && async_task_queue.empty() && num_waiting_async_tasks == 0) - { - lock.unlock(); - finish(); - break; - } - -#if defined(OS_LINUX) - if (num_threads == 1) - { - /// If we execute in single thread, wait for async tasks here. - auto res = async_task_queue.wait(lock); - if (!res) - { - /// The query had been cancelled (finished is also set) - if (finished) - break; - throw Exception("Empty task was returned from async task queue", ErrorCodes::LOGICAL_ERROR); - } - - node = static_cast(res.data); - break; - } -#endif - - threads_queue.push(thread_num); - } - - { - std::unique_lock lock(context->mutex); - - context->condvar.wait(lock, [&] - { - return finished || context->wake_flag; - }); - - context->wake_flag = false; - } - } - - if (finished) - break; - - while (node && !yield) - { - if (finished) + if (tasks.isFinished()) break; - addJob(node); - - { -#ifndef NDEBUG - Stopwatch execution_time_watch; -#endif - - node->job(); - -#ifndef NDEBUG - context->execution_time_ns += execution_time_watch.elapsed(); -#endif - } - - if (node->exception) + if (!context.executeTask()) cancel(); - if (finished) + if (tasks.isFinished()) break; #ifndef NDEBUG @@ -618,67 +199,16 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, size_t num_threads, st Queue queue; Queue async_queue; - ++num_processing_executors; - while (auto * task = expand_pipeline_task.load()) - doExpandPipeline(task, true); - /// Prepare processor after execution. - { - auto lock = std::unique_lock(node->status_mutex); - if (!prepareProcessor(node->processors_id, thread_num, queue, async_queue, std::move(lock))) - finish(); - } - - node = nullptr; - - /// Take local task from queue if has one. - if (!queue.empty() && !context->has_async_tasks) - { - node = queue.front(); - queue.pop(); - } + if (!graph->updateNode(context.getProcessorID(), queue, async_queue)) + finish(); /// Push other tasks to global queue. - if (!queue.empty() || !async_queue.empty()) - { - std::unique_lock lock(task_queue_mutex); - -#if defined(OS_LINUX) - while (!async_queue.empty() && !finished) - { - async_task_queue.addTask(thread_num, async_queue.front(), async_queue.front()->processor->schedule()); - async_queue.pop(); - } -#endif - - while (!queue.empty() && !finished) - { - task_queue.push(queue.front(), thread_num); - queue.pop(); - } - - if (!threads_queue.empty() && !task_queue.empty() && !finished) - { - auto thread_to_wake = task_queue.getAnyThreadWithTasks(thread_num + 1 == num_threads ? 
0 : (thread_num + 1)); - - if (threads_queue.has(thread_to_wake)) - threads_queue.pop(thread_to_wake); - else - thread_to_wake = threads_queue.popAny(); - - lock.unlock(); - - wakeUpExecutor(thread_to_wake); - } - } - - --num_processing_executors; - while (auto * task = expand_pipeline_task.load()) - doExpandPipeline(task, false); + tasks.pushTasks(queue, async_queue, context); } #ifndef NDEBUG - context->processing_time_ns += processing_time_watch.elapsed(); + context.processing_time_ns += processing_time_watch.elapsed(); #endif /// We have executed single processor. Check if we need to yield execution. @@ -688,8 +218,8 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, size_t num_threads, st } #ifndef NDEBUG - context->total_time_ns += total_time_watch.elapsed(); - context->wait_time_ns = context->total_time_ns - context->execution_time_ns - context->processing_time_ns; + context.total_time_ns += total_time_watch.elapsed(); + context.wait_time_ns = context.total_time_ns - context.execution_time_ns - context.processing_time_ns; #endif } @@ -697,49 +227,11 @@ void PipelineExecutor::initializeExecution(size_t num_threads) { is_execution_initialized = true; - threads_queue.init(num_threads); - task_queue.init(num_threads); + Queue queue; + graph->initializeExecution(queue); - { - std::lock_guard guard(executor_contexts_mutex); - - executor_contexts.reserve(num_threads); - for (size_t i = 0; i < num_threads; ++i) - executor_contexts.emplace_back(std::make_unique()); - } - - Stack stack; - addChildlessProcessorsToStack(stack); - - { - std::lock_guard lock(task_queue_mutex); - - Queue queue; - Queue async_queue; - size_t next_thread = 0; - - while (!stack.empty()) - { - UInt64 proc = stack.top(); - stack.pop(); - - prepareProcessor(proc, 0, queue, async_queue, std::unique_lock(graph->nodes[proc]->status_mutex)); - - while (!queue.empty()) - { - task_queue.push(queue.front(), next_thread); - queue.pop(); - - ++next_thread; - if (next_thread >= num_threads) - next_thread = 0; - } - - while (!async_queue.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Async is only possible after work() call. Processor {}", - async_queue.front()->processor->getName()); - } - } + tasks.init(num_threads); + tasks.fill(queue); } void PipelineExecutor::executeImpl(size_t num_threads) @@ -771,7 +263,7 @@ void PipelineExecutor::executeImpl(size_t num_threads) for (size_t i = 0; i < num_threads; ++i) { - threads.emplace_back([this, thread_group, thread_num = i, num_threads] + threads.emplace_back([this, thread_group, thread_num = i] { /// ThreadStatus thread_status; @@ -787,43 +279,25 @@ void PipelineExecutor::executeImpl(size_t num_threads) try { - executeSingleThread(thread_num, num_threads); + executeSingleThread(thread_num); } catch (...) { /// In case of exception from executor itself, stop other threads. finish(); - executor_contexts[thread_num]->exception = std::current_exception(); + tasks.getThreadContext(thread_num).setException(std::current_exception()); } }); } -#if defined(OS_LINUX) - { - /// Wait for async tasks. 
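/// Asynchronous processors tie the two halves of the old design together: in
/// executeStepImpl a processor that returned IProcessor::Status::Async was
/// registered in the polling queue together with the file descriptor it wants
/// to wait on, roughly
///
///     async_task_queue.addTask(thread_num, node, node->processor->schedule());
///
/// and the deleted block below is the consumer side: one thread parks inside
/// PollingQueue::wait() and routes every ready task back to the thread that
/// registered it, waking that thread if it sleeps. After this patch the same
/// duty presumably sits behind tasks.processAsyncTasks().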
- std::unique_lock lock(task_queue_mutex); - while (auto task = async_task_queue.wait(lock)) - { - auto * node = static_cast(task.data); - executor_contexts[task.thread_num]->async_tasks.push(node); - executor_contexts[task.thread_num]->has_async_tasks = true; - ++num_waiting_async_tasks; - - if (threads_queue.has(task.thread_num)) - { - threads_queue.pop(task.thread_num); - wakeUpExecutor(task.thread_num); - } - } - } -#endif + tasks.processAsyncTasks(); for (auto & thread : threads) if (thread.joinable()) thread.join(); } else - executeSingleThread(0, num_threads); + executeSingleThread(0); finished_flag = true; } @@ -858,7 +332,7 @@ String PipelineExecutor::dumpPipeline() const } WriteBufferFromOwnString out; - printPipeline(processors, statuses, out); + printPipeline(graph->getProcessors(), statuses, out); out.finalize(); return out.str(); diff --git a/src/Processors/Executors/PipelineExecutor.h b/src/Processors/Executors/PipelineExecutor.h index a68d56bf499..19137b2306a 100644 --- a/src/Processors/Executors/PipelineExecutor.h +++ b/src/Processors/Executors/PipelineExecutor.h @@ -1,11 +1,7 @@ #pragma once #include -#include -#include -#include -#include -#include +#include #include #include @@ -30,7 +26,7 @@ public: /// During pipeline execution new processors can appear. They will be added to existing set. /// /// Explicit graph representation is built in constructor. Throws if graph is not correct. - explicit PipelineExecutor(Processors & processors_, QueryStatus * elem = nullptr); + explicit PipelineExecutor(Processors & processors, QueryStatus * elem = nullptr); ~PipelineExecutor(); /// Execute pipeline in multiple threads. Must be called once. @@ -42,121 +38,36 @@ public: /// Return true if execution should be continued. bool executeStep(std::atomic_bool * yield_flag = nullptr); - const Processors & getProcessors() const { return processors; } + const Processors & getProcessors() const; /// Cancel execution. May be called from another thread. void cancel(); private: - Processors & processors; - std::mutex processors_mutex; - ExecutingGraphPtr graph; + ExecutorTasks tasks; using Stack = std::stack; - /// Queue with pointers to tasks. Each thread will concurrently read from it until finished flag is set. - /// Stores processors need to be prepared. Preparing status is already set for them. - TaskQueue task_queue; - - /// Queue which stores tasks where processors returned Async status after prepare. - /// If multiple threads are using, main thread will wait for async tasks. - /// For single thread, will wait for async tasks only when task_queue is empty. - PollingQueue async_task_queue; - size_t num_waiting_async_tasks = 0; - - ThreadsQueue threads_queue; - std::mutex task_queue_mutex; - /// Flag that checks that initializeExecution was called. bool is_execution_initialized = false; - std::atomic_bool cancelled; - std::atomic_bool finished; + + std::atomic_bool cancelled = false; Poco::Logger * log = &Poco::Logger::get("PipelineExecutor"); - /// Things to stop execution to expand pipeline. - struct ExpandPipelineTask - { - ExecutingGraph::Node * node_to_expand; - Stack * stack; - size_t num_waiting_processing_threads = 0; - std::mutex mutex; - std::condition_variable condvar; - - ExpandPipelineTask(ExecutingGraph::Node * node_to_expand_, Stack * stack_) - : node_to_expand(node_to_expand_), stack(stack_) {} - }; - - std::atomic num_processing_executors; - std::atomic expand_pipeline_task; - - /// Context for each thread. 
- struct ExecutorContext - { - /// Will store context for all expand pipeline tasks (it's easy and we don't expect many). - /// This can be solved by using atomic shard ptr. - std::list task_list; - - std::queue async_tasks; - std::atomic_bool has_async_tasks = false; - - std::condition_variable condvar; - std::mutex mutex; - bool wake_flag = false; - - /// Currently processing node. - ExecutingGraph::Node * node = nullptr; - - /// Exception from executing thread itself. - std::exception_ptr exception; - -#ifndef NDEBUG - /// Time for different processing stages. - UInt64 total_time_ns = 0; - UInt64 execution_time_ns = 0; - UInt64 processing_time_ns = 0; - UInt64 wait_time_ns = 0; -#endif - }; - - std::vector> executor_contexts; - std::mutex executor_contexts_mutex; - - /// Processor ptr -> node number - using ProcessorsMap = std::unordered_map; - ProcessorsMap processors_map; - /// Now it's used to check if query was killed. QueryStatus * const process_list_element = nullptr; - /// Graph related methods. - bool expandPipeline(Stack & stack, UInt64 pid); - using Queue = std::queue; - /// Pipeline execution related methods. - void addChildlessProcessorsToStack(Stack & stack); - bool tryAddProcessorToStackIfUpdated(ExecutingGraph::Edge & edge, Queue & queue, Queue & async_queue, size_t thread_number); - static void addJob(ExecutingGraph::Node * execution_state); - // TODO: void addAsyncJob(UInt64 pid); - - /// Prepare processor with pid number. - /// Check parents and children of current processor and push them to stacks if they also need to be prepared. - /// If processor wants to be expanded, ExpandPipelineTask from thread_number's execution context will be used. - bool prepareProcessor(UInt64 pid, size_t thread_number, Queue & queue, Queue & async_queue, std::unique_lock node_lock); - bool doExpandPipeline(ExpandPipelineTask * task, bool processing); - - /// Continue executor (in case there are tasks in queue). - void wakeUpExecutor(size_t thread_num); - void initializeExecution(size_t num_threads); /// Initialize executor contexts and task_queue. void finalizeExecution(); /// Check all processors are finished. /// Methods connected to execution. 
     void executeImpl(size_t num_threads);
-    void executeStepImpl(size_t thread_num, size_t num_threads, std::atomic_bool * yield_flag = nullptr);
-    void executeSingleThread(size_t thread_num, size_t num_threads);
+    void executeStepImpl(size_t thread_num, std::atomic_bool * yield_flag = nullptr);
+    void executeSingleThread(size_t thread_num);
     void finish();

     String dumpPipeline() const;
diff --git a/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp b/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp
index 0b6d5334716..68898bdc2c2 100644
--- a/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp
+++ b/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp
@@ -4,8 +4,10 @@
 #include
 #include
+#include
 #include
 #include
+#include

 namespace DB
 {
diff --git a/src/Processors/Executors/TasksQueue.h b/src/Processors/Executors/TasksQueue.h
index de02a33a1c9..9551c732216 100644
--- a/src/Processors/Executors/TasksQueue.h
+++ b/src/Processors/Executors/TasksQueue.h
@@ -1,6 +1,7 @@
 #pragma once
 #include
 #include
+#include

 namespace DB
 {
diff --git a/src/Processors/Executors/ThreadsQueue.h b/src/Processors/Executors/ThreadsQueue.h
index f9ca6a14857..3631d764d82 100644
--- a/src/Processors/Executors/ThreadsQueue.h
+++ b/src/Processors/Executors/ThreadsQueue.h
@@ -1,5 +1,6 @@
 #pragma once
-
+#include
+#include
 namespace DB
 {
 namespace ErrorCodes
diff --git a/src/Processors/Executors/UpgradableLock.h b/src/Processors/Executors/UpgradableLock.h
new file mode 100644
index 00000000000..b5a31885424
--- /dev/null
+++ b/src/Processors/Executors/UpgradableLock.h
@@ -0,0 +1,175 @@
+#pragma once
+#include <atomic>
+#include <condition_variable>
+#include <list>
+#include <memory>
+#include <mutex>
+
+namespace DB
+{
+
+/// RWLock that allows upgrading a read lock to a write lock.
+/// Read locks should be fast if there is no write lock.
+///
+/// Newly created write lock waits for all active read locks.
+/// Newly created read lock waits for all write locks. Starvation is possible.
+///
+/// Mutex must live longer than locks.
+/// Read lock must live longer than corresponding write lock.
+///
+/// For every write lock, a new internal state is created inside mutex.
+/// This state is not deallocated until the destruction of mutex itself.
+///
+/// Usage example:
+///
+/// UpgradableMutex mutex;
+/// {
+///     UpgradableMutex::ReadGuard read_lock(mutex);
+///     ...
+///     {
+///         UpgradableMutex::WriteGuard write_lock(read_lock);
+///         ...
+///     }
+///     ...
+/// }
+class UpgradableMutex
+{
+private:
+    /// Implementation idea
+    ///
+    /// ----------- (read scope)
+    /// ++num_readers
+    /// ** wait for active writer (in loop, starvation is possible here) **
+    ///
+    /// =========== (write scope)
+    /// ** create new State **
+    /// ** wait for active writer (in loop, starvation is possible here) **
+    /// ** wait for all active readers **
+    ///
+    /// ** notify all waiting readers for the current state **
+    /// =========== (end write scope)
+    ///
+    /// --num_readers
+    /// ** notify current active writer **
+    /// ----------- (end read scope)
+    struct State
+    {
+        size_t num_waiting = 0;
+        bool is_done = false;
+
+        std::mutex mutex;
+        std::condition_variable read_condvar;
+        std::condition_variable write_condvar;
+
+        void wait() noexcept
+        {
+            std::unique_lock lock(mutex);
+            ++num_waiting;
+            write_condvar.notify_one();
+            while (!is_done)
+                read_condvar.wait(lock);
+        }
+
+        void lock(std::atomic_size_t & num_readers_) noexcept
+        {
+            /// Note: num_readers_ is an atomic
+            /// that can change its value without the mutex locked.
+            /// We maintain the invariant that after num_readers_ changes,
+            /// UpgradableMutex::write_state is checked, and in case of an
+            /// active write lock, its write condvar is always notified.
+            std::unique_lock lock(mutex);
+            ++num_waiting;
+            while (num_waiting < num_readers_.load())
+                write_condvar.wait(lock);
+        }
+
+        void unlock() noexcept
+        {
+            {
+                std::unique_lock lock(mutex);
+                is_done = true;
+            }
+            read_condvar.notify_all();
+        }
+    };
+
+    std::atomic_size_t num_readers = 0;
+
+    std::list<State> states;
+    std::mutex states_mutex;
+    std::atomic<State *> write_state{nullptr};
+
+    void lock() noexcept
+    {
+        ++num_readers;
+        while (auto * state = write_state.load())
+            state->wait();
+    }
+
+    void unlock() noexcept
+    {
+        --num_readers;
+        while (auto * state = write_state.load())
+            state->write_condvar.notify_one();
+    }
+
+    State * allocState()
+    {
+        std::lock_guard guard(states_mutex);
+        return &states.emplace_back();
+    }
+
+    void upgrade(State & state) noexcept
+    {
+        State * expected = nullptr;
+
+        /// Only the transition nullptr -> state is possible.
+        while (!write_state.compare_exchange_strong(expected, &state))
+        {
+            expected->wait();
+            expected = nullptr;
+        }
+
+        state.lock(num_readers);
+    }
+
+    void degrade(State & state) noexcept
+    {
+        State * my = write_state.exchange(nullptr);
+        if (&state != my)
+            std::terminate();
+        state.unlock();
+    }
+
+public:
+    class ReadGuard
+    {
+    public:
+        explicit ReadGuard(UpgradableMutex & lock_) : lock(lock_) { lock.lock(); }
+        ~ReadGuard() { lock.unlock(); }
+
+        UpgradableMutex & lock;
+    };
+
+    class WriteGuard
+    {
+    public:
+        explicit WriteGuard(ReadGuard & read_guard_) : read_guard(read_guard_)
+        {
+            state = read_guard.lock.allocState();
+            read_guard.lock.upgrade(*state);
+        }
+
+        ~WriteGuard()
+        {
+            if (state)
+                read_guard.lock.degrade(*state);
+        }
+
+    private:
+        ReadGuard & read_guard;
+        State * state = nullptr;
+    };
+};
+
+}
diff --git a/src/Processors/Formats/IOutputFormat.cpp b/src/Processors/Formats/IOutputFormat.cpp
index 0f94622b7c6..d8273878717 100644
--- a/src/Processors/Formats/IOutputFormat.cpp
+++ b/src/Processors/Formats/IOutputFormat.cpp
@@ -65,18 +65,14 @@ static Chunk prepareTotals(Chunk chunk)

 void IOutputFormat::work()
 {
-    if (!prefix_written)
-    {
-        doWritePrefix();
-        prefix_written = true;
-    }
+    writePrefixIfNot();

     if (finished && !finalized)
     {
         if (rows_before_limit_counter && rows_before_limit_counter->hasAppliedLimit())
             setRowsBeforeLimit(rows_before_limit_counter->get());

-        finalize();
+        finalizeImpl();
         finalized = true;
         return;
     }
@@ -110,10 +106,17 @@ void IOutputFormat::flush()

 void IOutputFormat::write(const Block & block)
 {
+    writePrefixIfNot();
     consume(Chunk(block.getColumns(), block.rows()));

     if (auto_flush)
         flush();
 }

+void IOutputFormat::finalize()
+{
+    writePrefixIfNot();
+    finalizeImpl();
+}
+
 }
diff --git a/src/Processors/Formats/IOutputFormat.h b/src/Processors/Formats/IOutputFormat.h
index ba4dcee6f70..40dcaf75b53 100644
--- a/src/Processors/Formats/IOutputFormat.h
+++ b/src/Processors/Formats/IOutputFormat.h
@@ -25,28 +25,6 @@ class IOutputFormat : public IProcessor
 public:
     enum PortKind { Main = 0, Totals = 1, Extremes = 2 };

-protected:
-    WriteBuffer & out;
-
-    Chunk current_chunk;
-    PortKind current_block_kind = PortKind::Main;
-    bool has_input = false;
-    bool finished = false;
-    bool finalized = false;
-
-    /// Flush data on each consumed chunk. This is intended for interactive applications to output data as soon as it's ready.
- bool auto_flush = false; - - RowsBeforeLimitCounterPtr rows_before_limit_counter; - - friend class ParallelFormattingOutputFormat; - - virtual void consume(Chunk) = 0; - virtual void consumeTotals(Chunk) {} - virtual void consumeExtremes(Chunk) {} - virtual void finalize() {} - -public: IOutputFormat(const Block & header_, WriteBuffer & out_); Status prepare() override; @@ -77,8 +55,7 @@ public: void write(const Block & block); - virtual void doWritePrefix() {} - virtual void doWriteSuffix() { finalize(); } + void finalize(); virtual bool expectMaterializedColumns() const { return true; } @@ -88,11 +65,43 @@ public: size_t getResultRows() const { return result_rows; } size_t getResultBytes() const { return result_bytes; } + void doNotWritePrefix() { need_write_prefix = false; } + +protected: + friend class ParallelFormattingOutputFormat; + + virtual void consume(Chunk) = 0; + virtual void consumeTotals(Chunk) {} + virtual void consumeExtremes(Chunk) {} + virtual void finalizeImpl() {} + virtual void writePrefix() {} + + void writePrefixIfNot() + { + if (need_write_prefix) + { + writePrefix(); + need_write_prefix = false; + } + } + + WriteBuffer & out; + + Chunk current_chunk; + PortKind current_block_kind = PortKind::Main; + bool has_input = false; + bool finished = false; + bool finalized = false; + + /// Flush data on each consumed chunk. This is intended for interactive applications to output data as soon as it's ready. + bool auto_flush = false; + bool need_write_prefix = true; + + RowsBeforeLimitCounterPtr rows_before_limit_counter; + private: /// Counters for consumed chunks. Are used for QueryLog. size_t result_rows = 0; size_t result_bytes = 0; - - bool prefix_written = false; }; } diff --git a/src/Processors/Formats/IRowOutputFormat.cpp b/src/Processors/Formats/IRowOutputFormat.cpp index 6b7a9a46eaa..299c6c35fad 100644 --- a/src/Processors/Formats/IRowOutputFormat.cpp +++ b/src/Processors/Formats/IRowOutputFormat.cpp @@ -22,8 +22,6 @@ IRowOutputFormat::IRowOutputFormat(const Block & header, WriteBuffer & out_, con void IRowOutputFormat::consume(DB::Chunk chunk) { - writePrefixIfNot(); - auto num_rows = chunk.getNumRows(); const auto & columns = chunk.getColumns(); @@ -43,7 +41,6 @@ void IRowOutputFormat::consume(DB::Chunk chunk) void IRowOutputFormat::consumeTotals(DB::Chunk chunk) { - writePrefixIfNot(); writeSuffixIfNot(); auto num_rows = chunk.getNumRows(); @@ -59,7 +56,6 @@ void IRowOutputFormat::consumeTotals(DB::Chunk chunk) void IRowOutputFormat::consumeExtremes(DB::Chunk chunk) { - writePrefixIfNot(); writeSuffixIfNot(); auto num_rows = chunk.getNumRows(); @@ -74,9 +70,8 @@ void IRowOutputFormat::consumeExtremes(DB::Chunk chunk) writeAfterExtremes(); } -void IRowOutputFormat::finalize() +void IRowOutputFormat::finalizeImpl() { - writePrefixIfNot(); writeSuffixIfNot(); writeLastSuffix(); } diff --git a/src/Processors/Formats/IRowOutputFormat.h b/src/Processors/Formats/IRowOutputFormat.h index 18575419cd0..11e690c33ee 100644 --- a/src/Processors/Formats/IRowOutputFormat.h +++ b/src/Processors/Formats/IRowOutputFormat.h @@ -26,40 +26,12 @@ class IRowOutputFormat : public IOutputFormat public: using Params = RowOutputFormatParams; -private: - bool prefix_written = false; - bool suffix_written = false; - protected: - DataTypes types; - Serializations serializations; - Params params; - - bool first_row = true; - + IRowOutputFormat(const Block & header, WriteBuffer & out_, const Params & params_); void consume(Chunk chunk) override; void consumeTotals(Chunk chunk) override; 
void consumeExtremes(Chunk chunk) override; - void finalize() override; - - void writePrefixIfNot() - { - if (!prefix_written) - writePrefix(); - - prefix_written = true; - } - - void writeSuffixIfNot() - { - if (!suffix_written) - writeSuffix(); - - suffix_written = true; - } - -public: - IRowOutputFormat(const Block & header, WriteBuffer & out_, const Params & params_); + void finalizeImpl() override; /** Write a row. * Default implementation calls methods to write single values and delimiters @@ -78,7 +50,7 @@ public: virtual void writeRowStartDelimiter() {} /// delimiter before each row virtual void writeRowEndDelimiter() {} /// delimiter after each row virtual void writeRowBetweenDelimiter() {} /// delimiter between rows - virtual void writePrefix() {} /// delimiter before resultset + virtual void writePrefix() override {} /// delimiter before resultset virtual void writeSuffix() {} /// delimiter after resultset virtual void writeBeforeTotals() {} virtual void writeAfterTotals() {} @@ -86,6 +58,22 @@ public: virtual void writeAfterExtremes() {} virtual void writeLastSuffix() {} /// Write something after resultset, totals end extremes. + DataTypes types; + Serializations serializations; + Params params; + + bool first_row = true; + +private: + void writeSuffixIfNot() + { + if (!suffix_written) + writeSuffix(); + + suffix_written = true; + } + + bool suffix_written = false; }; } diff --git a/src/Processors/Formats/Impl/ArrowBlockInputFormat.h b/src/Processors/Formats/Impl/ArrowBlockInputFormat.h index 1136937e65b..44e18e3f852 100644 --- a/src/Processors/Formats/Impl/ArrowBlockInputFormat.h +++ b/src/Processors/Formats/Impl/ArrowBlockInputFormat.h @@ -24,10 +24,9 @@ public: String getName() const override { return "ArrowBlockInputFormat"; } -protected: +private: Chunk generate() override; -private: // Whether to use ArrowStream format bool stream; // This field is only used for ArrowStream format diff --git a/src/Processors/Formats/Impl/ArrowBlockOutputFormat.cpp b/src/Processors/Formats/Impl/ArrowBlockOutputFormat.cpp index 4404749adb6..692f17f843a 100644 --- a/src/Processors/Formats/Impl/ArrowBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/ArrowBlockOutputFormat.cpp @@ -50,7 +50,7 @@ void ArrowBlockOutputFormat::consume(Chunk chunk) "Error while writing a table: {}", status.ToString()); } -void ArrowBlockOutputFormat::finalize() +void ArrowBlockOutputFormat::finalizeImpl() { if (!writer) { diff --git a/src/Processors/Formats/Impl/ArrowBlockOutputFormat.h b/src/Processors/Formats/Impl/ArrowBlockOutputFormat.h index 154292d838f..ab5a0e7351a 100644 --- a/src/Processors/Formats/Impl/ArrowBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/ArrowBlockOutputFormat.h @@ -21,19 +21,20 @@ public: ArrowBlockOutputFormat(WriteBuffer & out_, const Block & header_, bool stream_, const FormatSettings & format_settings_); String getName() const override { return "ArrowBlockOutputFormat"; } - void consume(Chunk) override; - void finalize() override; String getContentType() const override { return "application/octet-stream"; } private: + void consume(Chunk) override; + void finalizeImpl() override; + + void prepareWriter(const std::shared_ptr & schema); + bool stream; const FormatSettings format_settings; std::shared_ptr arrow_ostream; std::shared_ptr writer; std::unique_ptr ch_column_to_arrow_column; - - void prepareWriter(const std::shared_ptr & schema); }; } diff --git a/src/Processors/Formats/Impl/AvroRowInputFormat.h b/src/Processors/Formats/Impl/AvroRowInputFormat.h index 
5617b4a7661..2de11178e96 100644 --- a/src/Processors/Formats/Impl/AvroRowInputFormat.h +++ b/src/Processors/Formats/Impl/AvroRowInputFormat.h @@ -107,12 +107,13 @@ class AvroRowInputFormat : public IRowInputFormat { public: AvroRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_); - bool readRow(MutableColumns & columns, RowReadExtension & ext) override; - void readPrefix() override; String getName() const override { return "AvroRowInputFormat"; } private: + bool readRow(MutableColumns & columns, RowReadExtension & ext) override; + void readPrefix() override; + std::unique_ptr file_reader_ptr; std::unique_ptr deserializer_ptr; bool allow_missing_fields; @@ -128,14 +129,16 @@ class AvroConfluentRowInputFormat : public IRowInputFormat { public: AvroConfluentRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_); - virtual bool readRow(MutableColumns & columns, RowReadExtension & ext) override; String getName() const override { return "AvroConfluentRowInputFormat"; } class SchemaRegistry; -protected: + +private: + virtual bool readRow(MutableColumns & columns, RowReadExtension & ext) override; + bool allowSyncAfterError() const override { return true; } void syncAfterError() override; -private: + std::shared_ptr schema_registry; using SchemaId = uint32_t; std::unordered_map deserializer_cache; diff --git a/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp b/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp index 467738f49e8..fd7b2404c77 100644 --- a/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp @@ -428,7 +428,6 @@ void AvroRowOutputFormat::consumeImpl(DB::Chunk chunk) auto num_rows = chunk.getNumRows(); const auto & columns = chunk.getColumns(); - writePrefixIfNot(); for (size_t row = 0; row < num_rows; ++row) { write(columns, row); @@ -447,7 +446,7 @@ void AvroRowOutputFormat::consumeImplWithCallback(DB::Chunk chunk) /// used by WriteBufferToKafkaProducer to obtain auxiliary data /// from the starting row of a file - writePrefix(); + writePrefixIfNot(); for (size_t row_in_file = 0; row_in_file < settings.avro.output_rows_in_file && row < num_rows; ++row, ++row_in_file) @@ -457,6 +456,7 @@ void AvroRowOutputFormat::consumeImplWithCallback(DB::Chunk chunk) file_writer_ptr->flush(); writeSuffix(); + need_write_prefix = true; params.callback(columns, current_row); } diff --git a/src/Processors/Formats/Impl/AvroRowOutputFormat.h b/src/Processors/Formats/Impl/AvroRowOutputFormat.h index a3e8493f757..b5583406cb8 100644 --- a/src/Processors/Formats/Impl/AvroRowOutputFormat.h +++ b/src/Processors/Formats/Impl/AvroRowOutputFormat.h @@ -51,12 +51,13 @@ public: void consume(Chunk) override; String getName() const override { return "AvroRowOutputFormat"; } + +private: void write(const Columns & columns, size_t row_num) override; void writeField(const IColumn &, const ISerialization &, size_t) override {} virtual void writePrefix() override; virtual void writeSuffix() override; -private: FormatSettings settings; AvroSerializer serializer; std::unique_ptr file_writer_ptr; diff --git a/src/Processors/Formats/Impl/BinaryRowOutputFormat.h b/src/Processors/Formats/Impl/BinaryRowOutputFormat.h index 36a62098b75..0edfd4bfcf8 100644 --- a/src/Processors/Formats/Impl/BinaryRowOutputFormat.h +++ b/src/Processors/Formats/Impl/BinaryRowOutputFormat.h @@ -21,12 +21,12 @@ public: String getName() const override { return 
"BinaryRowOutputFormat"; } + String getContentType() const override { return "application/octet-stream"; } + +private: void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; void writePrefix() override; - String getContentType() const override { return "application/octet-stream"; } - -protected: bool with_names; bool with_types; }; diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.h b/src/Processors/Formats/Impl/CSVRowInputFormat.h index f239464485a..d7c557b58d8 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.h +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.h @@ -25,12 +25,13 @@ public: String getName() const override { return "CSVRowInputFormat"; } +private: bool allowSyncAfterError() const override { return true; } void syncAfterError() override; -private: bool parseFieldDelimiterWithDiagnosticInfo(WriteBuffer & out) override; bool parseRowEndWithDiagnosticInfo(WriteBuffer & out) override; + bool isGarbageAfterField(size_t, ReadBuffer::Position pos) override { return *pos != '\n' && *pos != '\r' && *pos != format_settings.csv.delimiter && *pos != ' ' && *pos != '\t'; diff --git a/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp b/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp index b300928e569..790994cb240 100644 --- a/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp @@ -31,7 +31,7 @@ void CSVRowOutputFormat::writeLine(const std::vector & values) } } -void CSVRowOutputFormat::doWritePrefix() +void CSVRowOutputFormat::writePrefix() { const auto & sample = getPort(PortKind::Main).getHeader(); diff --git a/src/Processors/Formats/Impl/CSVRowOutputFormat.h b/src/Processors/Formats/Impl/CSVRowOutputFormat.h index 7f5d90203ea..dd9c2179f19 100644 --- a/src/Processors/Formats/Impl/CSVRowOutputFormat.h +++ b/src/Processors/Formats/Impl/CSVRowOutputFormat.h @@ -24,14 +24,6 @@ public: String getName() const override { return "CSVRowOutputFormat"; } - void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; - void writeFieldDelimiter() override; - void writeRowEndDelimiter() override; - void writeBeforeTotals() override; - void writeBeforeExtremes() override; - - void doWritePrefix() override; - /// https://www.iana.org/assignments/media-types/text/csv String getContentType() const override { @@ -39,6 +31,13 @@ public: } private: + void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; + void writeFieldDelimiter() override; + void writeRowEndDelimiter() override; + void writeBeforeTotals() override; + void writeBeforeExtremes() override; + + void writePrefix() override; void writeLine(const std::vector & values); bool with_names; diff --git a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h index fc30cf11237..4c0f34d70a3 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h +++ b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h @@ -26,9 +26,9 @@ public: String getName() const override { return "CapnProtoRowInputFormat"; } +private: bool readRow(MutableColumns & columns, RowReadExtension &) override; -private: kj::Array readMessage(); std::shared_ptr parser; diff --git a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.h b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.h index 0f321071d62..6e27426f2cc 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.h +++ 
b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.h @@ -35,11 +35,11 @@ public: String getName() const override { return "CapnProtoRowOutputFormat"; } +private: void write(const Columns & columns, size_t row_num) override; void writeField(const IColumn &, const ISerialization &, size_t) override { } -private: Names column_names; DataTypes column_types; capnp::StructSchema schema; diff --git a/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp new file mode 100644 index 00000000000..6ff9a8cca2c --- /dev/null +++ b/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp @@ -0,0 +1,251 @@ +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + +static FormatSettings updateFormatSettings(const FormatSettings & settings) +{ + if (settings.custom.escaping_rule != FormatSettings::EscapingRule::CSV || settings.custom.field_delimiter.empty()) + return settings; + + auto updated = settings; + updated.csv.delimiter = settings.custom.field_delimiter.front(); + return updated; +} + +CustomSeparatedRowInputFormat::CustomSeparatedRowInputFormat( + const Block & header_, + ReadBuffer & in_, + const Params & params_, + bool with_names_, + bool with_types_, + bool ignore_spaces_, + const FormatSettings & format_settings_) + : RowInputFormatWithNamesAndTypes(header_, buf, params_, with_names_, with_types_, updateFormatSettings(format_settings_)) + , buf(in_) + , ignore_spaces(ignore_spaces_) + , escaping_rule(format_settings_.custom.escaping_rule) +{ + /// In case of CustomSeparatedWithNames(AndTypes) formats and enabled setting input_format_with_names_use_header we don't know + /// the exact number of columns in data (because it can contain unknown columns). So, if field_delimiter and row_after_delimiter are + /// the same and row_between_delimiter is empty, we won't be able to determine the end of row while reading column names or types. 
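/// A concrete instance of the ambiguity described above (hypothetical
/// delimiter settings, not the defaults): with field_delimiter = ";",
/// row_after_delimiter = ";" and an empty row_between_delimiter, the header
/// bytes
///
///     name;age;city;
///
/// give the reader no way to tell whether a ";" closes a field or the whole
/// row, so the number of columns cannot be recovered; the check below rejects
/// this configuration outright.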
+ if ((with_types_ || with_names_) && format_settings_.with_names_use_header + && format_settings_.custom.field_delimiter == format_settings_.custom.row_after_delimiter + && format_settings_.custom.row_between_delimiter.empty()) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Input format CustomSeparatedWithNames(AndTypes) cannot work properly with enabled setting input_format_with_names_use_header, " + "when format_custom_field_delimiter and format_custom_row_after_delimiter are the same and format_custom_row_between_delimiter is empty."); + } +} + +void CustomSeparatedRowInputFormat::skipPrefixBeforeHeader() +{ + skipSpaces(); + assertString(format_settings.custom.result_before_delimiter, buf); +} + +void CustomSeparatedRowInputFormat::skipRowStartDelimiter() +{ + skipSpaces(); + assertString(format_settings.custom.row_before_delimiter, buf); +} + +void CustomSeparatedRowInputFormat::skipFieldDelimiter() +{ + skipSpaces(); + assertString(format_settings.custom.field_delimiter, buf); +} + +void CustomSeparatedRowInputFormat::skipRowEndDelimiter() +{ + skipSpaces(); + assertString(format_settings.custom.row_after_delimiter, buf); +} + +void CustomSeparatedRowInputFormat::skipRowBetweenDelimiter() +{ + skipSpaces(); + assertString(format_settings.custom.row_between_delimiter, buf); +} + +void CustomSeparatedRowInputFormat::skipField() +{ + skipSpaces(); + skipFieldByEscapingRule(buf, escaping_rule, format_settings); +} + +bool CustomSeparatedRowInputFormat::checkEndOfRow() +{ + PeekableReadBufferCheckpoint checkpoint{buf, true}; + + skipSpaces(); + if (!checkString(format_settings.custom.row_after_delimiter, buf)) + return false; + + skipSpaces(); + + /// At the end of row after row_after_delimiter we expect result_after_delimiter or row_between_delimiter. 
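/// Worked example of the lookahead below (hypothetical delimiters): take
/// row_after_delimiter = "\n" and row_between_delimiter = "". After a
/// candidate "\n" the reader peeks further under a checkpoint: if the next
/// bytes are result_after_delimiter or the start of another row, the row
/// really ended; otherwise it rolls back to the checkpoint and keeps reading
/// the current row.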
+
+    if (checkString(format_settings.custom.row_between_delimiter, buf))
+        return true;
+
+    buf.rollbackToCheckpoint();
+    skipSpaces();
+    buf.ignore(format_settings.custom.row_after_delimiter.size());
+    return checkForSuffixImpl(true);
+}
+
+std::vector<String> CustomSeparatedRowInputFormat::readHeaderRow()
+{
+    std::vector<String> values;
+    skipRowStartDelimiter();
+    do
+    {
+        if (!values.empty())
+            skipFieldDelimiter();
+        skipSpaces();
+        values.push_back(readStringByEscapingRule(buf, escaping_rule, format_settings));
+    }
+    while (!checkEndOfRow());
+
+    skipRowEndDelimiter();
+    return values;
+}
+
+void CustomSeparatedRowInputFormat::skipHeaderRow()
+{
+    size_t columns = getPort().getHeader().columns();
+    skipRowStartDelimiter();
+    for (size_t i = 0; i != columns; ++i)
+    {
+        skipField();
+        if (i + 1 != columns)
+            skipFieldDelimiter();
+    }
+    skipRowEndDelimiter();
+}
+
+bool CustomSeparatedRowInputFormat::readField(IColumn & column, const DataTypePtr & type, const SerializationPtr & serialization, bool, const String &)
+{
+    skipSpaces();
+    return deserializeFieldByEscapingRule(type, serialization, column, buf, escaping_rule, format_settings);
+}
+
+bool CustomSeparatedRowInputFormat::checkForSuffixImpl(bool check_eof)
+{
+    skipSpaces();
+    if (format_settings.custom.result_after_delimiter.empty())
+    {
+        if (!check_eof)
+            return false;
+
+        return buf.eof();
+    }
+
+    if (unlikely(checkString(format_settings.custom.result_after_delimiter, buf)))
+    {
+        skipSpaces();
+        if (!check_eof)
+            return true;
+
+        if (buf.eof())
+            return true;
+    }
+    return false;
+}
+
+bool CustomSeparatedRowInputFormat::tryParseSuffixWithDiagnosticInfo(WriteBuffer & out)
+{
+    PeekableReadBufferCheckpoint checkpoint{buf};
+    if (checkForSuffixImpl(false))
+    {
+        if (buf.eof())
+            out << "\n";
+        else
+            out << " There is some data after suffix\n";
+        return false;
+    }
+    buf.rollbackToCheckpoint();
+    return true;
+}
+
+bool CustomSeparatedRowInputFormat::checkForSuffix()
+{
+    PeekableReadBufferCheckpoint checkpoint{buf};
+    if (checkForSuffixImpl(true))
+        return true;
+    buf.rollbackToCheckpoint();
+    return false;
+}
+
+
+bool CustomSeparatedRowInputFormat::allowSyncAfterError() const
+{
+    return !format_settings.custom.row_after_delimiter.empty() || !format_settings.custom.row_between_delimiter.empty();
+}
+
+void CustomSeparatedRowInputFormat::syncAfterError()
+{
+    skipToNextRowOrEof(buf, format_settings.custom.row_after_delimiter, format_settings.custom.row_between_delimiter, ignore_spaces);
+    end_of_stream = buf.eof();
+    /// It can happen that buf.position() is not at the beginning of a row
+    /// if some delimiter looks like row_after_delimiter or row_between_delimiter.
+    /// That will cause another parsing error.
+}
+
+bool CustomSeparatedRowInputFormat::parseRowStartWithDiagnosticInfo(WriteBuffer & out)
+{
+    return parseDelimiterWithDiagnosticInfo(out, buf, format_settings.custom.row_before_delimiter, "delimiter before first field", ignore_spaces);
+}
+
+bool CustomSeparatedRowInputFormat::parseFieldDelimiterWithDiagnosticInfo(WriteBuffer & out)
+{
+    return parseDelimiterWithDiagnosticInfo(out, buf, format_settings.custom.field_delimiter, "delimiter between fields", ignore_spaces);
+}
+
+bool CustomSeparatedRowInputFormat::parseRowEndWithDiagnosticInfo(WriteBuffer & out)
+{
+    return parseDelimiterWithDiagnosticInfo(out, buf, format_settings.custom.row_after_delimiter, "delimiter after last field", ignore_spaces);
+}
+
+bool CustomSeparatedRowInputFormat::parseRowBetweenDelimiterWithDiagnosticInfo(WriteBuffer & out)
+{
+    return parseDelimiterWithDiagnosticInfo(out, buf, format_settings.custom.row_between_delimiter, "delimiter between rows", ignore_spaces);
+}
+
+void CustomSeparatedRowInputFormat::resetParser()
+{
+    RowInputFormatWithNamesAndTypes::resetParser();
+    buf.reset();
+}
+
+void registerInputFormatCustomSeparated(FormatFactory & factory)
+{
+    for (bool ignore_spaces : {false, true})
+    {
+        auto register_func = [&](const String & format_name, bool with_names, bool with_types)
+        {
+            factory.registerInputFormat(format_name, [=](
+                ReadBuffer & buf,
+                const Block & sample,
+                IRowInputFormat::Params params,
+                const FormatSettings & settings)
+            {
+                return std::make_shared<CustomSeparatedRowInputFormat>(sample, buf, params, with_names, with_types, ignore_spaces, settings);
+            });
+        };
+        registerWithNamesAndTypes(ignore_spaces ? "CustomSeparatedIgnoreSpaces" : "CustomSeparated", register_func);
+    }
+}
+
+}
diff --git a/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.h b/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.h
new file mode 100644
index 00000000000..00ee28e50cc
--- /dev/null
+++ b/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.h
@@ -0,0 +1,69 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+namespace DB
+{
+
+class CustomSeparatedRowInputFormat : public RowInputFormatWithNamesAndTypes
+{
+public:
+    CustomSeparatedRowInputFormat(
+        const Block & header_,
+        ReadBuffer & in_,
+        const Params & params_,
+        bool with_names_, bool with_types_, bool ignore_spaces_, const FormatSettings & format_settings_);
+
+    void resetParser() override;
+    String getName() const override { return "CustomSeparatedRowInputFormat"; }
+
+private:
+    CustomSeparatedRowInputFormat(
+        const Block & header_,
+        std::unique_ptr in_,
+        const Params & params_,
+        bool with_names_, bool with_types_, bool ignore_spaces_, const FormatSettings & format_settings_);
+    using EscapingRule = FormatSettings::EscapingRule;
+
+    bool readField(IColumn & column, const DataTypePtr & type, const SerializationPtr & serialization, bool is_last_file_column, const String & column_name) override;
+
+    void skipField(size_t /*file_column*/) override { skipField(); }
+    void skipField();
+    void skipNames() override { skipHeaderRow(); }
+    void skipTypes() override { skipHeaderRow(); }
+    void skipHeaderRow();
+
+    void skipPrefixBeforeHeader() override;
+    void skipRowStartDelimiter() override;
+    void skipFieldDelimiter() override;
+    void skipRowEndDelimiter() override;
+    void skipRowBetweenDelimiter() override;
+
+    bool checkForSuffix() override;
+
+    bool allowSyncAfterError() const override;
+    void syncAfterError() override;
+
+    bool parseRowStartWithDiagnosticInfo(WriteBuffer & out) override;
+    bool
parseFieldDelimiterWithDiagnosticInfo(WriteBuffer & out) override; + bool parseRowEndWithDiagnosticInfo(WriteBuffer & out) override; + bool parseRowBetweenDelimiterWithDiagnosticInfo(WriteBuffer & out) override; + bool tryParseSuffixWithDiagnosticInfo(WriteBuffer & out) override; + + std::vector readNames() override { return readHeaderRow(); } + std::vector readTypes() override { return readHeaderRow(); } + std::vector readHeaderRow(); + + bool checkEndOfRow(); + bool checkForSuffixImpl(bool check_eof); + inline void skipSpaces() { if (ignore_spaces) skipWhitespaceIfAny(buf); } + + PeekableReadBuffer buf; + bool ignore_spaces; + EscapingRule escaping_rule; +}; + +} diff --git a/src/Processors/Formats/Impl/CustomSeparatedRowOutputFormat.cpp b/src/Processors/Formats/Impl/CustomSeparatedRowOutputFormat.cpp new file mode 100644 index 00000000000..5c9664f2daf --- /dev/null +++ b/src/Processors/Formats/Impl/CustomSeparatedRowOutputFormat.cpp @@ -0,0 +1,97 @@ +#include +#include +#include +#include + + +namespace DB +{ + +CustomSeparatedRowOutputFormat::CustomSeparatedRowOutputFormat( + const Block & header_, WriteBuffer & out_, const RowOutputFormatParams & params_, const FormatSettings & format_settings_, bool with_names_, bool with_types_) + : IRowOutputFormat(header_, out_, params_) + , with_names(with_names_) + , with_types(with_types_) + , format_settings(format_settings_) + , escaping_rule(format_settings.custom.escaping_rule) +{ +} + +void CustomSeparatedRowOutputFormat::writeLine(const std::vector & values) +{ + writeRowStartDelimiter(); + for (size_t i = 0; i != values.size(); ++i) + { + writeStringByEscapingRule(values[i], out, escaping_rule, format_settings); + if (i + 1 != values.size()) + writeFieldDelimiter(); + } + writeRowEndDelimiter(); +} + +void CustomSeparatedRowOutputFormat::writePrefix() +{ + writeString(format_settings.custom.result_before_delimiter, out); + + const auto & header = getPort(PortKind::Main).getHeader(); + if (with_names) + { + writeLine(header.getNames()); + writeRowBetweenDelimiter(); + } + + if (with_types) + { + writeLine(header.getDataTypeNames()); + writeRowBetweenDelimiter(); + } +} + +void CustomSeparatedRowOutputFormat::writeSuffix() +{ + writeString(format_settings.custom.result_after_delimiter, out); +} + +void CustomSeparatedRowOutputFormat::writeRowStartDelimiter() +{ + writeString(format_settings.custom.row_before_delimiter, out); +} + +void CustomSeparatedRowOutputFormat::writeFieldDelimiter() +{ + writeString(format_settings.custom.field_delimiter, out); +} + +void CustomSeparatedRowOutputFormat::writeRowEndDelimiter() +{ + writeString(format_settings.custom.row_after_delimiter, out); +} + +void CustomSeparatedRowOutputFormat::writeRowBetweenDelimiter() +{ + writeString(format_settings.custom.row_between_delimiter, out); +} + +void CustomSeparatedRowOutputFormat::writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) +{ + serializeFieldByEscapingRule(column, serialization, out, row_num, escaping_rule, format_settings); +} + +void registerOutputFormatCustomSeparated(FormatFactory & factory) +{ + auto register_func = [&](const String & format_name, bool with_names, bool with_types) + { + factory.registerOutputFormat(format_name, [with_names, with_types]( + WriteBuffer & buf, + const Block & sample, + const RowOutputFormatParams & params, + const FormatSettings & settings) + { + return std::make_shared(sample, buf, params, settings, with_names, with_types); + }); + }; + + 
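/// registerWithNamesAndTypes presumably fans register_func out over the three
/// variants these formats come in, mirroring the input side above (which
/// registers both CustomSeparated and CustomSeparatedIgnoreSpaces). Roughly:
///
///     register_func("CustomSeparated", false, false);
///     register_func("CustomSeparatedWithNames", true, false);
///     register_func("CustomSeparatedWithNamesAndTypes", true, true);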
registerWithNamesAndTypes("CustomSeparated", register_func); +} + +} diff --git a/src/Processors/Formats/Impl/CustomSeparatedRowOutputFormat.h b/src/Processors/Formats/Impl/CustomSeparatedRowOutputFormat.h new file mode 100644 index 00000000000..274df1af330 --- /dev/null +++ b/src/Processors/Formats/Impl/CustomSeparatedRowOutputFormat.h @@ -0,0 +1,36 @@ +#pragma once + +#include +#include + +namespace DB +{ + +class WriteBuffer; + +class CustomSeparatedRowOutputFormat : public IRowOutputFormat +{ +public: + CustomSeparatedRowOutputFormat(const Block & header_, WriteBuffer & out_, const RowOutputFormatParams & params_, const FormatSettings & format_settings_, bool with_names_, bool with_types_); + + String getName() const override { return "CustomSeparatedRowOutputFormat"; } + +private: + using EscapingRule = FormatSettings::EscapingRule; + + void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; + void writeFieldDelimiter() override; + void writeRowStartDelimiter() override; + void writeRowEndDelimiter() override; + void writeRowBetweenDelimiter() override; + void writePrefix() override; + void writeSuffix() override; + + void writeLine(const std::vector & values); + bool with_names; + bool with_types; + const FormatSettings format_settings; + EscapingRule escaping_rule; +}; + +} diff --git a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp index a5e0ac6862c..1cca53b2f56 100644 --- a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp @@ -15,7 +15,7 @@ namespace ErrorCodes } JSONAsStringRowInputFormat::JSONAsStringRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_) : - IRowInputFormat(header_, in_, std::move(params_)), buf(*in) + IRowInputFormat(header_, buf, std::move(params_)), buf(in_) { if (header_.columns() > 1) throw Exception(ErrorCodes::BAD_ARGUMENTS, diff --git a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h index c70d9efb178..96ad60b3fab 100644 --- a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h +++ b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h @@ -18,14 +18,15 @@ class JSONAsStringRowInputFormat : public IRowInputFormat public: JSONAsStringRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_); - bool readRow(MutableColumns & columns, RowReadExtension & ext) override; String getName() const override { return "JSONAsStringRowInputFormat"; } void resetParser() override; +private: + bool readRow(MutableColumns & columns, RowReadExtension & ext) override; + void readPrefix() override; void readSuffix() override; -private: void readJSONObject(IColumn & column); PeekableReadBuffer buf; diff --git a/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h b/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h index 373eb04f06c..e01a4f49b30 100644 --- a/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h +++ b/src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.h @@ -31,10 +31,10 @@ public: String getName() const override { return "JSONCompactEachRowRowInputFormat"; } +private: bool allowSyncAfterError() const override { return true; } void syncAfterError() override; -private: bool parseRowStartWithDiagnosticInfo(WriteBuffer & out) override; bool parseFieldDelimiterWithDiagnosticInfo(WriteBuffer & out) override; bool 
parseRowEndWithDiagnosticInfo(WriteBuffer & out) override; diff --git a/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp index cdff7ff2070..c4645e0d63d 100644 --- a/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.cpp @@ -81,7 +81,7 @@ void JSONCompactEachRowRowOutputFormat::writeLine(const std::vector & va writeRowEndDelimiter(); } -void JSONCompactEachRowRowOutputFormat::doWritePrefix() +void JSONCompactEachRowRowOutputFormat::writePrefix() { const auto & header = getPort(PortKind::Main).getHeader(); diff --git a/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h b/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h index aa12ba7e809..6cb78bab49d 100644 --- a/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h +++ b/src/Processors/Formats/Impl/JSONCompactEachRowRowOutputFormat.h @@ -26,7 +26,8 @@ public: String getName() const override { return "JSONCompactEachRowRowOutputFormat"; } - void doWritePrefix() override; +private: + void writePrefix() override; void writeTotals(const Columns & columns, size_t row_num) override; @@ -35,12 +36,10 @@ public: void writeRowStartDelimiter() override; void writeRowEndDelimiter() override; -protected: void consumeTotals(Chunk) override; /// No extremes. void consumeExtremes(Chunk) override {} -private: void writeLine(const std::vector & values); FormatSettings settings; diff --git a/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.h b/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.h index 9bb433c50b1..961bd569d39 100644 --- a/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.h +++ b/src/Processors/Formats/Impl/JSONCompactRowOutputFormat.h @@ -25,6 +25,7 @@ public: String getName() const override { return "JSONCompactRowOutputFormat"; } +private: void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; void writeFieldDelimiter() override; void writeRowStartDelimiter() override; @@ -33,7 +34,6 @@ public: void writeBeforeTotals() override; void writeAfterTotals() override; -protected: void writeExtremesElement(const char * title, const Columns & columns, size_t row_num) override; void writeTotalsField(const IColumn & column, const ISerialization & serialization, size_t row_num) override diff --git a/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h index 29a6ce6ecb8..9810f2dc765 100644 --- a/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h +++ b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h @@ -28,16 +28,16 @@ public: bool yield_strings_); String getName() const override { return "JSONEachRowRowInputFormat"; } + void resetParser() override; +private: void readPrefix() override; void readSuffix() override; bool readRow(MutableColumns & columns, RowReadExtension & ext) override; bool allowSyncAfterError() const override { return true; } void syncAfterError() override; - void resetParser() override; -private: const String & columnName(size_t i) const; size_t columnIndex(const StringRef & name, size_t key_index); bool advanceToNextKey(size_t key_index); diff --git a/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h b/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h index 10b15f3e7b2..23fb506c220 100644 --- a/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h +++ 
b/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h @@ -23,6 +23,7 @@ public: String getName() const override { return "JSONEachRowRowOutputFormat"; } +protected: void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; void writeFieldDelimiter() override; void writeRowStartDelimiter() override; @@ -31,7 +32,6 @@ public: void writePrefix() override; void writeSuffix() override; -protected: /// No totals and extremes. void consumeTotals(Chunk) override {} void consumeExtremes(Chunk) override {} diff --git a/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h b/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h index 3062d664199..3eac61c4109 100644 --- a/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h +++ b/src/Processors/Formats/Impl/JSONEachRowWithProgressRowOutputFormat.h @@ -9,11 +9,12 @@ class JSONEachRowWithProgressRowOutputFormat : public JSONEachRowRowOutputFormat public: using JSONEachRowRowOutputFormat::JSONEachRowRowOutputFormat; - void writeRowStartDelimiter() override; - void writeRowEndDelimiter() override; void onProgress(const Progress & value) override; private: + void writeRowStartDelimiter() override; + void writeRowEndDelimiter() override; + Progress progress; }; diff --git a/src/Processors/Formats/Impl/JSONRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONRowOutputFormat.cpp index 28d774fea74..f867a0bc49b 100644 --- a/src/Processors/Formats/Impl/JSONRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONRowOutputFormat.cpp @@ -26,8 +26,10 @@ JSONRowOutputFormat::JSONRowOutputFormat( need_validate_utf8 = true; WriteBufferFromOwnString buf; - writeJSONString(fields[i].name, buf, settings); - + { + WriteBufferValidUTF8 validating_buf(buf); + writeJSONString(fields[i].name, validating_buf, settings); + } fields[i].name = buf.str(); } diff --git a/src/Processors/Formats/Impl/JSONRowOutputFormat.h b/src/Processors/Formats/Impl/JSONRowOutputFormat.h index 75d4aa5d201..757cdf01a35 100644 --- a/src/Processors/Formats/Impl/JSONRowOutputFormat.h +++ b/src/Processors/Formats/Impl/JSONRowOutputFormat.h @@ -25,6 +25,25 @@ public: String getName() const override { return "JSONRowOutputFormat"; } + void onProgress(const Progress & value) override; + + String getContentType() const override { return "application/json; charset=UTF-8"; } + + void flush() override + { + ostr->next(); + + if (validating_ostr) + out.next(); + } + + void setRowsBeforeLimit(size_t rows_before_limit_) override + { + applied_limit = true; + rows_before_limit = rows_before_limit_; + } + +protected: void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; void writeFieldDelimiter() override; void writeRowStartDelimiter() override; @@ -44,25 +63,6 @@ public: void writeLastSuffix() override; - void flush() override - { - ostr->next(); - - if (validating_ostr) - out.next(); - } - - void setRowsBeforeLimit(size_t rows_before_limit_) override - { - applied_limit = true; - rows_before_limit = rows_before_limit_; - } - - void onProgress(const Progress & value) override; - - String getContentType() const override { return "application/json; charset=UTF-8"; } - -protected: virtual void writeTotalsField(const IColumn & column, const ISerialization & serialization, size_t row_num); virtual void writeExtremesElement(const char * title, const Columns & columns, size_t row_num); virtual void writeTotalsFieldDelimiter() { writeFieldDelimiter(); } @@ -70,7 +70,6 @@ 
protected: void writeRowsBeforeLimitAtLeast(); void writeStatistics(); - std::unique_ptr<WriteBuffer> validating_ostr; /// Validates UTF-8 sequences, replaces bad sequences with replacement character. WriteBuffer * ostr; diff --git a/src/Processors/Formats/Impl/LineAsStringRowInputFormat.h b/src/Processors/Formats/Impl/LineAsStringRowInputFormat.h index 7c0187bc3ff..1a6c6247558 100644 --- a/src/Processors/Formats/Impl/LineAsStringRowInputFormat.h +++ b/src/Processors/Formats/Impl/LineAsStringRowInputFormat.h @@ -17,11 +17,12 @@ class LineAsStringRowInputFormat : public IRowInputFormat public: LineAsStringRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_); - bool readRow(MutableColumns & columns, RowReadExtension & ext) override; String getName() const override { return "LineAsStringRowInputFormat"; } void resetParser() override; private: + bool readRow(MutableColumns & columns, RowReadExtension & ext) override; + void readLineObject(IColumn & column); }; diff --git a/src/Processors/Formats/Impl/MarkdownRowOutputFormat.h b/src/Processors/Formats/Impl/MarkdownRowOutputFormat.h index 0b2a4dd0b23..7a2aaf86f7d 100644 --- a/src/Processors/Formats/Impl/MarkdownRowOutputFormat.h +++ b/src/Processors/Formats/Impl/MarkdownRowOutputFormat.h @@ -14,6 +14,9 @@ class MarkdownRowOutputFormat : public IRowOutputFormat public: MarkdownRowOutputFormat(WriteBuffer & out_, const Block & header_, const RowOutputFormatParams & params_, const FormatSettings & format_settings_); + String getName() const override { return "MarkdownRowOutputFormat"; } + +private: /// Write higher part of markdown table like this: /// |columnName1|columnName2|...|columnNameN| /// |:-:|:-:|...|:-:| @@ -29,9 +32,7 @@ public: void writeRowEndDelimiter() override; void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; - String getName() const override { return "MarkdownRowOutputFormat"; } -protected: const FormatSettings format_settings; }; diff --git a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp index 86540be17b9..e34729be928 100644 --- a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp @@ -29,7 +29,7 @@ namespace ErrorCodes } MsgPackRowInputFormat::MsgPackRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_) - : IRowInputFormat(header_, in_, std::move(params_)), buf(*in), parser(visitor), data_types(header_.getDataTypes()) {} + : IRowInputFormat(header_, buf, std::move(params_)), buf(in_), parser(visitor), data_types(header_.getDataTypes()) {} void MsgPackRowInputFormat::resetParser() { diff --git a/src/Processors/Formats/Impl/MsgPackRowInputFormat.h b/src/Processors/Formats/Impl/MsgPackRowInputFormat.h index fa5c2e74584..d2d500a4480 100644 --- a/src/Processors/Formats/Impl/MsgPackRowInputFormat.h +++ b/src/Processors/Formats/Impl/MsgPackRowInputFormat.h @@ -59,11 +59,12 @@ class MsgPackRowInputFormat : public IRowInputFormat public: MsgPackRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_); - bool readRow(MutableColumns & columns, RowReadExtension & ext) override; String getName() const override { return "MsgPackRowInputFormat"; } void resetParser() override; private: + bool readRow(MutableColumns & columns, RowReadExtension & ext) override; + bool readObject(); PeekableReadBuffer buf;
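The JSONAsString, MsgPack, and Regexp constructor changes in this patch all fix the same latent bug: these formats wrap the raw ReadBuffer in a PeekableReadBuffer, so the IRowInputFormat base class has to be handed the wrapper, not the raw stream, or base-class reads silently bypass whatever the wrapper has already buffered. There is also an initialization-order subtlety: the base subobject is constructed before the buf member, so the base may only store the reference at that point. A schematic sketch with stand-in types (not the actual ClickHouse classes):

    struct Stream {};                                                // stand-in for ReadBuffer
    struct PeekableStream { explicit PeekableStream(Stream &) {} };  // stand-in for PeekableReadBuffer

    struct BaseFormat
    {
        // Only stores the reference; `buf` in the derived class is not yet
        // constructed when this runs, so reading from it here would be invalid.
        explicit BaseFormat(PeekableStream & source_) : source(source_) {}
        PeekableStream & source;
    };

    struct PeekingFormat : BaseFormat
    {
        // Correct: point the base class at the wrapper and construct the
        // wrapper from the raw stream. Passing `raw` to BaseFormat, as the
        // old code effectively did, would let base-class reads skip `buf`.
        explicit PeekingFormat(Stream & raw) : BaseFormat(buf), buf(raw) {}
        PeekableStream buf;
    };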
diff --git a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp index bbc7b005fd8..36a8a62b39e 100644 --- a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp @@ -182,7 +182,6 @@ void MsgPackRowOutputFormat::write(const Columns & columns, size_t row_num) void registerOutputFormatMsgPack(FormatFactory & factory) { - factory.registerOutputFormat("MsgPack", []( WriteBuffer & buf, const Block & sample, @@ -191,6 +190,7 @@ void registerOutputFormatMsgPack(FormatFactory & factory) { return std::make_shared<MsgPackRowOutputFormat>(buf, sample, params); }); + factory.markOutputFormatSupportsParallelFormatting("MsgPack"); } } diff --git a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h index 2766eb144e4..17d055818e9 100644 --- a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h +++ b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.h @@ -22,11 +22,11 @@ public: String getName() const override { return "MsgPackRowOutputFormat"; } +private: void write(const Columns & columns, size_t row_num) override; void writeField(const IColumn &, const ISerialization &, size_t) override {} void serializeField(const IColumn & column, DataTypePtr data_type, size_t row_num); -private: msgpack::packer<DB::WriteBuffer> packer; }; diff --git a/src/Processors/Formats/Impl/MySQLOutputFormat.cpp b/src/Processors/Formats/Impl/MySQLOutputFormat.cpp index 0b366244611..5033176ca4b 100644 --- a/src/Processors/Formats/Impl/MySQLOutputFormat.cpp +++ b/src/Processors/Formats/Impl/MySQLOutputFormat.cpp @@ -24,19 +24,6 @@ MySQLOutputFormat::MySQLOutputFormat(WriteBuffer & out_, const Block & header_, /// But it's also possible to specify MySQLWire as output format for clickhouse-client or clickhouse-local. /// There is no `sequence_id` stored in `settings_.mysql_wire` in this case, so we create a dummy one. sequence_id = settings_.mysql_wire.sequence_id ?
settings_.mysql_wire.sequence_id : &dummy_sequence_id; -} - -void MySQLOutputFormat::setContext(ContextPtr context_) -{ - context = context_; -} - -void MySQLOutputFormat::initialize() -{ - if (initialized) - return; - - initialized = true; const auto & header = getPort(PortKind::Main).getHeader(); data_types = header.getDataTypes(); @@ -46,6 +33,16 @@ void MySQLOutputFormat::initialize() serializations.emplace_back(type->getDefaultSerialization()); packet_endpoint = MySQLProtocol::PacketEndpoint::create(out, *sequence_id); +} + +void MySQLOutputFormat::setContext(ContextPtr context_) +{ + context = context_; +} + +void MySQLOutputFormat::writePrefix() +{ + const auto & header = getPort(PortKind::Main).getHeader(); if (header.columns()) { @@ -66,8 +63,6 @@ void MySQLOutputFormat::initialize() void MySQLOutputFormat::consume(Chunk chunk) { - initialize(); - for (size_t i = 0; i < chunk.getNumRows(); i++) { ProtocolText::ResultSetRow row_packet(serializations, chunk.getColumns(), i); @@ -75,7 +70,7 @@ void MySQLOutputFormat::consume(Chunk chunk) } } -void MySQLOutputFormat::finalize() +void MySQLOutputFormat::finalizeImpl() { size_t affected_rows = 0; std::string human_readable_info; diff --git a/src/Processors/Formats/Impl/MySQLOutputFormat.h b/src/Processors/Formats/Impl/MySQLOutputFormat.h index a8e1ada3d6a..9481ef67070 100644 --- a/src/Processors/Formats/Impl/MySQLOutputFormat.h +++ b/src/Processors/Formats/Impl/MySQLOutputFormat.h @@ -26,15 +26,13 @@ public: void setContext(ContextPtr context_); - void consume(Chunk) override; - void finalize() override; void flush() override; - void doWritePrefix() override { initialize(); } private: - void initialize(); + void consume(Chunk) override; + void finalizeImpl() override; + void writePrefix() override; - bool initialized = false; uint32_t client_capabilities = 0; uint8_t * sequence_id = nullptr; uint8_t dummy_sequence_id = 0; diff --git a/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp b/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp index 0e486715c98..a82285c1c19 100644 --- a/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp @@ -64,21 +64,14 @@ void ODBCDriver2BlockOutputFormat::write(Chunk chunk, PortKind port_kind) void ODBCDriver2BlockOutputFormat::consume(Chunk chunk) { - writePrefixIfNot(); write(std::move(chunk), PortKind::Main); } void ODBCDriver2BlockOutputFormat::consumeTotals(Chunk chunk) { - writePrefixIfNot(); write(std::move(chunk), PortKind::Totals); } -void ODBCDriver2BlockOutputFormat::finalize() -{ - writePrefixIfNot(); -} - void ODBCDriver2BlockOutputFormat::writePrefix() { const auto & header = getPort(PortKind::Main).getHeader(); diff --git a/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h b/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h index 4545e429cc2..de6ea22dfd7 100644 --- a/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h +++ b/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h @@ -24,30 +24,20 @@ public: String getName() const override { return "ODBCDriver2BlockOutputFormat"; } - void consume(Chunk) override; - void consumeTotals(Chunk) override; - void finalize() override; - std::string getContentType() const override { return "application/octet-stream"; } private: + void consume(Chunk) override; + void consumeTotals(Chunk) override; + void writePrefix() override; + const FormatSettings format_settings; - bool prefix_written = false; - - void writePrefixIfNot() - { - 
if (!prefix_written) - writePrefix(); - - prefix_written = true; - } void writeRow(const Serializations & serializations, const Columns & columns, size_t row_idx, std::string & buffer); void write(Chunk chunk, PortKind port_kind); - void writePrefix(); }; diff --git a/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp b/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp index 0a1eb9c6836..4c8f6ab2c54 100644 --- a/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp @@ -495,7 +495,7 @@ void ORCBlockOutputFormat::consume(Chunk chunk) writer->add(*batch); } -void ORCBlockOutputFormat::finalize() +void ORCBlockOutputFormat::finalizeImpl() { if (!writer) prepareWriter(); diff --git a/src/Processors/Formats/Impl/ORCBlockOutputFormat.h b/src/Processors/Formats/Impl/ORCBlockOutputFormat.h index c131d724450..2ffee597e8f 100644 --- a/src/Processors/Formats/Impl/ORCBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/ORCBlockOutputFormat.h @@ -37,10 +37,11 @@ public: ORCBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "ORCBlockOutputFormat"; } - void consume(Chunk chunk) override; - void finalize() override; private: + void consume(Chunk chunk) override; + void finalizeImpl() override; + ORC_UNIQUE_PTR getORCType(const DataTypePtr & type, const std::string & column_name); /// ConvertFunc is needed for type UInt8, because firstly UInt8 (char8_t) must be diff --git a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.cpp b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.cpp index f7723e3f1d2..30bfae4972d 100644 --- a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.cpp @@ -4,7 +4,7 @@ namespace DB { - void ParallelFormattingOutputFormat::finalize() + void ParallelFormattingOutputFormat::finalizeImpl() { need_flush = true; IOutputFormat::finalized = true; @@ -171,7 +171,7 @@ namespace DB { case ProcessingUnitType::START : { - formatter->doWritePrefix(); + formatter->writePrefix(); break; } case ProcessingUnitType::PLAIN : @@ -191,7 +191,7 @@ namespace DB } case ProcessingUnitType::FINALIZE : { - formatter->doWriteSuffix(); + formatter->finalizeImpl(); break; } } diff --git a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h index 65f4a329505..50f2ea9dc1b 100644 --- a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h +++ b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h @@ -95,7 +95,7 @@ public: need_flush = true; } - void doWritePrefix() override + void writePrefix() override { addChunk(Chunk{}, ProcessingUnitType::START, /*can_throw_exception*/ true); } @@ -114,7 +114,7 @@ public: return internal_formatter_creator(buffer)->getContentType(); } -protected: +private: void consume(Chunk chunk) override final { addChunk(std::move(chunk), ProcessingUnitType::PLAIN, /*can_throw_exception*/ true); @@ -130,9 +130,8 @@ protected: addChunk(std::move(extremes), ProcessingUnitType::EXTREMES, /*can_throw_exception*/ true); } - void finalize() override; + void finalizeImpl() override; -private: InternalFormatterCreator internal_formatter_creator; /// Status to synchronize multiple threads. 
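The changes above and below converge every output format on one template-method lifecycle: the IOutputFormat base class drives writePrefix() exactly once, consume() per chunk, and finalizeImpl() at the end, which is why the per-format bookkeeping (prefix_written in ODBCDriver2BlockOutputFormat, initialized in MySQLOutputFormat and PostgreSQLOutputFormat, need_write_prefix in TemplateBlockOutputFormat) can simply be deleted. A minimal sketch of that shape, with simplified assumed signatures and a stubbed Chunk, not the actual IOutputFormat code:

    #include <utility>

    struct Chunk {};  // stand-in for DB::Chunk

    class OutputFormatSketch
    {
    public:
        virtual ~OutputFormatSketch() = default;

        void write(Chunk chunk)
        {
            writePrefixIfNeeded();        // the flag now lives in the base class...
            consume(std::move(chunk));
        }

        void finalize()
        {
            writePrefixIfNeeded();        // ...so even an empty result gets its prefix
            finalizeImpl();
        }

    protected:
        virtual void writePrefix() {}     // formats override only these hooks
        virtual void consume(Chunk) = 0;
        virtual void finalizeImpl() {}

    private:
        void writePrefixIfNeeded()
        {
            if (prefix_written)
                return;
            prefix_written = true;
            writePrefix();
        }

        bool prefix_written = false;
    };

Making the hooks private or protected in the headers, as this patch does throughout, enforces that callers go through the base-class entry points only.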
diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h index 3355b7334dc..264beba8589 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h @@ -117,7 +117,7 @@ public: String getName() const override final { return "ParallelParsingBlockInputFormat"; } -protected: +private: Chunk generate() override final; @@ -137,8 +137,6 @@ protected: finishAndWait(); } -private: - class InternalParser { public: diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.h b/src/Processors/Formats/Impl/ParquetBlockInputFormat.h index bae380f2c80..472aec66da3 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.h +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.h @@ -23,13 +23,11 @@ public: String getName() const override { return "ParquetBlockInputFormat"; } -protected: +private: Chunk generate() override; -private: void prepareReader(); -private: std::unique_ptr<parquet::arrow::FileReader> file_reader; int row_group_total = 0; // indices of columns to read from Parquet file diff --git a/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp index 416d1b80988..a10858ee668 100644 --- a/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp @@ -60,7 +60,7 @@ void ParquetBlockOutputFormat::consume(Chunk chunk) throw Exception{"Error while writing a table: " + status.ToString(), ErrorCodes::UNKNOWN_EXCEPTION}; } -void ParquetBlockOutputFormat::finalize() +void ParquetBlockOutputFormat::finalizeImpl() { if (!file_writer) { diff --git a/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h index 80d11b540b1..dee25ee1aa4 100644 --- a/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h @@ -30,12 +30,13 @@ public: ParquetBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "ParquetBlockOutputFormat"; } - void consume(Chunk) override; - void finalize() override; String getContentType() const override { return "application/octet-stream"; } private: + void consume(Chunk) override; + void finalizeImpl() override; + const FormatSettings format_settings; std::unique_ptr<parquet::arrow::FileWriter> file_writer; diff --git a/src/Processors/Formats/Impl/PostgreSQLOutputFormat.cpp b/src/Processors/Formats/Impl/PostgreSQLOutputFormat.cpp index a5f92e41da4..f46488fd0a8 100644 --- a/src/Processors/Formats/Impl/PostgreSQLOutputFormat.cpp +++ b/src/Processors/Formats/Impl/PostgreSQLOutputFormat.cpp @@ -11,12 +11,8 @@ PostgreSQLOutputFormat::PostgreSQLOutputFormat(WriteBuffer & out_, const Block & { } -void PostgreSQLOutputFormat::doWritePrefix() +void PostgreSQLOutputFormat::writePrefix() { - if (initialized) - return; - - initialized = true; const auto & header = getPort(PortKind::Main).getHeader(); auto data_types = header.getDataTypes(); @@ -37,8 +33,6 @@ void PostgreSQLOutputFormat::doWritePrefix() void PostgreSQLOutputFormat::consume(Chunk chunk) { - doWritePrefix(); - for (size_t i = 0; i != chunk.getNumRows(); ++i) { const Columns & columns = chunk.getColumns(); @@ -61,8 +55,6 @@ void PostgreSQLOutputFormat::consume(Chunk chunk) } } -void PostgreSQLOutputFormat::finalize() {} - void PostgreSQLOutputFormat::flush() { message_transport.flush(); } diff --git
a/src/Processors/Formats/Impl/PostgreSQLOutputFormat.h b/src/Processors/Formats/Impl/PostgreSQLOutputFormat.h index 257fbdff341..f5fd55530b9 100644 --- a/src/Processors/Formats/Impl/PostgreSQLOutputFormat.h +++ b/src/Processors/Formats/Impl/PostgreSQLOutputFormat.h @@ -17,13 +17,11 @@ public: String getName() const override {return "PostgreSQLOutputFormat";} - void doWritePrefix() override; - void consume(Chunk) override; - void finalize() override; void flush() override; private: - bool initialized = false; + void writePrefix() override; + void consume(Chunk) override; FormatSettings format_settings; PostgreSQLProtocol::Messaging::MessageTransport message_transport; diff --git a/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp index 2bae0c6bd63..66ddaa616cd 100644 --- a/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp @@ -401,7 +401,7 @@ void PrettyBlockOutputFormat::writeSuffix() } } -void PrettyBlockOutputFormat::finalize() +void PrettyBlockOutputFormat::finalizeImpl() { writeSuffixIfNot(); } diff --git a/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h index 02b438d2571..597c82e8166 100644 --- a/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h @@ -22,13 +22,13 @@ public: String getName() const override { return "PrettyBlockOutputFormat"; } +protected: void consume(Chunk) override; void consumeTotals(Chunk) override; void consumeExtremes(Chunk) override; - void finalize() override; + void finalizeImpl() override; -protected: size_t total_rows = 0; size_t terminal_width = 0; bool suffix_written = false; diff --git a/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h b/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h index 96344397a0c..1779a20e122 100644 --- a/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/PrettyCompactBlockOutputFormat.h @@ -16,7 +16,7 @@ public: PrettyCompactBlockOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings_, bool mono_block_); String getName() const override { return "PrettyCompactBlockOutputFormat"; } -protected: +private: void write(const Chunk & chunk, PortKind port_kind) override; void writeHeader(const Block & block, const Widths & max_widths, const Widths & name_widths); void writeBottom(const Widths & max_widths); @@ -28,7 +28,6 @@ protected: const WidthsPerColumn & widths, const Widths & max_widths); -private: bool mono_block; /// For mono_block == true only Chunk mono_chunk; diff --git a/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h b/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h index a041d324fd3..b3090497783 100644 --- a/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/PrettySpaceBlockOutputFormat.h @@ -16,7 +16,7 @@ public: String getName() const override { return "PrettySpaceBlockOutputFormat"; } -protected: +private: void write(const Chunk & chunk, PortKind port_kind) override; void writeSuffix() override; }; diff --git a/src/Processors/Formats/Impl/ProtobufRowInputFormat.h b/src/Processors/Formats/Impl/ProtobufRowInputFormat.h index 553a2dfd7f0..6f465e3f0b8 100644 --- a/src/Processors/Formats/Impl/ProtobufRowInputFormat.h +++ b/src/Processors/Formats/Impl/ProtobufRowInputFormat.h @@ -32,11 +32,11 @@ public: String 
getName() const override { return "ProtobufRowInputFormat"; } +private: bool readRow(MutableColumns & columns, RowReadExtension &) override; bool allowSyncAfterError() const override; void syncAfterError() override; -private: std::unique_ptr<ProtobufReader> reader; std::vector<size_t> missing_column_indices; std::unique_ptr<ProtobufSerializer> serializer; diff --git a/src/Processors/Formats/Impl/ProtobufRowOutputFormat.h b/src/Processors/Formats/Impl/ProtobufRowOutputFormat.h index 5323aa56323..97b727842a7 100644 --- a/src/Processors/Formats/Impl/ProtobufRowOutputFormat.h +++ b/src/Processors/Formats/Impl/ProtobufRowOutputFormat.h @@ -39,11 +39,12 @@ public: String getName() const override { return "ProtobufRowOutputFormat"; } - void write(const Columns & columns, size_t row_num) override; - void writeField(const IColumn &, const ISerialization &, size_t) override {} std::string getContentType() const override { return "application/octet-stream"; } private: + void write(const Columns & columns, size_t row_num) override; + void writeField(const IColumn &, const ISerialization &, size_t) override {} + std::unique_ptr<ProtobufWriter> writer; std::unique_ptr<ProtobufSerializer> serializer; const bool allow_multiple_rows; diff --git a/src/Processors/Formats/Impl/RawBLOBRowInputFormat.h b/src/Processors/Formats/Impl/RawBLOBRowInputFormat.h index fd2c849687a..343af9f4068 100644 --- a/src/Processors/Formats/Impl/RawBLOBRowInputFormat.h +++ b/src/Processors/Formats/Impl/RawBLOBRowInputFormat.h @@ -16,8 +16,10 @@ class RawBLOBRowInputFormat : public IRowInputFormat public: RawBLOBRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_); - bool readRow(MutableColumns & columns, RowReadExtension &) override; String getName() const override { return "RawBLOBRowInputFormat"; } + +private: + bool readRow(MutableColumns & columns, RowReadExtension &) override; }; } diff --git a/src/Processors/Formats/Impl/RawBLOBRowOutputFormat.h b/src/Processors/Formats/Impl/RawBLOBRowOutputFormat.h index 7a29c62e4d8..2c34595c1a4 100644 --- a/src/Processors/Formats/Impl/RawBLOBRowOutputFormat.h +++ b/src/Processors/Formats/Impl/RawBLOBRowOutputFormat.h @@ -34,6 +34,7 @@ public: String getName() const override { return "RawBLOBRowOutputFormat"; } +private: void writeField(const IColumn & column, const ISerialization &, size_t row_num) override; }; diff --git a/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp b/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp index 62c0eaa457e..7dd7e6df267 100644 --- a/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp @@ -2,6 +2,7 @@ #include #include #include +#include <Formats/EscapingRuleUtils.h> #include namespace DB @@ -10,16 +11,15 @@ namespace DB namespace ErrorCodes { extern const int INCORRECT_DATA; - extern const int BAD_ARGUMENTS; extern const int LOGICAL_ERROR; } RegexpRowInputFormat::RegexpRowInputFormat( ReadBuffer & in_, const Block & header_, Params params_, const FormatSettings & format_settings_) - : IRowInputFormat(header_, in_, std::move(params_)) + : IRowInputFormat(header_, buf, std::move(params_)) , buf(in_) , format_settings(format_settings_) - , field_format(stringToFormat(format_settings_.regexp.escaping_rule)) + , escaping_rule(format_settings_.regexp.escaping_rule) , regexp(format_settings_.regexp.regexp) { size_t fields_count = regexp.NumberOfCapturingGroups(); @@ -42,72 +42,19 @@ void RegexpRowInputFormat::resetParser() buf.reset(); } -RegexpRowInputFormat::ColumnFormat RegexpRowInputFormat::stringToFormat(const String & format) -{ - if (format == "Escaped") - return
ColumnFormat::Escaped; - if (format == "Quoted") - return ColumnFormat::Quoted; - if (format == "CSV") - return ColumnFormat::Csv; - if (format == "JSON") - return ColumnFormat::Json; - if (format == "Raw") - return ColumnFormat::Raw; - throw Exception("Unsupported column format \"" + format + "\".", ErrorCodes::BAD_ARGUMENTS); -} - bool RegexpRowInputFormat::readField(size_t index, MutableColumns & columns) { const auto & type = getPort().getHeader().getByPosition(index).type; - bool parse_as_nullable = format_settings.null_as_default && !type->isNullable() && !type->isLowCardinalityNullable(); - bool read = true; ReadBuffer field_buf(const_cast(matched_fields[index].data()), matched_fields[index].size(), 0); try { - const auto & serialization = serializations[index]; - switch (field_format) - { - case ColumnFormat::Escaped: - if (parse_as_nullable) - read = SerializationNullable::deserializeTextEscapedImpl(*columns[index], field_buf, format_settings, serialization); - else - serialization->deserializeTextEscaped(*columns[index], field_buf, format_settings); - break; - case ColumnFormat::Quoted: - if (parse_as_nullable) - read = SerializationNullable::deserializeTextQuotedImpl(*columns[index], field_buf, format_settings, serialization); - else - serialization->deserializeTextQuoted(*columns[index], field_buf, format_settings); - break; - case ColumnFormat::Csv: - if (parse_as_nullable) - read = SerializationNullable::deserializeTextCSVImpl(*columns[index], field_buf, format_settings, serialization); - else - serialization->deserializeTextCSV(*columns[index], field_buf, format_settings); - break; - case ColumnFormat::Json: - if (parse_as_nullable) - read = SerializationNullable::deserializeTextJSONImpl(*columns[index], field_buf, format_settings, serialization); - else - serialization->deserializeTextJSON(*columns[index], field_buf, format_settings); - break; - case ColumnFormat::Raw: - if (parse_as_nullable) - read = SerializationNullable::deserializeTextRawImpl(*columns[index], field_buf, format_settings, serialization); - else - serialization->deserializeTextRaw(*columns[index], field_buf, format_settings); - break; - default: - break; - } + return deserializeFieldByEscapingRule(type, serializations[index], *columns[index], field_buf, escaping_rule, format_settings); } catch (Exception & e) { e.addMessage("(while reading the value of column " + getPort().getHeader().getByPosition(index).name + ")"); throw; } - return read; } void RegexpRowInputFormat::readFieldsFromMatch(MutableColumns & columns, RowReadExtension & ext) diff --git a/src/Processors/Formats/Impl/RegexpRowInputFormat.h b/src/Processors/Formats/Impl/RegexpRowInputFormat.h index 0cd8778e499..c54549b8bac 100644 --- a/src/Processors/Formats/Impl/RegexpRowInputFormat.h +++ b/src/Processors/Formats/Impl/RegexpRowInputFormat.h @@ -25,23 +25,22 @@ class ReadBuffer; class RegexpRowInputFormat : public IRowInputFormat { - using ColumnFormat = ParsedTemplateFormatString::ColumnFormat; + using EscapingRule = FormatSettings::EscapingRule; public: RegexpRowInputFormat(ReadBuffer & in_, const Block & header_, Params params_, const FormatSettings & format_settings_); String getName() const override { return "RegexpRowInputFormat"; } - - bool readRow(MutableColumns & columns, RowReadExtension & ext) override; void resetParser() override; private: + bool readRow(MutableColumns & columns, RowReadExtension & ext) override; + bool readField(size_t index, MutableColumns & columns); void readFieldsFromMatch(MutableColumns & columns, 
RowReadExtension & ext); - static ColumnFormat stringToFormat(const String & format); PeekableReadBuffer buf; const FormatSettings format_settings; - const ColumnFormat field_format; + const EscapingRule escaping_rule; const RE2 regexp; // The vector of fields extracted from line using regexp. diff --git a/src/Processors/Formats/Impl/TSKVRowInputFormat.h b/src/Processors/Formats/Impl/TSKVRowInputFormat.h index bc537158d9b..7d732bae691 100644 --- a/src/Processors/Formats/Impl/TSKVRowInputFormat.h +++ b/src/Processors/Formats/Impl/TSKVRowInputFormat.h @@ -27,14 +27,14 @@ public: String getName() const override { return "TSKVRowInputFormat"; } + void resetParser() override; + +private: void readPrefix() override; bool readRow(MutableColumns & columns, RowReadExtension &) override; bool allowSyncAfterError() const override { return true; } void syncAfterError() override; - void resetParser() override; - -private: const FormatSettings format_settings; /// Buffer for the read from the stream the field name. Used when you have to copy it. diff --git a/src/Processors/Formats/Impl/TSKVRowOutputFormat.h b/src/Processors/Formats/Impl/TSKVRowOutputFormat.h index 24c4e5ca866..980e36c7e25 100644 --- a/src/Processors/Formats/Impl/TSKVRowOutputFormat.h +++ b/src/Processors/Formats/Impl/TSKVRowOutputFormat.h @@ -18,10 +18,10 @@ public: String getName() const override { return "TSKVRowOutputFormat"; } +private: void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; void writeRowEndDelimiter() override; -protected: NamesAndTypes fields; size_t field_number = 0; }; diff --git a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h index 11a788bc900..6e2e283e792 100644 --- a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h +++ b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h @@ -21,12 +21,10 @@ public: String getName() const override { return "TabSeparatedRowInputFormat"; } +private: bool allowSyncAfterError() const override { return true; } void syncAfterError() override; -private: - bool is_raw; - bool readField(IColumn & column, const DataTypePtr & type, const SerializationPtr & serialization, bool is_last_file_column, const String & column_name) override; @@ -48,6 +46,8 @@ private: bool parseFieldDelimiterWithDiagnosticInfo(WriteBuffer & out) override; bool parseRowEndWithDiagnosticInfo(WriteBuffer & out) override; bool isGarbageAfterField(size_t, ReadBuffer::Position pos) override { return *pos != '\n' && *pos != '\t'; } + + bool is_raw; }; } diff --git a/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp b/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp index df0c19ad409..5d87f5a0b14 100644 --- a/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp @@ -30,7 +30,7 @@ void TabSeparatedRowOutputFormat::writeLine(const std::vector & values) } } -void TabSeparatedRowOutputFormat::doWritePrefix() +void TabSeparatedRowOutputFormat::writePrefix() { const auto & header = getPort(PortKind::Main).getHeader(); diff --git a/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h b/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h index 7dcc6529f1c..eeada54d74e 100644 --- a/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h +++ b/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h @@ -29,24 +29,22 @@ public: String getName() const override { return 
"TabSeparatedRowOutputFormat"; } + /// https://www.iana.org/assignments/media-types/text/tab-separated-values + String getContentType() const override { return "text/tab-separated-values; charset=UTF-8"; } + +protected: void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; void writeFieldDelimiter() override; void writeRowEndDelimiter() override; void writeBeforeTotals() override; void writeBeforeExtremes() override; - void doWritePrefix() override; - - /// https://www.iana.org/assignments/media-types/text/tab-separated-values - String getContentType() const override { return "text/tab-separated-values; charset=UTF-8"; } - -private: + void writePrefix() override; void writeLine(const std::vector & values); + bool with_names; bool with_types; bool is_raw; - -protected: const FormatSettings format_settings; }; diff --git a/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp b/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp index db5db4701a9..e94503f06f5 100644 --- a/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -39,7 +40,7 @@ TemplateBlockOutputFormat::TemplateBlockOutputFormat(const Block & header_, Writ case static_cast(ResultsetPart::Totals): case static_cast(ResultsetPart::ExtremesMin): case static_cast(ResultsetPart::ExtremesMax): - if (format.formats[i] != ColumnFormat::None) + if (format.escaping_rules[i] != EscapingRule::None) format.throwInvalidFormat("Serialization type for data, totals, min and max must be empty or None", i); break; case static_cast(ResultsetPart::Rows): @@ -47,7 +48,7 @@ TemplateBlockOutputFormat::TemplateBlockOutputFormat(const Block & header_, Writ case static_cast(ResultsetPart::TimeElapsed): case static_cast(ResultsetPart::RowsRead): case static_cast(ResultsetPart::BytesRead): - if (format.formats[i] == ColumnFormat::None) + if (format.escaping_rules[i] == EscapingRule::None) format.throwInvalidFormat("Serialization type for output part rows, rows_before_limit, time, " "rows_read or bytes_read is not specified", i); break; @@ -68,7 +69,7 @@ TemplateBlockOutputFormat::TemplateBlockOutputFormat(const Block & header_, Writ if (header_.columns() <= *row_format.format_idx_to_column_idx[i]) row_format.throwInvalidFormat("Column index " + std::to_string(*row_format.format_idx_to_column_idx[i]) + " must be less then number of columns (" + std::to_string(header_.columns()) + ")", i); - if (row_format.formats[i] == ColumnFormat::None) + if (row_format.escaping_rules[i] == EscapingRule::None) row_format.throwInvalidFormat("Serialization type for file column is not specified", i); } } @@ -105,50 +106,21 @@ void TemplateBlockOutputFormat::writeRow(const Chunk & chunk, size_t row_num) writeString(row_format.delimiters[j], out); size_t col_idx = *row_format.format_idx_to_column_idx[j]; - serializeField(*chunk.getColumns()[col_idx], *serializations[col_idx], row_num, row_format.formats[j]); + serializeFieldByEscapingRule(*chunk.getColumns()[col_idx], *serializations[col_idx], out, row_num, row_format.escaping_rules[j], settings); } writeString(row_format.delimiters[columns], out); } -void TemplateBlockOutputFormat::serializeField(const IColumn & column, const ISerialization & serialization, size_t row_num, ColumnFormat col_format) -{ - switch (col_format) - { - case ColumnFormat::Escaped: - serialization.serializeTextEscaped(column, row_num, out, settings); - 
break; - case ColumnFormat::Quoted: - serialization.serializeTextQuoted(column, row_num, out, settings); - break; - case ColumnFormat::Csv: - serialization.serializeTextCSV(column, row_num, out, settings); - break; - case ColumnFormat::Json: - serialization.serializeTextJSON(column, row_num, out, settings); - break; - case ColumnFormat::Xml: - serialization.serializeTextXML(column, row_num, out, settings); - break; - case ColumnFormat::Raw: - serialization.serializeTextRaw(column, row_num, out, settings); - break; - default: - __builtin_unreachable(); - } -} - -template <typename U, typename V> void TemplateBlockOutputFormat::writeValue(U value, ColumnFormat col_format) +template <typename U, typename V> void TemplateBlockOutputFormat::writeValue(U value, EscapingRule escaping_rule) { auto type = std::make_unique<V>(); auto col = type->createColumn(); col->insert(value); - serializeField(*col, *type->getDefaultSerialization(), 0, col_format); + serializeFieldByEscapingRule(*col, *type->getDefaultSerialization(), out, 0, escaping_rule, settings); } void TemplateBlockOutputFormat::consume(Chunk chunk) { - doWritePrefix(); - size_t rows = chunk.getNumRows(); for (size_t i = 0; i < rows; ++i) @@ -161,22 +133,16 @@ void TemplateBlockOutputFormat::consume(Chunk chunk) } } -void TemplateBlockOutputFormat::doWritePrefix() +void TemplateBlockOutputFormat::writePrefix() { - if (need_write_prefix) - { - writeString(format.delimiters.front(), out); - need_write_prefix = false; - } + writeString(format.delimiters.front(), out); } -void TemplateBlockOutputFormat::finalize() +void TemplateBlockOutputFormat::finalizeImpl() { if (finalized) return; - doWritePrefix(); - size_t parts = format.format_idx_to_column_idx.size(); for (size_t i = 0; i < parts; ++i) @@ -201,21 +167,21 @@ void TemplateBlockOutputFormat::finalize() writeRow(extremes, 1); break; case ResultsetPart::Rows: - writeValue<size_t, DataTypeUInt64>(row_count, format.formats[i]); + writeValue<size_t, DataTypeUInt64>(row_count, format.escaping_rules[i]); break; case ResultsetPart::RowsBeforeLimit: if (!rows_before_limit_set) format.throwInvalidFormat("Cannot print rows_before_limit for this request", i); - writeValue<size_t, DataTypeUInt64>(rows_before_limit, format.formats[i]); + writeValue<size_t, DataTypeUInt64>(rows_before_limit, format.escaping_rules[i]); break; case ResultsetPart::TimeElapsed: - writeValue<double, DataTypeFloat64>(watch.elapsedSeconds(), format.formats[i]); + writeValue<double, DataTypeFloat64>(watch.elapsedSeconds(), format.escaping_rules[i]); break; case ResultsetPart::RowsRead: - writeValue<size_t, DataTypeUInt64>(progress.read_rows.load(), format.formats[i]); + writeValue<size_t, DataTypeUInt64>(progress.read_rows.load(), format.escaping_rules[i]); break; case ResultsetPart::BytesRead: - writeValue<size_t, DataTypeUInt64>(progress.read_bytes.load(), format.formats[i]); + writeValue<size_t, DataTypeUInt64>(progress.read_bytes.load(), format.escaping_rules[i]); break; default: break; @@ -240,7 +206,7 @@ void registerOutputFormatTemplate(FormatFactory & factory) { /// Default format string: "${data}" resultset_format.delimiters.resize(2); - resultset_format.formats.emplace_back(ParsedTemplateFormatString::ColumnFormat::None); + resultset_format.escaping_rules.emplace_back(ParsedTemplateFormatString::EscapingRule::None); resultset_format.format_idx_to_column_idx.emplace_back(0); resultset_format.column_names.emplace_back("data"); } @@ -266,17 +232,5 @@ void registerOutputFormatTemplate(FormatFactory & factory) return std::make_shared<TemplateBlockOutputFormat>(sample, buf, settings, resultset_format, row_format, settings.template_settings.row_between_delimiter); });
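The CustomSeparated registration removed just below does not disappear: it moves next to the new CustomSeparatedRowOutputFormat introduced at the top of this patch, where the with-names/with-types variants are produced by the registerWithNamesAndTypes helper. The variant-expansion pattern is roughly the following sketch (assumed, simplified signatures rather than the FormatFactory API):

    #include <functional>
    #include <string>

    // One callback, several registered format names: the helper appends the
    // conventional suffixes and flips the with_names/with_types flags.
    using RegisterFunc = std::function<void(const std::string & name, bool with_names, bool with_types)>;

    void registerWithNamesAndTypesSketch(const std::string & base_name, const RegisterFunc & register_func)
    {
        register_func(base_name, false, false);
        register_func(base_name + "WithNames", true, false);
        register_func(base_name + "WithNamesAndTypes", true, true);
    }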
- - factory.registerOutputFormat("CustomSeparated", []( - WriteBuffer & buf, - const Block & sample, - const RowOutputFormatParams &, - const FormatSettings & settings) - { - ParsedTemplateFormatString resultset_format = ParsedTemplateFormatString::setupCustomSeparatedResultsetFormat(settings.custom); - ParsedTemplateFormatString row_format = ParsedTemplateFormatString::setupCustomSeparatedRowFormat(settings.custom, sample); - - return std::make_shared<TemplateBlockOutputFormat>(sample, buf, settings, resultset_format, row_format, settings.custom.row_between_delimiter); - }); } } diff --git a/src/Processors/Formats/Impl/TemplateBlockOutputFormat.h b/src/Processors/Formats/Impl/TemplateBlockOutputFormat.h index 0d41b8888d4..5fb340bdb4d 100644 --- a/src/Processors/Formats/Impl/TemplateBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/TemplateBlockOutputFormat.h @@ -12,7 +12,7 @@ namespace DB class TemplateBlockOutputFormat : public IOutputFormat { - using ColumnFormat = ParsedTemplateFormatString::ColumnFormat; + using EscapingRule = FormatSettings::EscapingRule; public: TemplateBlockOutputFormat(const Block & header_, WriteBuffer & out_, const FormatSettings & settings_, ParsedTemplateFormatString format_, ParsedTemplateFormatString row_format_, @@ -20,8 +20,6 @@ public: String getName() const override { return "TemplateBlockOutputFormat"; } - void doWritePrefix() override; - void setRowsBeforeLimit(size_t rows_before_limit_) override { rows_before_limit = rows_before_limit_; rows_before_limit_set = true; } void onProgress(const Progress & progress_) override { progress.incrementPiecewiseAtomically(progress_); } @@ -40,17 +38,16 @@ public: static ResultsetPart stringToResultsetPart(const String & part); -protected: +private: + void writePrefix() override; void consume(Chunk chunk) override; void consumeTotals(Chunk chunk) override { totals = std::move(chunk); } void consumeExtremes(Chunk chunk) override { extremes = std::move(chunk); } - void finalize() override; + void finalizeImpl() override; void writeRow(const Chunk & chunk, size_t row_num); - void serializeField(const IColumn & column, const ISerialization & serialization, size_t row_num, ColumnFormat format); - template <typename U, typename V> void writeValue(U value, ColumnFormat col_format); + template <typename U, typename V> void writeValue(U value, EscapingRule escaping_rule); -protected: const FormatSettings settings; Serializations serializations; @@ -65,7 +62,6 @@ protected: Stopwatch watch; size_t row_count = 0; - bool need_write_prefix = true; std::string row_between_delimiter; }; diff --git a/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp b/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp index c096b62e967..25162e927ac 100644 --- a/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp @@ -1,6 +1,7 @@ #include #include #include +#include <Formats/EscapingRuleUtils.h> #include #include #include @@ -38,14 +39,14 @@ TemplateRowInputFormat::TemplateRowInputFormat(const Block & header_, ReadBuffer format.throwInvalidFormat("Invalid input part", i); if (has_data) format.throwInvalidFormat("${data} can occur only once", i); - if (format.formats[i] != ColumnFormat::None) + if (format.escaping_rules[i] != EscapingRule::None) format.throwInvalidFormat("${data} must have empty or None deserialization type", i); has_data = true; format_data_idx = i; } else { - if (format.formats[i] == ColumnFormat::Xml) + if (format.escaping_rules[i] == EscapingRule::XML) format.throwInvalidFormat("XML deserialization is not supported", i); } } @@ -54,7 +55,7 @@ TemplateRowInputFormat::TemplateRowInputFormat(const Block & header_, ReadBuffer std::vector<bool> column_in_format(header_.columns(), false); for (size_t i = 0; i < row_format.columnsCount(); ++i)
{ - if (row_format.formats[i] == ColumnFormat::Xml) + if (row_format.escaping_rules[i] == EscapingRule::XML) row_format.throwInvalidFormat("XML deserialization is not supported", i); if (row_format.format_idx_to_column_idx[i]) @@ -62,7 +63,7 @@ TemplateRowInputFormat::TemplateRowInputFormat(const Block & header_, ReadBuffer if (header_.columns() <= *row_format.format_idx_to_column_idx[i]) row_format.throwInvalidFormat("Column index " + std::to_string(*row_format.format_idx_to_column_idx[i]) + " must be less then number of columns (" + std::to_string(header_.columns()) + ")", i); - if (row_format.formats[i] == ColumnFormat::None) + if (row_format.escaping_rules[i] == EscapingRule::None) row_format.throwInvalidFormat("Column is not skipped, but deserialization type is None", i); size_t col_idx = *row_format.format_idx_to_column_idx[i]; @@ -111,12 +112,12 @@ ReturnType TemplateRowInputFormat::tryReadPrefixOrSuffix(size_t & input_part_beg { skipSpaces(); if constexpr (throw_exception) - skipField(format.formats[input_part_beg]); + skipField(format.escaping_rules[input_part_beg]); else { try { - skipField(format.formats[input_part_beg]); + skipField(format.escaping_rules[input_part_beg]); } catch (const Exception & e) { @@ -176,7 +177,7 @@ bool TemplateRowInputFormat::readRow(MutableColumns & columns, RowReadExtension extra.read_columns[col_idx] = deserializeField(data_types[col_idx], serializations[col_idx], *columns[col_idx], i); } else - skipField(row_format.formats[i]); + skipField(row_format.escaping_rules[i]); } @@ -192,49 +193,14 @@ bool TemplateRowInputFormat::readRow(MutableColumns & columns, RowReadExtension bool TemplateRowInputFormat::deserializeField(const DataTypePtr & type, const SerializationPtr & serialization, IColumn & column, size_t file_column) { - ColumnFormat col_format = row_format.formats[file_column]; - bool read = true; - bool parse_as_nullable = settings.null_as_default && !type->isNullable() && !type->isLowCardinalityNullable(); + EscapingRule escaping_rule = row_format.escaping_rules[file_column]; + if (escaping_rule == EscapingRule::CSV) + /// Will read unquoted string until settings.csv.delimiter + settings.csv.delimiter = row_format.delimiters[file_column + 1].empty() ? default_csv_delimiter : + row_format.delimiters[file_column + 1].front(); try { - switch (col_format) - { - case ColumnFormat::Escaped: - if (parse_as_nullable) - read = SerializationNullable::deserializeTextEscapedImpl(column, buf, settings, serialization); - else - serialization->deserializeTextEscaped(column, buf, settings); - break; - case ColumnFormat::Quoted: - if (parse_as_nullable) - read = SerializationNullable::deserializeTextQuotedImpl(column, buf, settings, serialization); - else - serialization->deserializeTextQuoted(column, buf, settings); - break; - case ColumnFormat::Csv: - /// Will read unquoted string until settings.csv.delimiter - settings.csv.delimiter = row_format.delimiters[file_column + 1].empty() ? 
default_csv_delimiter : - row_format.delimiters[file_column + 1].front(); - if (parse_as_nullable) - read = SerializationNullable::deserializeTextCSVImpl(column, buf, settings, serialization); - else - serialization->deserializeTextCSV(column, buf, settings); - break; - case ColumnFormat::Json: - if (parse_as_nullable) - read = SerializationNullable::deserializeTextJSONImpl(column, buf, settings, serialization); - else - serialization->deserializeTextJSON(column, buf, settings); - break; - case ColumnFormat::Raw: - if (parse_as_nullable) - read = SerializationNullable::deserializeTextRawImpl(column, buf, settings, serialization); - else - serialization->deserializeTextRaw(column, buf, settings); - break; - default: - __builtin_unreachable(); - } + return deserializeFieldByEscapingRule(type, serialization, column, buf, escaping_rule, settings); } catch (Exception & e) { @@ -242,36 +208,13 @@ bool TemplateRowInputFormat::deserializeField(const DataTypePtr & type, throwUnexpectedEof(); throw; } - return read; } -void TemplateRowInputFormat::skipField(TemplateRowInputFormat::ColumnFormat col_format) +void TemplateRowInputFormat::skipField(TemplateRowInputFormat::EscapingRule escaping_rule) { - String tmp; - constexpr const char * field_name = ""; - constexpr size_t field_name_len = 16; try { - switch (col_format) - { - case ColumnFormat::None: - /// Empty field, just skip spaces - break; - case ColumnFormat::Escaped: - readEscapedString(tmp, buf); - break; - case ColumnFormat::Quoted: - readQuotedString(tmp, buf); - break; - case ColumnFormat::Csv: - readCSVString(tmp, buf, settings.csv); - break; - case ColumnFormat::Json: - skipJSONField(buf, StringRef(field_name, field_name_len)); - break; - default: - __builtin_unreachable(); - } + skipFieldByEscapingRule(buf, escaping_rule, settings); } catch (Exception & e) { @@ -344,29 +287,13 @@ bool TemplateRowInputFormat::parseRowAndPrintDiagnosticInfo(MutableColumns & col out << "\nUsing format string (from format_schema_rows): " << row_format.dump() << "\n"; out << "\nTrying to parse next row, because suffix does not match:\n"; - try - { - if (likely(row_num != 1)) - assertString(row_between_delimiter, buf); - } - catch (const DB::Exception &) - { - writeErrorStringForWrongDelimiter(out, "delimiter between rows", row_between_delimiter); - + if (likely(row_num != 1) && !parseDelimiterWithDiagnosticInfo(out, buf, row_between_delimiter, "delimiter between rows", ignore_spaces)) return false; - } + for (size_t i = 0; i < row_format.columnsCount(); ++i) { - skipSpaces(); - try - { - assertString(row_format.delimiters[i], buf); - } - catch (const DB::Exception &) - { - writeErrorStringForWrongDelimiter(out, "delimiter before field " + std::to_string(i), row_format.delimiters[i]); + if (!parseDelimiterWithDiagnosticInfo(out, buf, row_format.delimiters[i], "delimiter before field " + std::to_string(i), ignore_spaces)) return false; - } skipSpaces(); if (row_format.format_idx_to_column_idx[i]) @@ -377,7 +304,7 @@ bool TemplateRowInputFormat::parseRowAndPrintDiagnosticInfo(MutableColumns & col *columns[col_idx], out, i)) { out << "Maybe it's not possible to deserialize field " + std::to_string(i) + - " as " + ParsedTemplateFormatString::formatToString(row_format.formats[i]); + " as " + escapingRuleToString(row_format.escaping_rules[i]); return false; } } @@ -391,39 +318,39 @@ bool TemplateRowInputFormat::parseRowAndPrintDiagnosticInfo(MutableColumns & col } } - skipSpaces(); + return parseDelimiterWithDiagnosticInfo(out, buf, row_format.delimiters.back(), 
"delimiter after last field", ignore_spaces); +} + +bool parseDelimiterWithDiagnosticInfo(WriteBuffer & out, ReadBuffer & buf, const String & delimiter, const String & description, bool skip_spaces) +{ + if (skip_spaces) + skipWhitespaceIfAny(buf); try { - assertString(row_format.delimiters.back(), buf); + assertString(delimiter, buf); } catch (const DB::Exception &) { - writeErrorStringForWrongDelimiter(out, "delimiter after last field", row_format.delimiters.back()); + out << "ERROR: There is no " << description << ": expected "; + verbosePrintString(delimiter.data(), delimiter.data() + delimiter.size(), out); + out << ", got "; + if (buf.eof()) + out << ""; + else + verbosePrintString(buf.position(), std::min(buf.position() + delimiter.size() + 10, buf.buffer().end()), out); + out << '\n'; return false; } - return true; } -void TemplateRowInputFormat::writeErrorStringForWrongDelimiter(WriteBuffer & out, const String & description, const String & delim) -{ - out << "ERROR: There is no " << description << ": expected "; - verbosePrintString(delim.data(), delim.data() + delim.size(), out); - out << ", got "; - if (buf.eof()) - out << ""; - else - verbosePrintString(buf.position(), std::min(buf.position() + delim.size() + 10, buf.buffer().end()), out); - out << '\n'; -} - void TemplateRowInputFormat::tryDeserializeField(const DataTypePtr & type, IColumn & column, size_t file_column) { const auto & index = row_format.format_idx_to_column_idx[file_column]; if (index) deserializeField(type, serializations[*index], column, file_column); else - skipField(row_format.formats[file_column]); + skipField(row_format.escaping_rules[file_column]); } bool TemplateRowInputFormat::isGarbageAfterField(size_t, ReadBuffer::Position) @@ -439,62 +366,13 @@ bool TemplateRowInputFormat::allowSyncAfterError() const void TemplateRowInputFormat::syncAfterError() { - bool at_beginning_of_row_or_eof = false; - while (!at_beginning_of_row_or_eof) - { - skipToNextDelimiterOrEof(row_format.delimiters.back()); - if (buf.eof()) - { - end_of_stream = true; - return; - } - buf.ignore(row_format.delimiters.back().size()); - - skipSpaces(); - if (checkForSuffix()) - return; - - bool last_delimiter_in_row_found = !row_format.delimiters.back().empty(); - - if (last_delimiter_in_row_found && checkString(row_between_delimiter, buf)) - at_beginning_of_row_or_eof = true; - else - skipToNextDelimiterOrEof(row_between_delimiter); - - if (buf.eof()) - at_beginning_of_row_or_eof = end_of_stream = true; - } + skipToNextRowOrEof(buf, row_format.delimiters.back(), row_between_delimiter, ignore_spaces); + end_of_stream = buf.eof(); /// It can happen that buf.position() is not at the beginning of row /// if some delimiters is similar to row_format.delimiters.back() and row_between_delimiter. /// It will cause another parsing error. 
-/// Searches for delimiter in input stream and sets buffer position to the beginning of delimiter (if found) or EOF (if not) -void TemplateRowInputFormat::skipToNextDelimiterOrEof(const String & delimiter) -{ - if (delimiter.empty()) - return; - - while (!buf.eof()) - { - void * pos = memchr(buf.position(), delimiter[0], buf.available()); - if (!pos) - { - buf.position() += buf.available(); - continue; - } - - buf.position() = static_cast<char *>(pos); - - PeekableReadBufferCheckpoint checkpoint{buf}; - if (checkString(delimiter, buf)) - return; - - buf.rollbackToCheckpoint(); - ++buf.position(); - } -} - void TemplateRowInputFormat::throwUnexpectedEof() { throw ParsingException("Unexpected EOF while parsing row " + std::to_string(row_num) + ". " @@ -524,7 +402,7 @@ void registerInputFormatTemplate(FormatFactory & factory) { /// Default format string: "${data}" resultset_format.delimiters.resize(2); - resultset_format.formats.emplace_back(ParsedTemplateFormatString::ColumnFormat::None); + resultset_format.escaping_rules.emplace_back(ParsedTemplateFormatString::EscapingRule::None); resultset_format.format_idx_to_column_idx.emplace_back(0); resultset_format.column_names.emplace_back("data"); } @@ -554,21 +432,6 @@ void registerInputFormatTemplate(FormatFactory & factory) return std::make_shared<TemplateRowInputFormat>(sample, buf, params, settings, ignore_spaces, resultset_format, row_format, settings.template_settings.row_between_delimiter); }); } - - for (bool ignore_spaces : {false, true}) - { - factory.registerInputFormat(ignore_spaces ? "CustomSeparatedIgnoreSpaces" : "CustomSeparated", [=]( - ReadBuffer & buf, - const Block & sample, - IRowInputFormat::Params params, - const FormatSettings & settings) - { -
size_t file_column) override; bool isGarbageAfterField(size_t after_col_idx, ReadBuffer::Position pos) override; - void writeErrorStringForWrongDelimiter(WriteBuffer & out, const String & description, const String & delim); - void skipToNextDelimiterOrEof(const String & delimiter); - -private: PeekableReadBuffer buf; const DataTypes data_types; @@ -68,4 +64,6 @@ private: const std::string row_between_delimiter; }; +bool parseDelimiterWithDiagnosticInfo(WriteBuffer & out, ReadBuffer & buf, const String & delimiter, const String & description, bool skip_spaces); + } diff --git a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp index 5f471dc0151..adf6d2e8a25 100644 --- a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp @@ -3,7 +3,6 @@ #include #include #include -#include #include #include #include @@ -12,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/src/Processors/Formats/Impl/ValuesRowOutputFormat.h b/src/Processors/Formats/Impl/ValuesRowOutputFormat.h index 493ce458b1e..8d89854d43c 100644 --- a/src/Processors/Formats/Impl/ValuesRowOutputFormat.h +++ b/src/Processors/Formats/Impl/ValuesRowOutputFormat.h @@ -19,13 +19,13 @@ public: String getName() const override { return "ValuesRowOutputFormat"; } +private: void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; void writeFieldDelimiter() override; void writeRowStartDelimiter() override; void writeRowEndDelimiter() override; void writeRowBetweenDelimiter() override; -private: const FormatSettings format_settings; }; diff --git a/src/Processors/Formats/Impl/VerticalRowOutputFormat.h b/src/Processors/Formats/Impl/VerticalRowOutputFormat.h index 9e89f677f87..075c943cd76 100644 --- a/src/Processors/Formats/Impl/VerticalRowOutputFormat.h +++ b/src/Processors/Formats/Impl/VerticalRowOutputFormat.h @@ -22,6 +22,7 @@ public: String getName() const override { return "VerticalRowOutputFormat"; } +private: void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; void writeRowStartDelimiter() override; void writeRowBetweenDelimiter() override; @@ -34,8 +35,7 @@ public: void writeBeforeTotals() override; void writeBeforeExtremes() override; -protected: - virtual void writeValue(const IColumn & column, const ISerialization & serialization, size_t row_num) const; + void writeValue(const IColumn & column, const ISerialization & serialization, size_t row_num) const; /// For totals and extremes. 
void writeSpecialRow(const Columns & columns, size_t row_num, const char * title); diff --git a/src/Processors/Formats/Impl/XMLRowOutputFormat.h b/src/Processors/Formats/Impl/XMLRowOutputFormat.h index 8ca4721c459..7f08fc12bd1 100644 --- a/src/Processors/Formats/Impl/XMLRowOutputFormat.h +++ b/src/Processors/Formats/Impl/XMLRowOutputFormat.h @@ -20,6 +20,7 @@ public: String getName() const override { return "XMLRowOutputFormat"; } +private: void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; void writeRowStartDelimiter() override; void writeRowEndDelimiter() override; @@ -54,7 +55,6 @@ public: String getContentType() const override { return "application/xml; charset=UTF-8"; } -protected: void writeExtremesElement(const char * title, const Columns & columns, size_t row_num); void writeRowsBeforeLimitAtLeast(); void writeStatistics(); diff --git a/src/Processors/Formats/LazyOutputFormat.h b/src/Processors/Formats/LazyOutputFormat.h index 123d393a871..b539a8494c7 100644 --- a/src/Processors/Formats/LazyOutputFormat.h +++ b/src/Processors/Formats/LazyOutputFormat.h @@ -34,7 +34,7 @@ public: queue.clearAndFinish(); } - void finalize() override + void finalizeImpl() override { queue.finish(); } diff --git a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp index b5690d9dafb..87fa5ec1c4a 100644 --- a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp +++ b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp @@ -75,6 +75,11 @@ void RowInputFormatWithNamesAndTypes::addInputColumn(const String & column_name, void RowInputFormatWithNamesAndTypes::readPrefix() { + /// This is a bit of abstraction leakage, but we need it in parallel parsing: + /// we check if this InputFormat is working with the "real" beginning of the data. + if (getCurrentUnitNumber() != 0) + return; + if (with_names || with_types || data_types.at(0)->textCanContainOnlyValidUTF8()) { /// We assume that column name or type cannot contain BOM, so, if format has header, @@ -82,9 +87,12 @@ void RowInputFormatWithNamesAndTypes::readPrefix() skipBOMIfExists(*in); } + /// Skip prefix before names and types. + skipPrefixBeforeHeader(); + /// This is a bit of abstraction leakage, but we need it in parallel parsing: /// we check if this InputFormat is working with the "real" beginning of the data. - if (with_names && getCurrentUnitNumber() == 0) + if (with_names) { if (format_settings.with_names_use_header) { @@ -108,8 +116,10 @@ void RowInputFormatWithNamesAndTypes::readPrefix() else if (!column_mapping->is_set) setupAllColumnsByTableSchema(); - if (with_types && getCurrentUnitNumber() == 0) + if (with_types) { + /// Skip delimiter between names and types. 
+ skipRowBetweenDelimiter(); if (format_settings.with_types_use_header) { auto types = readTypes(); @@ -148,10 +158,20 @@ void RowInputFormatWithNamesAndTypes::insertDefaultsForNotSeenColumns(MutableCol bool RowInputFormatWithNamesAndTypes::readRow(MutableColumns & columns, RowReadExtension & ext) { - if (in->eof()) + if (unlikely(end_of_stream)) return false; + if (unlikely(checkForSuffix())) + { + end_of_stream = true; + return false; + } + updateDiagnosticInfo(); + + if (likely(row_num != 1 || (getCurrentUnitNumber() == 0 && (with_names || with_types)))) + skipRowBetweenDelimiter(); + skipRowStartDelimiter(); ext.read_columns.resize(data_types.size()); @@ -190,6 +210,7 @@ void RowInputFormatWithNamesAndTypes::resetParser() column_mapping->column_indexes_for_input_fields.clear(); column_mapping->not_presented_columns.clear(); column_mapping->names_of_columns.clear(); + end_of_stream = false; } void RowInputFormatWithNamesAndTypes::tryDeserializeField(const DataTypePtr & type, IColumn & column, size_t file_column) @@ -215,6 +236,12 @@ bool RowInputFormatWithNamesAndTypes::parseRowAndPrintDiagnosticInfo(MutableColu return false; } + if (!tryParseSuffixWithDiagnosticInfo(out)) + return false; + + if (likely(row_num != 1) && !parseRowBetweenDelimiterWithDiagnosticInfo(out)) + return false; + if (!parseRowStartWithDiagnosticInfo(out)) return false; diff --git a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.h b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.h index 0fd83238f5f..cd7cd34d7e6 100644 --- a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.h +++ b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.h @@ -30,8 +30,6 @@ public: const Params & params_, bool with_names_, bool with_types_, const FormatSettings & format_settings_); - bool readRow(MutableColumns & columns, RowReadExtension & ext) override; - void readPrefix() override; void resetParser() override; protected: @@ -46,16 +44,22 @@ protected: virtual void skipTypes() = 0; /// Skip delimiters, if any. + virtual void skipPrefixBeforeHeader() {} virtual void skipRowStartDelimiter() {} virtual void skipFieldDelimiter() {} virtual void skipRowEndDelimiter() {} + virtual void skipRowBetweenDelimiter() {} + /// Check suffix. + virtual bool checkForSuffix() { return in->eof(); } /// Methods for parsing with diagnostic info. virtual void checkNullValueForNonNullable(DataTypePtr) {} virtual bool parseRowStartWithDiagnosticInfo(WriteBuffer &) { return true; } virtual bool parseFieldDelimiterWithDiagnosticInfo(WriteBuffer &) { return true; } virtual bool parseRowEndWithDiagnosticInfo(WriteBuffer &) { return true;} + virtual bool parseRowBetweenDelimiterWithDiagnosticInfo(WriteBuffer &) { return true;} + virtual bool tryParseSuffixWithDiagnosticInfo(WriteBuffer &) { return true; } bool isGarbageAfterField(size_t, ReadBuffer::Position) override {return false; } /// Read row with names and return the list of them. 
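The hunks above rework RowInputFormatWithNamesAndTypes::readRow() so that the suffix is probed before each row and the between-row delimiter is consumed before every row except the first. A standalone sketch of that control flow, using simplified stand-in types rather than ClickHouse's actual classes:

#include <optional>
#include <string>
#include <string_view>

struct RowReader
{
    std::string_view in;            /// remaining input
    bool end_of_stream = false;
    size_t row_num = 0;

    bool checkForSuffix() const { return in.empty(); }   /// EOF doubles as the suffix here

    std::optional<std::string> readRow()
    {
        if (end_of_stream || checkForSuffix())
        {
            end_of_stream = true;   /// remembered so later calls return early; cleared by resetParser()
            return std::nullopt;
        }
        ++row_num;
        if (row_num != 1 && !in.empty() && in.front() == '\n')   /// row_between_delimiter
            in.remove_prefix(1);
        const auto pos = in.find('\n');
        std::string row(in.substr(0, pos));
        in.remove_prefix(pos == std::string_view::npos ? in.size() : pos);
        return row;
    }
};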
@@ -65,8 +69,12 @@ protected: const FormatSettings format_settings; DataTypes data_types; + bool end_of_stream = false; private: + bool readRow(MutableColumns & columns, RowReadExtension & ext) override; + void readPrefix() override; + bool parseRowAndPrintDiagnosticInfo(MutableColumns & columns, WriteBuffer & out) override; void tryDeserializeField(const DataTypePtr & type, IColumn & column, size_t file_column) override; diff --git a/src/Processors/Sources/SourceWithProgress.cpp b/src/Processors/Sources/SourceWithProgress.cpp index 0ebdd968997..9b7a5c6a762 100644 --- a/src/Processors/Sources/SourceWithProgress.cpp +++ b/src/Processors/Sources/SourceWithProgress.cpp @@ -143,7 +143,7 @@ void SourceWithProgress::progress(const Progress & value) limits.speed_limits.throttle(progress.read_rows, progress.read_bytes, total_rows, total_elapsed_microseconds); if (quota && limits.mode == LimitsMode::LIMITS_TOTAL) - quota->used({Quota::READ_ROWS, value.read_rows}, {Quota::READ_BYTES, value.read_bytes}); + quota->used({QuotaType::READ_ROWS, value.read_rows}, {QuotaType::READ_BYTES, value.read_bytes}); } ProfileEvents::increment(ProfileEvents::SelectedRows, value.read_rows); diff --git a/src/Processors/Transforms/CheckConstraintsTransform.cpp b/src/Processors/Transforms/CheckConstraintsTransform.cpp index ef165502bed..b7849b8a627 100644 --- a/src/Processors/Transforms/CheckConstraintsTransform.cpp +++ b/src/Processors/Transforms/CheckConstraintsTransform.cpp @@ -29,7 +29,7 @@ CheckConstraintsTransform::CheckConstraintsTransform( ContextPtr context_) : ExceptionKeepingTransform(header, header) , table_id(table_id_) - , constraints(constraints_) + , constraints_to_check(constraints_.filterConstraints(ConstraintsDescription::ConstraintType::CHECK)) , expressions(constraints_.getExpressions(context_, header.getNamesAndTypesList())) { } @@ -45,7 +45,7 @@ void CheckConstraintsTransform::transform(Chunk & chunk) auto constraint_expr = expressions[i]; constraint_expr->execute(block_to_calculate); - auto * constraint_ptr = constraints.constraints[i]->as<ASTConstraintDeclaration>(); + auto * constraint_ptr = constraints_to_check[i]->as<ASTConstraintDeclaration>(); ColumnWithTypeAndName res_column = block_to_calculate.getByName(constraint_ptr->expr->getColumnName()); diff --git a/src/Processors/Transforms/CheckConstraintsTransform.h b/src/Processors/Transforms/CheckConstraintsTransform.h index 13569bac0de..3198ec84198 100644 --- a/src/Processors/Transforms/CheckConstraintsTransform.h +++ b/src/Processors/Transforms/CheckConstraintsTransform.h @@ -27,7 +27,7 @@ public: private: StorageID table_id; - const ConstraintsDescription constraints; + const ASTs constraints_to_check; const ConstraintsExpressions expressions; size_t rows_written = 0; }; diff --git a/src/Processors/Transforms/LimitsCheckingTransform.cpp b/src/Processors/Transforms/LimitsCheckingTransform.cpp index 64b6b64ccd9..e5f74003ac3 100644 --- a/src/Processors/Transforms/LimitsCheckingTransform.cpp +++ b/src/Processors/Transforms/LimitsCheckingTransform.cpp @@ -65,9 +65,9 @@ void LimitsCheckingTransform::checkQuota(Chunk & chunk) { UInt64 total_elapsed = info.total_stopwatch.elapsedNanoseconds(); quota->used( - {Quota::RESULT_ROWS, chunk.getNumRows()}, - {Quota::RESULT_BYTES, chunk.bytes()}, - {Quota::EXECUTION_TIME, total_elapsed - prev_elapsed}); + {QuotaType::RESULT_ROWS, chunk.getNumRows()}, + {QuotaType::RESULT_BYTES, chunk.bytes()}, + {QuotaType::EXECUTION_TIME, total_elapsed - prev_elapsed}); prev_elapsed = total_elapsed; break; } diff --git a/src/Processors/Transforms/SortingTransform.cpp
b/src/Processors/Transforms/SortingTransform.cpp index eeb576731ab..7bdc927d0d8 100644 --- a/src/Processors/Transforms/SortingTransform.cpp +++ b/src/Processors/Transforms/SortingTransform.cpp @@ -13,15 +13,9 @@ #include -namespace ProfileEvents -{ - extern const Event ExternalSortWritePart; - extern const Event ExternalSortMerge; -} - - namespace DB { + namespace ErrorCodes { extern const int NOT_IMPLEMENTED; diff --git a/src/QueryPipeline/Pipe.h b/src/QueryPipeline/Pipe.h index 0af02a5e662..613e92a782d 100644 --- a/src/QueryPipeline/Pipe.h +++ b/src/QueryPipeline/Pipe.h @@ -3,12 +3,12 @@ #include #include #include -#include #include namespace DB { +class EnabledQuota; struct StreamLocalLimits; class Pipe; diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index 12f74805173..9e198f45e98 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -1,11 +1,11 @@ #pragma once -#include #include #include #include #include #include +#include namespace DB { @@ -19,6 +19,9 @@ using AggregatingTransformParamsPtr = std::shared_ptr<AggregatingTransformParams>; + struct SubqueryForSet; using SubqueriesForSets = std::unordered_map<String, SubqueryForSet>; @@ -122,7 +125,7 @@ public: const Block & getHeader() const { return pipe.getHeader(); } void addTableLock(TableLockHolder lock) { pipe.addTableLock(std::move(lock)); } - void addInterpreterContext(std::shared_ptr<Context> context) { pipe.addInterpreterContext(std::move(context)); } + void addInterpreterContext(ContextPtr context) { pipe.addInterpreterContext(std::move(context)); } void addStorageHolder(StoragePtr storage) { pipe.addStorageHolder(std::move(storage)); } void addQueryPlan(std::unique_ptr<QueryPlan> plan); void setLimits(const StreamLocalLimits & limits) { pipe.setLimits(limits); } diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index b01ed7ba9a2..a7f74da53ab 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -296,6 +296,12 @@ std::variant<Block, int> RemoteQueryExecutor::read(std::unique_ptr } else { + /// We need to check that query was not cancelled again, + /// to avoid the race between cancel() thread and read() thread. + /// (since cancel() thread will steal the fiber and may update the packet).
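The comment above describes a classic check-then-wait race: read() tests the cancellation flag, suspends, and cancel() may run in between. The guard added just below re-checks the flag after resuming; a minimal standalone sketch, with invented names, of why the second check is required:

#include <atomic>
#include <optional>

std::atomic<bool> was_cancelled{false};

std::optional<int> readPacket(int packet)
{
    if (was_cancelled.load())   /// first check, before suspending
        return std::nullopt;
    /// ... suspension point: another thread may call cancel() here,
    /// steal the fiber and overwrite the packet we were handed ...
    if (was_cancelled.load())   /// second check, after resuming
        return std::nullopt;    /// drop the packet instead of racing on it
    return packet;
}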
+ if (was_cancelled) + return Block(); + if (auto data = processPacket(std::move(read_context->packet))) return std::move(*data); else if (got_duplicated_part_uuids) diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index ba2644e0fba..b10961975f6 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -39,6 +40,7 @@ #include #include #include +#include #include #include #include @@ -956,7 +958,18 @@ namespace { if (insert_query) { - auto table_id = query_context->resolveStorageID(insert_query->table_id, Context::ResolveOrdinary); + auto table_id = StorageID::createEmpty(); + + if (insert_query->table_id) + { + table_id = query_context->resolveStorageID(insert_query->table_id, Context::ResolveOrdinary); + } + else + { + StorageID local_table_id(insert_query->getDatabase(), insert_query->getTable()); + table_id = query_context->resolveStorageID(local_table_id, Context::ResolveOrdinary); + } + if (query_context->getSettingsRef().input_format_defaults_for_omitted_fields && table_id) { StoragePtr storage = DatabaseCatalog::instance().getTable(table_id, query_context); @@ -1090,7 +1103,6 @@ namespace write_buffer.emplace(*result.mutable_output()); output_format_processor = query_context->getOutputFormat(output_format, *write_buffer, header); - output_format_processor->doWritePrefix(); Stopwatch after_send_progress; /// Unless the input() function is used we are not going to receive input data anymore. @@ -1169,7 +1181,7 @@ namespace executor->execute(); } - output_format_processor->doWriteSuffix(); + output_format_processor->finalize(); } void Call::finishQuery() @@ -1380,9 +1392,8 @@ namespace WriteBufferFromString buf{*result.mutable_totals()}; auto format = query_context->getOutputFormat(output_format, buf, totals); - format->doWritePrefix(); format->write(materializeBlock(totals)); - format->doWriteSuffix(); + format->finalize(); } void Call::addExtremesToResult(const Block & extremes) @@ -1392,9 +1403,8 @@ namespace WriteBufferFromString buf{*result.mutable_extremes()}; auto format = query_context->getOutputFormat(output_format, buf, extremes); - format->doWritePrefix(); format->write(materializeBlock(extremes)); - format->doWriteSuffix(); + format->finalize(); } void Call::addProfileInfoToResult(const ProfileInfo & info) diff --git a/src/Server/KeeperTCPHandler.cpp b/src/Server/KeeperTCPHandler.cpp index 17a13955043..afcf03b2574 100644 --- a/src/Server/KeeperTCPHandler.cpp +++ b/src/Server/KeeperTCPHandler.cpp @@ -333,6 +333,16 @@ void KeeperTCPHandler::runImpl() }; keeper_dispatcher->registerSession(session_id, response_callback); + Stopwatch logging_stopwatch; + auto log_long_operation = [&](const String & operation) + { + constexpr UInt64 operation_max_ms = 500; + auto elapsed_ms = logging_stopwatch.elapsedMilliseconds(); + if (operation_max_ms < elapsed_ms) + LOG_TEST(log, "{} for session {} took {} ms", operation, session_id, elapsed_ms); + logging_stopwatch.restart(); + }; + session_stopwatch.start(); bool close_received = false; try @@ -342,9 +352,11 @@ void KeeperTCPHandler::runImpl() using namespace std::chrono_literals; PollResult result = poll_wrapper->poll(session_timeout, in); + log_long_operation("Polling socket"); if (result.has_requests && !close_received) { auto [received_op, received_xid] = receiveRequest(); + log_long_operation("Receiving request"); if (received_op == Coordination::OpNum::Close) { @@ -370,6 +382,7 @@ void KeeperTCPHandler::runImpl() if 
(!responses->tryPop(response)) throw Exception(ErrorCodes::LOGICAL_ERROR, "We must have ready response, but queue is empty. It's a bug."); + log_long_operation("Waiting for response to be ready"); if (response->xid == close_xid) { @@ -378,6 +391,7 @@ } response->write(*out); + log_long_operation("Sending response"); if (response->error == Coordination::Error::ZSESSIONEXPIRED) { LOG_DEBUG(log, "Session #{} expired because server shutting down or quorum is not alive", session_id); @@ -401,6 +415,8 @@ } catch (const Exception & ex) { + log_long_operation("Unknown operation"); + LOG_TRACE(log, "Has {} responses in the queue", responses->size()); LOG_INFO(log, "Got exception processing session #{}: {}", session_id, getExceptionMessage(ex, true)); keeper_dispatcher->finishSession(session_id); } diff --git a/src/Server/MySQLHandler.cpp b/src/Server/MySQLHandler.cpp index 2f28095f976..deebc073ad5 100644 --- a/src/Server/MySQLHandler.cpp +++ b/src/Server/MySQLHandler.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index c5101f162ee..52825c4fb43 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -549,9 +549,10 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context) } else if (type == ADD_CONSTRAINT) { + auto constraints = metadata.constraints.getConstraints(); if (std::any_of( - metadata.constraints.constraints.cbegin(), - metadata.constraints.constraints.cend(), + constraints.cbegin(), + constraints.cend(), [this](const ASTPtr & constraint_ast) { return constraint_ast->as<ASTConstraintDeclaration &>().name == constraint_name; @@ -563,28 +564,30 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context) ErrorCodes::ILLEGAL_COLUMN); } - auto insert_it = metadata.constraints.constraints.end(); - - metadata.constraints.constraints.emplace(insert_it, std::dynamic_pointer_cast<ASTConstraintDeclaration>(constraint_decl)); + auto insert_it = constraints.end(); + constraints.emplace(insert_it, constraint_decl); + metadata.constraints = ConstraintsDescription(constraints); } else if (type == DROP_CONSTRAINT) { + auto constraints = metadata.constraints.getConstraints(); auto erase_it = std::find_if( - metadata.constraints.constraints.begin(), - metadata.constraints.constraints.end(), + constraints.begin(), + constraints.end(), [this](const ASTPtr & constraint_ast) { return constraint_ast->as<ASTConstraintDeclaration &>().name == constraint_name; }); - if (erase_it == metadata.constraints.constraints.end()) { if (if_exists) return; throw Exception("Wrong constraint name.
Cannot find constraint `" + constraint_name + "` to drop.", ErrorCodes::BAD_ARGUMENTS); } - metadata.constraints.constraints.erase(erase_it); + constraints.erase(erase_it); + metadata.constraints = ConstraintsDescription(constraints); } else if (type == ADD_PROJECTION) { @@ -654,8 +657,10 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context) if (metadata.table_ttl.definition_ast) rename_visitor.visit(metadata.table_ttl.definition_ast); - for (auto & constraint : metadata.constraints.constraints) + auto constraints_data = metadata.constraints.getConstraints(); + for (auto & constraint : constraints_data) rename_visitor.visit(constraint); + metadata.constraints = ConstraintsDescription(constraints_data); if (metadata.isSortingKeyDefined()) rename_visitor.visit(metadata.sorting_key.definition_ast); diff --git a/src/Storages/ConstraintsDescription.cpp b/src/Storages/ConstraintsDescription.cpp index 7015c3f8e48..60202e2055e 100644 --- a/src/Storages/ConstraintsDescription.cpp +++ b/src/Storages/ConstraintsDescription.cpp @@ -7,12 +7,17 @@ #include #include #include +#include #include namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} String ConstraintsDescription::toString() const { @@ -41,6 +46,84 @@ ConstraintsDescription ConstraintsDescription::parse(const String & str) return res; } +ASTs ConstraintsDescription::filterConstraints(ConstraintType selection) const +{ + const auto ast_to_decr_constraint_type = [](ASTConstraintDeclaration::Type constraint_type) -> UInt8 + { + switch (constraint_type) + { + case ASTConstraintDeclaration::Type::CHECK: + return static_cast<UInt8>(ConstraintType::CHECK); + case ASTConstraintDeclaration::Type::ASSUME: + return static_cast<UInt8>(ConstraintType::ASSUME); + } + throw Exception("Unknown constraint type.", ErrorCodes::LOGICAL_ERROR); + }; + + ASTs res; + res.reserve(constraints.size()); + for (const auto & constraint : constraints) + { + if ((ast_to_decr_constraint_type(constraint->as<ASTConstraintDeclaration>()->type) & static_cast<UInt8>(selection)) != 0) + { + res.push_back(constraint); + } + } + return res; +} + +std::vector<std::vector<CNFQuery::AtomicFormula>> ConstraintsDescription::buildConstraintData() const +{ + std::vector<std::vector<CNFQuery::AtomicFormula>> constraint_data; + for (const auto & constraint : filterConstraints(ConstraintsDescription::ConstraintType::ALWAYS_TRUE)) + { + const auto cnf = TreeCNFConverter::toCNF(constraint->as<ASTConstraintDeclaration>()->expr->ptr()) + .pullNotOutFunctions(); /// TODO: move prepare stage to ConstraintsDescription + for (const auto & group : cnf.getStatements()) + constraint_data.emplace_back(std::begin(group), std::end(group)); + } + + return constraint_data; +} + +std::vector<CNFQuery::AtomicFormula> ConstraintsDescription::getAtomicConstraintData() const +{ + std::vector<CNFQuery::AtomicFormula> constraint_data; + for (const auto & constraint : filterConstraints(ConstraintsDescription::ConstraintType::ALWAYS_TRUE)) + { + const auto cnf = TreeCNFConverter::toCNF(constraint->as<ASTConstraintDeclaration>()->expr->ptr()) + .pullNotOutFunctions(); + for (const auto & group : cnf.getStatements()) + { + if (group.size() == 1) + constraint_data.push_back(*group.begin()); + } + } + + return constraint_data; +} + +std::unique_ptr<ComparisonGraph> ConstraintsDescription::buildGraph() const +{ + static const NameSet relations = { "equals", "less", "lessOrEquals", "greaterOrEquals", "greater" }; + + std::vector<ASTPtr> constraints_for_graph; + auto atomic_formulas = getAtomicConstraintData(); + for (const auto & atomic_formula : atomic_formulas) + { + CNFQuery::AtomicFormula atom{atomic_formula.negative, atomic_formula.ast->clone()}; + pushNotIn(atom); + auto * func = atom.ast->as<ASTFunction>(); + if (func
&& relations.count(func->name)) + { + assert(!atom.negative); + constraints_for_graph.push_back(atom.ast); + } + } + + return std::make_unique<ComparisonGraph>(constraints_for_graph); +} + ConstraintsExpressions ConstraintsDescription::getExpressions(const DB::ContextPtr context, const DB::NamesAndTypesList & source_columns_) const { @@ -48,20 +131,62 @@ res.reserve(constraints.size()); for (const auto & constraint : constraints) { - // TreeRewriter::analyze has query as non-const argument so to avoid accidental query changes we clone it auto * constraint_ptr = constraint->as<ASTConstraintDeclaration>(); - ASTPtr expr = constraint_ptr->expr->clone(); - auto syntax_result = TreeRewriter(context).analyze(expr, source_columns_); - res.push_back(ExpressionAnalyzer(constraint_ptr->expr->clone(), syntax_result, context).getActions(false, true, CompileExpressions::yes)); + if (constraint_ptr->type == ASTConstraintDeclaration::Type::CHECK) + { + // TreeRewriter::analyze has query as non-const argument so to avoid accidental query changes we clone it + ASTPtr expr = constraint_ptr->expr->clone(); + auto syntax_result = TreeRewriter(context).analyze(expr, source_columns_); + res.push_back(ExpressionAnalyzer(constraint_ptr->expr->clone(), syntax_result, context).getActions(false, true, CompileExpressions::yes)); + } } return res; } +const ComparisonGraph & ConstraintsDescription::getGraph() const +{ + return *graph; +} + +const std::vector<std::vector<CNFQuery::AtomicFormula>> & ConstraintsDescription::getConstraintData() const +{ + return cnf_constraints; +} + +const std::vector<ASTPtr> & ConstraintsDescription::getConstraints() const +{ + return constraints; +} + +std::optional<ConstraintsDescription::AtomIds> ConstraintsDescription::getAtomIds(const ASTPtr & ast) const +{ + const auto hash = ast->getTreeHash(); + auto it = ast_to_atom_ids.find(hash); + if (it != ast_to_atom_ids.end()) + return it->second; + return std::nullopt; +} + +std::vector<CNFQuery::AtomicFormula> ConstraintsDescription::getAtomsById(const ConstraintsDescription::AtomIds & ids) const +{ + std::vector<CNFQuery::AtomicFormula> result; + for (const auto & id : ids) + result.push_back(cnf_constraints[id.group_id][id.atom_id]); + return result; +} + +ConstraintsDescription::ConstraintsDescription(const ASTs & constraints_) + : constraints(constraints_) +{ + update(); +} + ConstraintsDescription::ConstraintsDescription(const ConstraintsDescription & other) { constraints.reserve(other.constraints.size()); for (const auto & constraint : other.constraints) constraints.emplace_back(constraint->clone()); + update(); } ConstraintsDescription & ConstraintsDescription::operator=(const ConstraintsDescription & other) @@ -69,7 +194,27 @@ constraints.resize(other.constraints.size()); for (size_t i = 0; i < constraints.size(); ++i) constraints[i] = other.constraints[i]->clone(); + update(); return *this; } +void ConstraintsDescription::update() +{ + if (constraints.empty()) + { + cnf_constraints.clear(); + ast_to_atom_ids.clear(); + graph = std::make_unique<ComparisonGraph>(std::vector<ASTPtr>()); + return; + } + + cnf_constraints = buildConstraintData(); + ast_to_atom_ids.clear(); + for (size_t i = 0; i < cnf_constraints.size(); ++i) + for (size_t j = 0; j < cnf_constraints[i].size(); ++j) + ast_to_atom_ids[cnf_constraints[i][j].ast->getTreeHash()].push_back({i, j}); + + graph = buildGraph(); +} + } diff --git a/src/Storages/ConstraintsDescription.h b/src/Storages/ConstraintsDescription.h index 5e6416822bb..ad8bd371f38 100644 ---  a/src/Storages/ConstraintsDescription.h +++
b/src/Storages/ConstraintsDescription.h @@ -2,6 +2,8 @@ #include #include +#include +#include namespace DB { @@ -10,19 +12,56 @@ using ConstraintsExpressions = std::vector<ExpressionActionsPtr>; struct ConstraintsDescription { - std::vector<ASTPtr> constraints; - - ConstraintsDescription() = default; +public: + ConstraintsDescription() { update(); } + ConstraintsDescription(const ASTs & constraints_); + ConstraintsDescription(const ConstraintsDescription & other); + ConstraintsDescription & operator=(const ConstraintsDescription & other); bool empty() const { return constraints.empty(); } String toString() const; static ConstraintsDescription parse(const String & str); + enum class ConstraintType : UInt8 + { + CHECK = 1, + ASSUME = 2, + ALWAYS_TRUE = CHECK | ASSUME, + ALL = CHECK | ASSUME, + }; + + ASTs filterConstraints(ConstraintType selection) const; + + const ASTs & getConstraints() const; + + const std::vector<std::vector<CNFQuery::AtomicFormula>> & getConstraintData() const; + std::vector<CNFQuery::AtomicFormula> getAtomicConstraintData() const; + + const ComparisonGraph & getGraph() const; + ConstraintsExpressions getExpressions(ContextPtr context, const NamesAndTypesList & source_columns_) const; - ConstraintsDescription(const ConstraintsDescription & other); - ConstraintsDescription & operator=(const ConstraintsDescription & other); + struct AtomId + { + size_t group_id; + size_t atom_id; + }; + + using AtomIds = std::vector<AtomId>; + + std::optional<AtomIds> getAtomIds(const ASTPtr & ast) const; + std::vector<CNFQuery::AtomicFormula> getAtomsById(const AtomIds & ids) const; + +private: + std::vector<std::vector<CNFQuery::AtomicFormula>> buildConstraintData() const; + std::unique_ptr<ComparisonGraph> buildGraph() const; + void update(); + + ASTs constraints; + std::vector<std::vector<CNFQuery::AtomicFormula>> cnf_constraints; + std::map<IAST::Hash, AtomIds> ast_to_atom_ids; + std::unique_ptr<ComparisonGraph> graph; }; } diff --git a/src/Storages/HDFS/StorageHDFS.cpp b/src/Storages/HDFS/StorageHDFS.cpp index 4e780edeb96..90e63aef46d 100644 --- a/src/Storages/HDFS/StorageHDFS.cpp +++ b/src/Storages/HDFS/StorageHDFS.cpp @@ -211,11 +211,6 @@ public: void consume(Chunk chunk) override { - if (is_first_chunk) - { - writer->doWritePrefix(); - is_first_chunk = false; - } writer->write(getHeader().cloneWithColumns(chunk.detachColumns())); } @@ -223,7 +218,7 @@ public: { try { - writer->doWriteSuffix(); + writer->finalize(); writer->flush(); write_buf->sync(); write_buf->finalize(); @@ -238,7 +233,6 @@ public: private: std::unique_ptr write_buf; OutputFormatPtr writer; - bool is_first_chunk = true; }; diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index fa5f2c28b06..cf7b075a204 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -158,6 +158,9 @@ public: /// This is true for most storages that store data on disk. virtual bool prefersLargeBlocks() const { return true; } + /// Returns true if the storage is for system, which cannot be target of SHOW CREATE TABLE. + virtual bool isSystemStorage() const { return false; } + /// Optional size information of each physical column. /// Currently it's only used by the MergeTree family for query optimizations.
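ConstraintsDescription::ConstraintType above is a bit mask, which is why filterConstraints() can select CHECK constraints, ASSUME constraints, or both with a single bitwise test. A self-contained sketch of that selection logic (simplified Constraint type, same masking idea):

#include <cstdint>
#include <vector>

enum class ConstraintType : uint8_t
{
    CHECK = 1,
    ASSUME = 2,
    ALWAYS_TRUE = CHECK | ASSUME,
};

struct Constraint { ConstraintType type; };

std::vector<Constraint> filterConstraints(const std::vector<Constraint> & all, ConstraintType selection)
{
    std::vector<Constraint> res;
    for (const auto & c : all)
        if (static_cast<uint8_t>(c.type) & static_cast<uint8_t>(selection))  /// any overlapping bit selects it
            res.push_back(c);
    return res;
}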
diff --git a/src/Storages/Kafka/KafkaBlockOutputStream.cpp b/src/Storages/Kafka/KafkaBlockOutputStream.cpp index d3c51fef9b7..aeaee04c506 100644 --- a/src/Storages/Kafka/KafkaBlockOutputStream.cpp +++ b/src/Storages/Kafka/KafkaBlockOutputStream.cpp @@ -42,7 +42,7 @@ void KafkaSink::consume(Chunk chunk) void KafkaSink::onFinish() { if (format) - format->doWriteSuffix(); + format->finalize(); //flush(); if (buffer) diff --git a/src/Storages/LiveView/StorageLiveView.cpp b/src/Storages/LiveView/StorageLiveView.cpp index e3e6b4382f4..9eb20015ed9 100644 --- a/src/Storages/LiveView/StorageLiveView.cpp +++ b/src/Storages/LiveView/StorageLiveView.cpp @@ -18,13 +18,15 @@ limitations under the License. */ #include #include #include -#include +#include +#include #include #include #include #include #include #include +#include "QueryPipeline/printPipeline.h" #include #include @@ -120,7 +122,7 @@ MergeableBlocksPtr StorageLiveView::collectMergeableBlocks(ContextPtr local_cont new_mergeable_blocks->sample_block = builder.getHeader(); auto pipeline = QueryPipelineBuilder::getPipeline(std::move(builder)); - PullingPipelineExecutor executor(pipeline); + PullingAsyncPipelineExecutor executor(pipeline); Block this_block; while (executor.pull(this_block)) @@ -223,7 +225,7 @@ void StorageLiveView::writeIntoLiveView( mergeable_query = live_view.getInnerSubQuery(); Pipes pipes; - pipes.emplace_back(std::make_shared<SourceFromSingleChunk>(block.cloneEmpty(), Chunk(block.getColumns(), block.rows()))); + pipes.emplace_back(std::make_shared<SourceFromSingleChunk>(block)); auto creator = [&](const StorageID & blocks_id_global) { @@ -244,7 +246,7 @@ void StorageLiveView::writeIntoLiveView( }); auto pipeline = QueryPipelineBuilder::getPipeline(std::move(builder)); - PullingPipelineExecutor executor(pipeline); + PullingAsyncPipelineExecutor executor(pipeline); Block this_block; while (executor.pull(this_block)) @@ -385,10 +387,13 @@ bool StorageLiveView::getNewBlocks() auto builder = completeQuery(std::move(from)); auto pipeline = QueryPipelineBuilder::getPipeline(std::move(builder)); - PullingPipelineExecutor executor(pipeline); + PullingAsyncPipelineExecutor executor(pipeline); Block block; while (executor.pull(block)) { + if (block.rows() == 0) + continue; + /// calculate hash before virtual column is added block.updateHash(hash); /// add result version meta column diff --git a/src/Storages/LiveView/TemporaryLiveViewCleaner.cpp b/src/Storages/LiveView/TemporaryLiveViewCleaner.cpp index 69369cbc1a3..12af472247d 100644 --- a/src/Storages/LiveView/TemporaryLiveViewCleaner.cpp +++ b/src/Storages/LiveView/TemporaryLiveViewCleaner.cpp @@ -24,8 +24,8 @@ namespace { /// We create and execute `drop` query for this table auto drop_query = std::make_shared<ASTDropQuery>(); - drop_query->database = storage_id.database_name; - drop_query->table = storage_id.table_name; + drop_query->setDatabase(storage_id.database_name); + drop_query->setTable(storage_id.table_name); drop_query->kind = ASTDropQuery::Kind::Drop; ASTPtr ast_drop_query = drop_query; InterpreterDropQuery drop_interpreter(ast_drop_query, context); diff --git a/src/Storages/MergeTree/BackgroundJobsAssignee.cpp b/src/Storages/MergeTree/BackgroundJobsAssignee.cpp index f7f02ab40e2..4dc15d6e794 100644 --- a/src/Storages/MergeTree/BackgroundJobsAssignee.cpp +++ b/src/Storages/MergeTree/BackgroundJobsAssignee.cpp @@ -24,7 +24,9 @@ void BackgroundJobsAssignee::trigger() if (!holder) return; - no_work_done_count = 0; + /// Do not reset backoff factor if some task has appeared, + /// but decrease it exponentially on every new task.
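trigger() now halves the idle counter instead of zeroing it, so one new task only partially unwinds the backoff that postpone() accumulates. A standalone illustration of the resulting delays, using made-up constants in place of the sleep settings referenced below:

#include <algorithm>
#include <cmath>
#include <cstdio>

int main()
{
    const double base_sleep = 0.1;    /// stand-in for thread_sleep_seconds_if_nothing_to_do
    const double multiplier = 1.1;    /// stand-in for task_sleep_seconds_when_no_work_multiplier
    const double max_sleep = 600.0;   /// stand-in for task_sleep_seconds_when_no_work_max
    size_t no_work_done_count = 20;   /// after 20 idle wake-ups

    const auto delay_ms = [&] { return 1000 * std::min(max_sleep, base_sleep * std::pow(multiplier, no_work_done_count)); };
    std::printf("idle backoff: %.0f ms\n", delay_ms());    /// ~673 ms
    no_work_done_count /= 2;                               /// one task appeared
    std::printf("after halving: %.0f ms\n", delay_ms());   /// ~259 ms
}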
+ no_work_done_count /= 2; /// We have background jobs, schedule task as soon as possible holder->schedule(); } @@ -36,12 +38,12 @@ void BackgroundJobsAssignee::postpone() if (!holder) return; - auto no_work_done_times = no_work_done_count.fetch_add(1, std::memory_order_relaxed); + no_work_done_count += 1; double random_addition = std::uniform_real_distribution<double>(0, sleep_settings.task_sleep_seconds_when_no_work_random_part)(rng); size_t next_time_to_execute = 1000 * (std::min( sleep_settings.task_sleep_seconds_when_no_work_max, - sleep_settings.thread_sleep_seconds_if_nothing_to_do * std::pow(sleep_settings.task_sleep_seconds_when_no_work_multiplier, no_work_done_times)) + sleep_settings.thread_sleep_seconds_if_nothing_to_do * std::pow(sleep_settings.task_sleep_seconds_when_no_work_multiplier, no_work_done_count)) + random_addition); holder->scheduleAfter(next_time_to_execute, false); @@ -69,9 +71,9 @@ void BackgroundJobsAssignee::scheduleMoveTask(ExecutableTaskPtr move_task) } -void BackgroundJobsAssignee::scheduleCommonTask(ExecutableTaskPtr common_task) +void BackgroundJobsAssignee::scheduleCommonTask(ExecutableTaskPtr common_task, bool need_trigger) { - bool res = getContext()->getCommonExecutor()->trySchedule(common_task); + bool res = getContext()->getCommonExecutor()->trySchedule(common_task) && need_trigger; res ? trigger() : postpone(); } diff --git a/src/Storages/MergeTree/BackgroundJobsAssignee.h b/src/Storages/MergeTree/BackgroundJobsAssignee.h index b9b6114f121..e6c5845c657 100644 --- a/src/Storages/MergeTree/BackgroundJobsAssignee.h +++ b/src/Storages/MergeTree/BackgroundJobsAssignee.h @@ -41,7 +41,7 @@ private: /// How many times execution of background job failed or we have /// no new jobs. - std::atomic<size_t> no_work_done_count{0}; + size_t no_work_done_count = 0; /// Scheduling task which assign jobs in background pool BackgroundSchedulePool::TaskHolder holder; @@ -69,7 +69,7 @@ public: void scheduleMergeMutateTask(ExecutableTaskPtr merge_task); void scheduleFetchTask(ExecutableTaskPtr fetch_task); void scheduleMoveTask(ExecutableTaskPtr move_task); - void scheduleCommonTask(ExecutableTaskPtr common_task); + void scheduleCommonTask(ExecutableTaskPtr common_task, bool need_trigger); /// Just call finish ~BackgroundJobsAssignee(); diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 8960a208680..e952deb5a87 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -24,6 +25,7 @@ namespace fs = std::filesystem; namespace CurrentMetrics { extern const Metric ReplicatedSend; + extern const Metric ReplicatedFetch; } namespace DB diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index dda7f235d97..f8f6bfff0ae 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -276,27 +276,6 @@ const KeyCondition::AtomMap KeyCondition::atom_map return true; } }, - { - "notLike", - [] (RPNElement & out, const Field & value) - { - if (value.getType() != Field::Types::String) - return false; - - String prefix = extractFixedPrefixFromLikePattern(value.get<const String &>()); - if (prefix.empty()) - return false; - - String right_bound = firstStringThatIsGreaterThanAllStringsWithPrefix(prefix); - - out.function = RPNElement::FUNCTION_NOT_IN_RANGE; - out.range = !right_bound.empty() - ?
Range(prefix, true, right_bound, false) - : Range::createLeftBounded(prefix, true); - - return true; - } - }, { "startsWith", [] (RPNElement & out, const Field & value) diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 4a438795c88..a3e549ecda3 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -94,7 +94,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const String local_tmp_prefix = global_ctx->parent_part ? "" : "tmp_merge_"; const String local_tmp_suffix = global_ctx->parent_part ? ctx->suffix : ""; - if (global_ctx->merges_blocker->isCancelled()) + if (global_ctx->merges_blocker->isCancelled() || global_ctx->merge_list_element_ptr->is_cancelled.load(std::memory_order_relaxed)) throw Exception("Cancelled merging parts", ErrorCodes::ABORTED); /// We don't want to perform merge assigned with TTL as normal merge, so @@ -344,7 +344,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() global_ctx->merging_executor.reset(); global_ctx->merged_pipeline.reset(); - if (global_ctx->merges_blocker->isCancelled()) + if (global_ctx->merges_blocker->isCancelled() || global_ctx->merge_list_element_ptr->is_cancelled.load(std::memory_order_relaxed)) throw Exception("Cancelled merging parts", ErrorCodes::ABORTED); if (ctx->need_remove_expired_values && global_ctx->ttl_merges_blocker->isCancelled()) @@ -443,7 +443,8 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const bool MergeTask::VerticalMergeStage::executeVerticalMergeForOneColumn() const { Block block; - if (!global_ctx->merges_blocker->isCancelled() && ctx->executor->pull(block)) + if (!global_ctx->merges_blocker->isCancelled() && !global_ctx->merge_list_element_ptr->is_cancelled.load(std::memory_order_relaxed) + && ctx->executor->pull(block)) { ctx->column_elems_written += block.rows(); ctx->column_to->write(block); @@ -458,7 +459,7 @@ bool MergeTask::VerticalMergeStage::executeVerticalMergeForOneColumn() const void MergeTask::VerticalMergeStage::finalizeVerticalMergeForOneColumn() const { const String & column_name = ctx->it_name_and_type->name; - if (global_ctx->merges_blocker->isCancelled()) + if (global_ctx->merges_blocker->isCancelled() || global_ctx->merge_list_element_ptr->is_cancelled.load(std::memory_order_relaxed)) throw Exception("Cancelled merging parts", ErrorCodes::ABORTED); ctx->executor.reset(); diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 1cc02034c8d..5522edf86a3 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -87,7 +88,6 @@ namespace ProfileEvents namespace CurrentMetrics { extern const Metric DelayedInserts; - extern const Metric BackgroundMovePoolTask; } @@ -1345,17 +1345,19 @@ static bool isOldPartDirectory(const DiskPtr & disk, const String & directory_pa } -void MergeTreeData::clearOldTemporaryDirectories(const MergeTreeDataMergerMutator & merger_mutator, size_t custom_directories_lifetime_seconds) +size_t MergeTreeData::clearOldTemporaryDirectories(const MergeTreeDataMergerMutator & merger_mutator, size_t custom_directories_lifetime_seconds) { /// If the method is already called from another thread, then we don't need to do anything. 
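The deferred-lock plus try_lock pair used below is the standard way to let a periodic job skip a run instead of queueing behind another thread. A minimal standalone sketch of the same pattern (invented names):

#include <mutex>

std::mutex cleanup_mutex;

size_t clearOldStuff()
{
    std::unique_lock lock(cleanup_mutex, std::defer_lock);  /// construct without acquiring
    if (!lock.try_lock())
        return 0;        /// another thread is already cleaning: report nothing cleared
    size_t cleared = 0;
    /// ... remove expired entries, incrementing `cleared` ...
    return cleared;      /// the destructor releases the mutex
}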
std::unique_lock lock(clear_old_temporary_directories_mutex, std::defer_lock); if (!lock.try_lock()) - return; + return 0; const auto settings = getSettings(); time_t current_time = time(nullptr); ssize_t deadline = current_time - custom_directories_lifetime_seconds; + size_t cleared_count = 0; + /// Delete temporary directories older than a day. for (const auto & [path, disk] : getRelativeDataPathsWithDisks()) { @@ -1367,18 +1369,22 @@ void MergeTreeData::clearOldTemporaryDirectories(const MergeTreeDataMergerMutato continue; } const std::string & full_path = fullPath(disk, it->path()); - if (merger_mutator.hasTemporaryPart(basename)) - { - LOG_WARNING(log, "{} is an active destination for one of merge/mutation (consider increasing temporary_directories_lifetime setting)", full_path); - continue; - } try { if (disk->isDirectory(it->path()) && isOldPartDirectory(disk, it->path(), deadline)) { - LOG_WARNING(log, "Removing temporary directory {}", full_path); - disk->removeRecursive(it->path()); + if (merger_mutator.hasTemporaryPart(basename)) + { + LOG_WARNING(log, "{} is an active destination for one of merge/mutation (consider increasing temporary_directories_lifetime setting)", full_path); + continue; + } + else + { + LOG_WARNING(log, "Removing temporary directory {}", full_path); + disk->removeRecursive(it->path()); + ++cleared_count; + } } } /// see getModificationTime() @@ -1402,6 +1408,8 @@ void MergeTreeData::clearOldTemporaryDirectories(const MergeTreeDataMergerMutato } } } + + return cleared_count; } @@ -1515,7 +1523,7 @@ void MergeTreeData::removePartsFinally(const MergeTreeData::DataPartsVector & pa } } -void MergeTreeData::clearOldPartsFromFilesystem(bool force) +size_t MergeTreeData::clearOldPartsFromFilesystem(bool force) { DataPartsVector parts_to_remove = grabOldParts(force); clearPartsFromFilesystem(parts_to_remove); @@ -1525,6 +1533,8 @@ void MergeTreeData::clearOldPartsFromFilesystem(bool force) /// NOTE: we can drop files from cache more selectively but this is good enough. 
if (!parts_to_remove.empty()) getContext()->dropMMappedFileCache(); + + return parts_to_remove.size(); } void MergeTreeData::clearPartsFromFilesystem(const DataPartsVector & parts_to_remove) @@ -1566,7 +1576,7 @@ } } -void MergeTreeData::clearOldWriteAheadLogs() +size_t MergeTreeData::clearOldWriteAheadLogs() { DataPartsVector parts = getDataPartsVector(); std::vector<std::pair<Int64, Int64>> all_block_numbers_on_disk; @@ -1577,7 +1587,7 @@ all_block_numbers_on_disk.emplace_back(part->info.min_block, part->info.max_block); if (all_block_numbers_on_disk.empty()) - return; + return 0; std::sort(all_block_numbers_on_disk.begin(), all_block_numbers_on_disk.end()); block_numbers_on_disk.push_back(all_block_numbers_on_disk[0]); @@ -1605,6 +1615,7 @@ return false; }; + size_t cleared_count = 0; auto disks = getStoragePolicy()->getDisks(); for (auto disk_it = disks.rbegin(); disk_it != disks.rend(); ++disk_it) { @@ -1616,22 +1627,30 @@ { LOG_DEBUG(log, "Removing from filesystem the outdated WAL file " + it->name()); disk_ptr->removeFile(relative_data_path + it->name()); + ++cleared_count; } } } + + return cleared_count; } -void MergeTreeData::clearEmptyParts() +size_t MergeTreeData::clearEmptyParts() { if (!getSettings()->remove_empty_parts) - return; + return 0; + size_t cleared_count = 0; auto parts = getDataPartsVector(); for (const auto & part : parts) { if (part->rows_count == 0) + { dropPartNoWaitNoThrow(part->name); + ++cleared_count; + } } + return cleared_count; } void MergeTreeData::rename(const String & new_table_path, const StorageID & new_table_id) @@ -4394,8 +4413,13 @@ static void selectBestProjection( if (normal_result_ptr->error()) return; - sum_marks += normal_result_ptr->marks(); - candidate.merge_tree_normal_select_result_ptr = normal_result_ptr; + if (normal_result_ptr->marks() == 0) + candidate.complete = true; + else + { + sum_marks += normal_result_ptr->marks(); + candidate.merge_tree_normal_select_result_ptr = normal_result_ptr; + } } candidate.merge_tree_projection_select_result_ptr = projection_result_ptr; @@ -4430,7 +4454,7 @@ Block MergeTreeData::getMinMaxCountProjectionBlock( required_columns.begin(), required_columns.end(), [&](const auto & name) { return primary_key_max_column_name == name; }); } - auto minmax_count_columns = block.mutateColumns(); + auto partition_minmax_count_columns = block.mutateColumns(); auto insert = [](ColumnAggregateFunction & column, const Field & value) { auto func = column.getAggregateFunction(); @@ -4475,11 +4499,18 @@ } size_t pos = 0; + for (size_t i : metadata_snapshot->minmax_count_projection->partition_value_indices) + { + if (i >= part->partition.value.size()) + throw Exception("Partition value index is out of boundary.
It's a bug", ErrorCodes::LOGICAL_ERROR); + partition_minmax_count_columns[pos++]->insert(part->partition.value[i]); + } + size_t minmax_idx_size = part->minmax_idx->hyperrectangle.size(); for (size_t i = 0; i < minmax_idx_size; ++i) { - auto & min_column = assert_cast(*minmax_count_columns[pos++]); - auto & max_column = assert_cast(*minmax_count_columns[pos++]); + auto & min_column = assert_cast(*partition_minmax_count_columns[pos++]); + auto & max_column = assert_cast(*partition_minmax_count_columns[pos++]); const auto & range = part->minmax_idx->hyperrectangle[i]; insert(min_column, range.left); insert(max_column, range.right); @@ -4488,15 +4519,14 @@ Block MergeTreeData::getMinMaxCountProjectionBlock( if (!primary_key_max_column_name.empty()) { const auto & primary_key_column = *part->index[0]; - auto primary_key_column_size = primary_key_column.size(); - auto & min_column = assert_cast(*minmax_count_columns[pos++]); - auto & max_column = assert_cast(*minmax_count_columns[pos++]); + auto & min_column = assert_cast(*partition_minmax_count_columns[pos++]); + auto & max_column = assert_cast(*partition_minmax_count_columns[pos++]); insert(min_column, primary_key_column[0]); - insert(max_column, primary_key_column[primary_key_column_size - 1]); + insert(max_column, primary_key_column[primary_key_column.size() - 1]); } { - auto & column = assert_cast(*minmax_count_columns.back()); + auto & column = assert_cast(*partition_minmax_count_columns.back()); auto func = column.getAggregateFunction(); Arena & arena = column.createOrGetArena(); size_t size_of_state = func->sizeOfData(); @@ -4508,7 +4538,7 @@ Block MergeTreeData::getMinMaxCountProjectionBlock( column.insertFrom(place); } } - block.setColumns(std::move(minmax_count_columns)); + block.setColumns(std::move(partition_minmax_count_columns)); Block res; for (const auto & name : required_columns) @@ -4527,12 +4557,12 @@ Block MergeTreeData::getMinMaxCountProjectionBlock( } -bool MergeTreeData::getQueryProcessingStageWithAggregateProjection( +std::optional MergeTreeData::getQueryProcessingStageWithAggregateProjection( ContextPtr query_context, const StorageMetadataPtr & metadata_snapshot, SelectQueryInfo & query_info) const { const auto & settings = query_context->getSettingsRef(); if (!settings.allow_experimental_projection_optimization || query_info.ignore_projections || query_info.is_projection_query) - return false; + return std::nullopt; const auto & query_ptr = query_info.original_query; @@ -4540,16 +4570,16 @@ bool MergeTreeData::getQueryProcessingStageWithAggregateProjection( { // Currently projections don't support final yet. if (select->final()) - return false; + return std::nullopt; // Currently projections don't support ARRAY JOIN yet. if (select->arrayJoinExpressionList().first) - return false; + return std::nullopt; } // Currently projections don't support sampling yet. 
if (settings.parallel_replicas_count > 1) - return false; + return std::nullopt; InterpreterSelectQuery select( query_ptr, @@ -4777,6 +4807,25 @@ bool MergeTreeData::getQueryProcessingStageWithAggregateProjection( query_info.minmax_count_projection_block = getMinMaxCountProjectionBlock( metadata_snapshot, minmax_conut_projection_candidate->required_columns, query_info, parts, normal_parts, query_context); + if (minmax_conut_projection_candidate->prewhere_info) + { + const auto & prewhere_info = minmax_conut_projection_candidate->prewhere_info; + if (prewhere_info->alias_actions) + ExpressionActions(prewhere_info->alias_actions, actions_settings).execute(query_info.minmax_count_projection_block); + + if (prewhere_info->row_level_filter) + { + ExpressionActions(prewhere_info->row_level_filter, actions_settings).execute(query_info.minmax_count_projection_block); + query_info.minmax_count_projection_block.erase(prewhere_info->row_level_column_name); + } + + if (prewhere_info->prewhere_actions) + ExpressionActions(prewhere_info->prewhere_actions, actions_settings).execute(query_info.minmax_count_projection_block); + + if (prewhere_info->remove_prewhere_column) + query_info.minmax_count_projection_block.erase(prewhere_info->prewhere_column_name); + } + if (normal_parts.empty()) { selected_candidate = &*minmax_conut_projection_candidate; @@ -4887,14 +4936,14 @@ bool MergeTreeData::getQueryProcessingStageWithAggregateProjection( } if (!selected_candidate) - return false; + return std::nullopt; else if (min_sum_marks == 0) { /// If selected_projection indicated an empty result set. Remember it in query_info but /// don't use projection to run the query, because projection pipeline with empty result /// set will not work correctly with empty_result_for_aggregation_by_empty_set. 
query_info.merge_tree_empty_result = true; - return false; + return std::nullopt; } if (selected_candidate->desc->type == ProjectionDescription::Type::Aggregate) { ... = std::make_shared<SubqueriesForSets>(std::move(select.getQueryAnalyzer()->getSubqueriesForSets())); } - query_info.projection = std::move(*selected_candidate); - return true; + return *selected_candidate; } @@ -4918,11 +4966,14 @@ QueryProcessingStage::Enum MergeTreeData::getQueryProcessingStage( { if (to_stage >= QueryProcessingStage::Enum::WithMergeableState) { - if (getQueryProcessingStageWithAggregateProjection(query_context, metadata_snapshot, query_info)) + if (auto projection = getQueryProcessingStageWithAggregateProjection(query_context, metadata_snapshot, query_info)) { + query_info.projection = std::move(projection); if (query_info.projection->desc->type == ProjectionDescription::Type::Aggregate) return QueryProcessingStage::Enum::WithMergeableState; } + else + query_info.projection = std::nullopt; } return QueryProcessingStage::Enum::FetchColumns; diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 1b617a2ec71..2eee6774b60 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -383,7 +383,7 @@ public: DataPartsVector & normal_parts, ContextPtr query_context) const; - bool getQueryProcessingStageWithAggregateProjection( + std::optional<ProjectionCandidate> getQueryProcessingStageWithAggregateProjection( ContextPtr query_context, const StorageMetadataPtr & metadata_snapshot, SelectQueryInfo & query_info) const; QueryProcessingStage::Enum getQueryProcessingStage( @@ -542,17 +542,17 @@ public: /// Delete irrelevant parts from memory and disk. /// If 'force' - don't wait for old_parts_lifetime. - void clearOldPartsFromFilesystem(bool force = false); + size_t clearOldPartsFromFilesystem(bool force = false); void clearPartsFromFilesystem(const DataPartsVector & parts); /// Delete WAL files containing parts, that all already stored on disk. - void clearOldWriteAheadLogs(); + size_t clearOldWriteAheadLogs(); /// Delete all directories which names begin with "tmp" /// Must be called with locked lockForShare() because it's using relative_data_path. - void clearOldTemporaryDirectories(const MergeTreeDataMergerMutator & merger_mutator, size_t custom_directories_lifetime_seconds); + size_t clearOldTemporaryDirectories(const MergeTreeDataMergerMutator & merger_mutator, size_t custom_directories_lifetime_seconds); - void clearEmptyParts(); + size_t clearEmptyParts(); /// After the call to dropAllData() no method can be called. /// Deletes the data directory and flushes the uncompressed blocks cache and the marks cache.
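Changing the clear*() methods from void to size_t lets callers observe whether a cleanup pass actually removed anything. A sketch (invented names, not the actual caller) of how such counts can drive the rescheduling decision, tying back to the backoff logic shown earlier:

#include <cstddef>

struct Cleaner
{
    /// Stand-ins for the MergeTreeData methods above; each returns how many items it removed.
    size_t clearOldPartsFromFilesystem() { return 0; }
    size_t clearOldWriteAheadLogs() { return 2; }
    size_t clearEmptyParts() { return 1; }
};

bool runCleanupTask(Cleaner & c)
{
    const size_t cleared = c.clearOldPartsFromFilesystem() + c.clearOldWriteAheadLogs() + c.clearEmptyParts();
    return cleared > 0;  /// true -> reschedule soon (trigger), false -> back off (postpone)
}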
diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index c1637ab538b..6161c4c32a3 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -41,18 +41,9 @@ #include -namespace ProfileEvents -{ - extern const Event MergedRows; - extern const Event MergedUncompressedBytes; - extern const Event MergesTimeMilliseconds; - extern const Event Merge; -} - namespace CurrentMetrics { extern const Metric BackgroundMergesAndMutationsPoolTask; - extern const Metric PartMutation; } namespace DB diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 106bca97a38..036e7d89c5a 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -42,14 +42,6 @@ #include #include -namespace ProfileEvents -{ - extern const Event SelectedParts; - extern const Event SelectedRanges; - extern const Event SelectedMarks; -} - - namespace DB { @@ -783,30 +775,61 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd /// Let's start analyzing all useful indices - struct DataSkippingIndexAndCondition + struct IndexStat { - MergeTreeIndexPtr index; - MergeTreeIndexConditionPtr condition; std::atomic<size_t> total_granules{0}; std::atomic<size_t> granules_dropped{0}; std::atomic<size_t> total_parts{0}; std::atomic<size_t> parts_dropped{0}; + }; + + struct DataSkippingIndexAndCondition + { + MergeTreeIndexPtr index; + MergeTreeIndexConditionPtr condition; + IndexStat stat; DataSkippingIndexAndCondition(MergeTreeIndexPtr index_, MergeTreeIndexConditionPtr condition_) : index(index_), condition(condition_) { } }; + + struct MergedDataSkippingIndexAndCondition + { + std::vector<MergeTreeIndexPtr> indices; + MergeTreeIndexMergedConditionPtr condition; + IndexStat stat; + + void addIndex(const MergeTreeIndexPtr & index) + { + indices.push_back(index); + condition->addIndex(indices.back()); + } + }; + std::list<DataSkippingIndexAndCondition> useful_indices; + std::map<std::pair<String, size_t>, MergedDataSkippingIndexAndCondition> merged_indices; if (use_skip_indexes) { for (const auto & index : metadata_snapshot->getSecondaryIndices()) { auto index_helper = MergeTreeIndexFactory::instance().get(index); - auto condition = index_helper->createIndexCondition(query_info, context); - if (!condition->alwaysUnknownOrTrue()) - useful_indices.emplace_back(index_helper, condition); + if (index_helper->isMergeable()) + { + auto [it, inserted] = merged_indices.try_emplace({index_helper->index.type, index_helper->getGranularity()}); + if (inserted) + it->second.condition = index_helper->createIndexMergedCondtition(query_info, metadata_snapshot); + + it->second.addIndex(index_helper); + } + else + { + auto condition = index_helper->createIndexCondition(query_info, context); + if (!condition->alwaysUnknownOrTrue()) + useful_indices.emplace_back(index_helper, condition); + } } } @@ -883,7 +906,7 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd if (ranges.ranges.empty()) break; - index_and_condition.total_parts.fetch_add(1, std::memory_order_relaxed); + index_and_condition.stat.total_parts.fetch_add(1, std::memory_order_relaxed); size_t total_granules = 0; size_t granules_dropped = 0; @@ -900,11 +923,34 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd uncompressed_cache.get(), log); - index_and_condition.total_granules.fetch_add(total_granules, std::memory_order_relaxed); -
index_and_condition.granules_dropped.fetch_add(granules_dropped, std::memory_order_relaxed); + index_and_condition.stat.total_granules.fetch_add(total_granules, std::memory_order_relaxed); + index_and_condition.stat.granules_dropped.fetch_add(granules_dropped, std::memory_order_relaxed); if (ranges.ranges.empty()) - index_and_condition.parts_dropped.fetch_add(1, std::memory_order_relaxed); + index_and_condition.stat.parts_dropped.fetch_add(1, std::memory_order_relaxed); + } + + for (auto & [_, indices_and_condition] : merged_indices) + { + if (ranges.ranges.empty()) + break; + + indices_and_condition.stat.total_parts.fetch_add(1, std::memory_order_relaxed); + + size_t total_granules = 0; + size_t granules_dropped = 0; + ranges.ranges = filterMarksUsingMergedIndex( + indices_and_condition.indices, indices_and_condition.condition, + part, ranges.ranges, + settings, reader_settings, + total_granules, granules_dropped, + mark_cache.get(), uncompressed_cache.get(), log); + + indices_and_condition.stat.total_granules.fetch_add(total_granules, std::memory_order_relaxed); + indices_and_condition.stat.granules_dropped.fetch_add(granules_dropped, std::memory_order_relaxed); + + if (ranges.ranges.empty()) + indices_and_condition.stat.parts_dropped.fetch_add(1, std::memory_order_relaxed); } if (!ranges.ranges.empty()) @@ -985,8 +1031,8 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd log, "Index {} has dropped {}/{} granules.", backQuote(index_name), - index_and_condition.granules_dropped, - index_and_condition.total_granules); + index_and_condition.stat.granules_dropped, + index_and_condition.stat.total_granules); std::string description = index_and_condition.index->index.type + " GRANULARITY " + std::to_string(index_and_condition.index->index.granularity); @@ -995,8 +1041,25 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd .type = ReadFromMergeTree::IndexType::Skip, .name = index_name, .description = std::move(description), //-V1030 - .num_parts_after = index_and_condition.total_parts - index_and_condition.parts_dropped, - .num_granules_after = index_and_condition.total_granules - index_and_condition.granules_dropped}); + .num_parts_after = index_and_condition.stat.total_parts - index_and_condition.stat.parts_dropped, + .num_granules_after = index_and_condition.stat.total_granules - index_and_condition.stat.granules_dropped}); + } + + for (const auto & [type_with_granularity, index_and_condition] : merged_indices) + { + const auto & index_name = "Merged"; + LOG_DEBUG(log, "Index {} has dropped {}/{} granules.", + backQuote(index_name), + index_and_condition.stat.granules_dropped, index_and_condition.stat.total_granules); + + std::string description = "MERGED GRANULARITY " + std::to_string(type_with_granularity.second); + + index_stats.emplace_back(ReadFromMergeTree::IndexStat{ + .type = ReadFromMergeTree::IndexType::Skip, + .name = index_name, + .description = std::move(description), //-V1030 + .num_parts_after = index_and_condition.stat.total_parts - index_and_condition.stat.parts_dropped, + .num_granules_after = index_and_condition.stat.total_granules - index_and_condition.stat.granules_dropped}); } return parts_with_ranges; @@ -1512,6 +1575,106 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingIndex( return res; } +MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingMergedIndex( + MergeTreeIndices indices, + MergeTreeIndexMergedConditionPtr condition, + MergeTreeData::DataPartPtr part, + const MarkRanges & ranges, 
+ const Settings & settings, + const MergeTreeReaderSettings & reader_settings, + size_t & total_granules, + size_t & granules_dropped, + MarkCache * mark_cache, + UncompressedCache * uncompressed_cache, + Poco::Logger * log) +{ + for (const auto & index_helper : indices) + { + if (!part->volume->getDisk()->exists(part->getFullRelativePath() + index_helper->getFileName() + ".idx")) + { + LOG_DEBUG(log, "File for index {} does not exist. Skipping it.", backQuote(index_helper->index.name)); + return ranges; + } + } + + auto index_granularity = indices.front()->index.granularity; + + const size_t min_marks_for_seek = roundRowsOrBytesToMarks( + settings.merge_tree_min_rows_for_seek, + settings.merge_tree_min_bytes_for_seek, + part->index_granularity_info.fixed_index_granularity, + part->index_granularity_info.index_granularity_bytes); + + size_t marks_count = part->getMarksCount(); + size_t final_mark = part->index_granularity.hasFinalMark(); + size_t index_marks_count = (marks_count - final_mark + index_granularity - 1) / index_granularity; + + std::vector<std::unique_ptr<MergeTreeIndexReader>> readers; + for (const auto & index_helper : indices) + { + readers.emplace_back( + std::make_unique<MergeTreeIndexReader>( + index_helper, + part, + index_marks_count, + ranges, + mark_cache, + uncompressed_cache, + reader_settings)); + } + + MarkRanges res; + + /// Some granules can cover two or more ranges, + /// this variable is stored to avoid reading the same granule twice. + MergeTreeIndexGranules granules(indices.size(), nullptr); + bool granules_filled = false; + size_t last_index_mark = 0; + for (const auto & range : ranges) + { + MarkRange index_range( + range.begin / index_granularity, + (range.end + index_granularity - 1) / index_granularity); + + if (last_index_mark != index_range.begin || !granules_filled) + for (auto & reader : readers) + reader->seek(index_range.begin); + + total_granules += index_range.end - index_range.begin; + + for (size_t index_mark = index_range.begin; index_mark < index_range.end; ++index_mark) + { + if (index_mark != index_range.begin || !granules_filled || last_index_mark != index_range.begin) + { + for (size_t i = 0; i < readers.size(); ++i) + { + granules[i] = readers[i]->read(); + granules_filled = true; + } + } + + MarkRange data_range( + std::max(range.begin, index_mark * index_granularity), + std::min(range.end, (index_mark + 1) * index_granularity)); + + if (!condition->mayBeTrueOnGranule(granules)) + { + ++granules_dropped; + continue; + } + + if (res.empty() || res.back().end - data_range.begin > min_marks_for_seek) + res.push_back(data_range); + else + res.back().end = data_range.end; + } + + last_index_mark = index_range.end - 1; + } + + return res; +} + void MergeTreeDataSelectExecutor::selectPartsToRead( MergeTreeData::DataPartsVector & parts, const std::optional<std::unordered_set<String>> & part_values, diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h index 04eb75a0dbb..f19d145fc93 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h @@ -94,6 +94,19 @@ private: UncompressedCache * uncompressed_cache, Poco::Logger * log); + static MarkRanges filterMarksUsingMergedIndex( + MergeTreeIndices indices, + MergeTreeIndexMergedConditionPtr condition, + MergeTreeData::DataPartPtr part, + const MarkRanges & ranges, + const Settings & settings, + const MergeTreeReaderSettings & reader_settings, + size_t & total_granules, + size_t & granules_dropped, + MarkCache * mark_cache, + UncompressedCache * 
uncompressed_cache, + Poco::Logger * log); + struct PartFilterCounters { size_t num_initial_selected_parts = 0; diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp index 950765ed2b9..da66506dd44 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp @@ -68,7 +68,7 @@ bool MergeTreeIndexBloomFilter::mayBenefitFromIndexForIn(const ASTPtr & node) co } } - return true; + return false; } MergeTreeIndexAggregatorPtr MergeTreeIndexBloomFilter::createIndexAggregator() const diff --git a/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp b/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp new file mode 100644 index 00000000000..58629b63cda --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp @@ -0,0 +1,115 @@ +#include +#include + +#include +#include +#include + +#include +#include +#include + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + + +MergeTreeIndexGranuleHypothesis::MergeTreeIndexGranuleHypothesis(const String & index_name_) + : index_name(index_name_), is_empty(true), met(false) +{ +} + +MergeTreeIndexGranuleHypothesis::MergeTreeIndexGranuleHypothesis(const String & index_name_, const bool met_) + : index_name(index_name_), is_empty(false), met(met_) +{ +} + +void MergeTreeIndexGranuleHypothesis::serializeBinary(WriteBuffer & ostr) const +{ + const auto & size_type = DataTypePtr(std::make_shared<DataTypeUInt8>()); + size_type->getDefaultSerialization()->serializeBinary(static_cast<UInt8>(met), ostr); +} + +void MergeTreeIndexGranuleHypothesis::deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion version) +{ + if (version != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown index version {}.", version); + + Field field_met; + const auto & size_type = DataTypePtr(std::make_shared<DataTypeUInt8>()); + size_type->getDefaultSerialization()->deserializeBinary(field_met, istr); + met = field_met.get<UInt8>(); + is_empty = false; +} + +MergeTreeIndexAggregatorHypothesis::MergeTreeIndexAggregatorHypothesis(const String & index_name_, const String & column_name_) + : index_name(index_name_), column_name(column_name_) +{ +} + +MergeTreeIndexGranulePtr MergeTreeIndexAggregatorHypothesis::getGranuleAndReset() +{ + const auto granule = std::make_shared<MergeTreeIndexGranuleHypothesis>(index_name, met); + met = true; + is_empty = true; + return granule; +} + +void MergeTreeIndexAggregatorHypothesis::update(const Block & block, size_t * pos, size_t limit) +{ + size_t rows_read = std::min(limit, block.rows() - *pos); + if (rows_read == 0) + return; + const auto & column = block.getByName(column_name).column->cut(*pos, rows_read); + + if (!column->hasEqualValues() || column->get64(0) == 0) + met = false; + + is_empty = false; + *pos += rows_read; +} + +MergeTreeIndexGranulePtr MergeTreeIndexHypothesis::createIndexGranule() const +{ + return std::make_shared<MergeTreeIndexGranuleHypothesis>(index.name); +} + +MergeTreeIndexAggregatorPtr MergeTreeIndexHypothesis::createIndexAggregator() const +{ + return std::make_shared<MergeTreeIndexAggregatorHypothesis>(index.name, index.sample_block.getNames().front()); +} + +MergeTreeIndexConditionPtr MergeTreeIndexHypothesis::createIndexCondition( + const SelectQueryInfo &, ContextPtr) const +{ + throw Exception("Not supported", ErrorCodes::LOGICAL_ERROR); +} + +MergeTreeIndexMergedConditionPtr MergeTreeIndexHypothesis::createIndexMergedCondition( + const SelectQueryInfo & query_info, StorageMetadataPtr storage_metadata) const +{ + return std::make_shared<MergeTreeIndexHypothesisMergedCondition>( + query_info, storage_metadata->getConstraints(), 
index.granularity); +} + +bool MergeTreeIndexHypothesis::mayBenefitFromIndexForIn(const ASTPtr &) const +{ + return false; +} + +MergeTreeIndexPtr hypothesisIndexCreator(const IndexDescription & index) +{ + return std::make_shared<MergeTreeIndexHypothesis>(index); +} + +void hypothesisIndexValidator(const IndexDescription & index, bool /*attach*/) +{ + if (index.expression_list_ast->children.size() != 1) + throw Exception("Hypothesis index needs exactly one expression", ErrorCodes::LOGICAL_ERROR); +} + +} diff --git a/src/Storages/MergeTree/MergeTreeIndexHypothesis.h b/src/Storages/MergeTree/MergeTreeIndexHypothesis.h new file mode 100644 index 00000000000..bbdf70a052c --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIndexHypothesis.h @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +class MergeTreeIndexHypothesis; + +struct MergeTreeIndexGranuleHypothesis : public IMergeTreeIndexGranule +{ + explicit MergeTreeIndexGranuleHypothesis( + const String & index_name_); + + MergeTreeIndexGranuleHypothesis( + const String & index_name_, + const bool met_); + + void serializeBinary(WriteBuffer & ostr) const override; + void deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion version) override; + + bool empty() const override { return is_empty; } + + ~MergeTreeIndexGranuleHypothesis() override = default; + + const String & index_name; + bool is_empty = true; + bool met = true; +}; + + +struct MergeTreeIndexAggregatorHypothesis : IMergeTreeIndexAggregator +{ + explicit MergeTreeIndexAggregatorHypothesis( + const String & index_name_, const String & column_name_); + + ~MergeTreeIndexAggregatorHypothesis() override = default; + + bool empty() const override { return is_empty; } + + MergeTreeIndexGranulePtr getGranuleAndReset() override; + + void update(const Block & block, size_t * pos, size_t limit) override; + +private: + const String & index_name; + String column_name; + + bool met = true; + bool is_empty = true; +}; + +class MergeTreeIndexHypothesis : public IMergeTreeIndex +{ +public: + MergeTreeIndexHypothesis( + const IndexDescription & index_) + : IMergeTreeIndex(index_) + {} + + ~MergeTreeIndexHypothesis() override = default; + + bool isMergeable() const override { return true; } + + MergeTreeIndexGranulePtr createIndexGranule() const override; + MergeTreeIndexAggregatorPtr createIndexAggregator() const override; + + MergeTreeIndexConditionPtr createIndexCondition( + const SelectQueryInfo & query, ContextPtr context) const override; + + MergeTreeIndexMergedConditionPtr createIndexMergedCondition( + const SelectQueryInfo & query_info, StorageMetadataPtr storage_metadata) const override; + + bool mayBenefitFromIndexForIn(const ASTPtr & node) const override; + + size_t max_rows = 0; +}; + +} diff --git a/src/Storages/MergeTree/MergeTreeIndexHypothesisMergedCondition.cpp b/src/Storages/MergeTree/MergeTreeIndexHypothesisMergedCondition.cpp new file mode 100644 index 00000000000..2fa0b98bc30 --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIndexHypothesisMergedCondition.cpp @@ -0,0 +1,204 @@ +#include + +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +MergeTreeIndexHypothesisMergedCondition::MergeTreeIndexHypothesisMergedCondition( + const SelectQueryInfo & query, const ConstraintsDescription & constraints, size_t granularity_) + : IMergeTreeIndexMergedCondition(granularity_) +{ + const auto & select = query.query->as<ASTSelectQuery &>(); + + if (select.where() && select.prewhere()) + expression_ast 
= makeASTFunction( + "and", + select.where()->clone(), + select.prewhere()->clone()); + else if (select.where()) + expression_ast = select.where()->clone(); + else if (select.prewhere()) + expression_ast = select.prewhere()->clone(); + + expression_cnf = std::make_unique<CNFQuery>( + expression_ast ? TreeCNFConverter::toCNF(expression_ast) : CNFQuery::AndGroup{}); + + addConstraints(constraints); +} + +void MergeTreeIndexHypothesisMergedCondition::addIndex(const MergeTreeIndexPtr & index) +{ + if (!index->isMergeable() || index->getGranularity() != granularity) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Index {} cannot be merged", index->index.type); + + const auto hypothesis_index = std::dynamic_pointer_cast<const MergeTreeIndexHypothesis>(index); + if (!hypothesis_index) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Only hypothesis index is supported here"); + + static const NameSet relations = { "equals", "notEquals", "less", "lessOrEquals", "greaterOrEquals", "greater"}; + + // TODO: move to index hypothesis + std::vector<ASTPtr> compare_hypotheses_data; + std::vector<CNFQuery::OrGroup> hypotheses_data; + const auto cnf = TreeCNFConverter::toCNF(hypothesis_index->index.expression_list_ast->children.front()).pullNotOutFunctions(); + + for (const auto & group : cnf.getStatements()) + { + if (group.size() == 1) + { + hypotheses_data.push_back(group); + CNFQuery::AtomicFormula atomic_formula = *group.begin(); + CNFQuery::AtomicFormula atom{atomic_formula.negative, atomic_formula.ast->clone()}; + pushNotIn(atom); + assert(!atom.negative); + + const auto * func = atom.ast->as<ASTFunction>(); + if (func && relations.count(func->name)) + compare_hypotheses_data.push_back(atom.ast); + } + } + + index_to_compare_atomic_hypotheses.push_back(compare_hypotheses_data); + index_to_atomic_hypotheses.push_back(hypotheses_data); +} + +void MergeTreeIndexHypothesisMergedCondition::addConstraints(const ConstraintsDescription & constraints_description) +{ + auto atomic_constraints_data = constraints_description.getAtomicConstraintData(); + for (const auto & atomic_formula : atomic_constraints_data) + { + CNFQuery::AtomicFormula atom{atomic_formula.negative, atomic_formula.ast->clone()}; + pushNotIn(atom); + atomic_constraints.push_back(atom.ast); + } +} + +/// Replaces < -> <=, > -> >= and assumes that all hypotheses are true then checks if path exists +bool MergeTreeIndexHypothesisMergedCondition::alwaysUnknownOrTrue() const +{ + std::vector<ASTPtr> active_atomic_formulas(atomic_constraints); + for (const auto & hypothesis : index_to_compare_atomic_hypotheses) + { + active_atomic_formulas.insert( + std::end(active_atomic_formulas), + std::begin(hypothesis), + std::end(hypothesis)); + } + + /// transform active formulas + for (auto & formula : active_atomic_formulas) + { + formula = formula->clone(); /// do all operations with copy + auto * func = formula->as<ASTFunction>(); + if (func && func->name == "less") + func->name = "lessOrEquals"; + if (func && func->name == "greater") + func->name = "greaterOrEquals"; + } + + const auto weak_graph = std::make_unique<ComparisonGraph>(active_atomic_formulas); + + bool useless = true; + expression_cnf->iterateGroups( + [&](const CNFQuery::OrGroup & or_group) + { + for (const auto & atomic_formula : or_group) + { + CNFQuery::AtomicFormula atom{atomic_formula.negative, atomic_formula.ast->clone()}; + pushNotIn(atom); + + const auto * func = atom.ast->as<ASTFunction>(); + if (func && func->arguments->children.size() == 2) + { + const auto left = weak_graph->getComponentId(func->arguments->children[0]); + const auto right = weak_graph->getComponentId(func->arguments->children[1]); + if (left && 
right && weak_graph->hasPath(left.value(), right.value())) + { + useless = false; + return; + } + } + } + }); + return useless; +} + +bool MergeTreeIndexHypothesisMergedCondition::mayBeTrueOnGranule(const MergeTreeIndexGranules & granules) const +{ + std::vector<bool> values; + for (const auto & index_granule : granules) + { + const auto granule = std::dynamic_pointer_cast<const MergeTreeIndexGranuleHypothesis>(index_granule); + if (!granule) + throw Exception("Only hypothesis index is supported here.", ErrorCodes::LOGICAL_ERROR); + values.push_back(granule->met); + } + + if (const auto it = answer_cache.find(values); it != std::end(answer_cache)) + return it->second; + + const auto & graph = getGraph(values); + + bool always_false = false; + expression_cnf->iterateGroups( + [&](const CNFQuery::OrGroup & or_group) + { + if (always_false) + return; + + for (const auto & atomic_formula : or_group) + { + CNFQuery::AtomicFormula atom{atomic_formula.negative, atomic_formula.ast->clone()}; + pushNotIn(atom); + const auto * func = atom.ast->as<ASTFunction>(); + if (func && func->arguments->children.size() == 2) + { + const auto expected = ComparisonGraph::atomToCompareResult(atom); + if (graph.isPossibleCompare(expected, func->arguments->children[0], func->arguments->children[1])) + { + /// If graph failed use matching. + /// We don't need to check constraints. + return; + } + } + } + always_false = true; + }); + + answer_cache[values] = !always_false; + return !always_false; +} + +std::unique_ptr<ComparisonGraph> MergeTreeIndexHypothesisMergedCondition::buildGraph(const std::vector<bool> & values) const +{ + std::vector<ASTPtr> active_atomic_formulas(atomic_constraints); + for (size_t i = 0; i < values.size(); ++i) + { + if (values[i]) + active_atomic_formulas.insert( + std::end(active_atomic_formulas), + std::begin(index_to_compare_atomic_hypotheses[i]), + std::end(index_to_compare_atomic_hypotheses[i])); + } + return std::make_unique<ComparisonGraph>(active_atomic_formulas); +} + +const ComparisonGraph & MergeTreeIndexHypothesisMergedCondition::getGraph(const std::vector<bool> & values) const +{ + if (!graph_cache.contains(values)) + graph_cache[values] = buildGraph(values); + return *graph_cache.at(values); +} + +} diff --git a/src/Storages/MergeTree/MergeTreeIndexHypothesisMergedCondition.h b/src/Storages/MergeTree/MergeTreeIndexHypothesisMergedCondition.h new file mode 100644 index 00000000000..530e14e15cc --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIndexHypothesisMergedCondition.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +/// MergedCondition for IndexHypothesis. 
+class MergeTreeIndexHypothesisMergedCondition : public IMergeTreeIndexMergedCondition +{ +public: + MergeTreeIndexHypothesisMergedCondition( + const SelectQueryInfo & query, const ConstraintsDescription & constraints, size_t granularity_); + + void addIndex(const MergeTreeIndexPtr & index) override; + bool alwaysUnknownOrTrue() const override; + bool mayBeTrueOnGranule(const MergeTreeIndexGranules & granules) const override; + +private: + void addConstraints(const ConstraintsDescription & constraints_description); + std::unique_ptr<ComparisonGraph> buildGraph(const std::vector<bool> & values) const; + const ComparisonGraph & getGraph(const std::vector<bool> & values) const; + + ASTPtr expression_ast; + std::unique_ptr<CNFQuery> expression_cnf; + + mutable std::unordered_map<std::vector<bool>, std::unique_ptr<ComparisonGraph>> graph_cache; + mutable std::unordered_map<std::vector<bool>, bool> answer_cache; + + std::vector<std::vector<ASTPtr>> index_to_compare_atomic_hypotheses; + std::vector<std::vector<CNFQuery::OrGroup>> index_to_atomic_hypotheses; + std::vector<ASTPtr> atomic_constraints; +}; + +} diff --git a/src/Storages/MergeTree/MergeTreeIndices.cpp b/src/Storages/MergeTree/MergeTreeIndices.cpp index b0f5b4d92f5..9d7e0cdfdbe 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.cpp +++ b/src/Storages/MergeTree/MergeTreeIndices.cpp @@ -98,6 +98,9 @@ MergeTreeIndexFactory::MergeTreeIndexFactory() registerCreator("bloom_filter", bloomFilterIndexCreatorNew); registerValidator("bloom_filter", bloomFilterIndexValidatorNew); + + registerCreator("hypothesis", hypothesisIndexCreator); + registerValidator("hypothesis", hypothesisIndexValidator); } MergeTreeIndexFactory & MergeTreeIndexFactory::instance() diff --git a/src/Storages/MergeTree/MergeTreeIndices.h b/src/Storages/MergeTree/MergeTreeIndices.h index 557af891b74..8f10b2c51ba 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.h +++ b/src/Storages/MergeTree/MergeTreeIndices.h @@ -18,6 +18,11 @@ constexpr auto INDEX_FILE_PREFIX = "skp_idx_"; namespace DB { +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + using MergeTreeIndexVersion = uint8_t; struct MergeTreeIndexFormat { @@ -87,6 +92,32 @@ public: }; using MergeTreeIndexConditionPtr = std::shared_ptr<IMergeTreeIndexCondition>; +using MergeTreeIndexConditions = std::vector<MergeTreeIndexConditionPtr>; + +struct IMergeTreeIndex; +using MergeTreeIndexPtr = std::shared_ptr<const IMergeTreeIndex>; + +/// IndexCondition that checks several indexes at the same time. +class IMergeTreeIndexMergedCondition +{ +public: + explicit IMergeTreeIndexMergedCondition(size_t granularity_) + : granularity(granularity_) + { + } + + virtual ~IMergeTreeIndexMergedCondition() = default; + + virtual void addIndex(const MergeTreeIndexPtr & index) = 0; + virtual bool alwaysUnknownOrTrue() const = 0; + virtual bool mayBeTrueOnGranule(const MergeTreeIndexGranules & granules) const = 0; + +protected: + const size_t granularity; +}; + +using MergeTreeIndexMergedConditionPtr = std::shared_ptr<IMergeTreeIndexMergedCondition>; +using MergeTreeIndexMergedConditions = std::vector<MergeTreeIndexMergedConditionPtr>; struct IMergeTreeIndex @@ -100,6 +131,9 @@ struct IMergeTreeIndex /// Returns filename without extension. String getFileName() const { return INDEX_FILE_PREFIX + index.name; } + size_t getGranularity() const { return index.granularity; } + + virtual bool isMergeable() const { return false; } /// Returns extension for serialization. /// Reimplement if you want new index format. 
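The selector-side dispatch for this interface (shown earlier in MergeTreeDataSelectExecutor.cpp) reduces to a small pattern: mergeable indices are grouped by {index type, granularity}, one merged condition is created per group, and every index in the group is then registered on that shared condition. The sketch below is a condensed restatement of that loop, not new behavior; `index_helpers` is a hypothetical stand-in for the helpers produced by `MergeTreeIndexFactory::instance().get(...)`, and the other names come from the diff:

```cpp
// Sketch: routing mergeable skip indices to a shared merged condition,
// mirroring the loop in filterPartsByPrimaryKeyAndSkipIndexes.
std::map<std::pair<String, size_t>, MergedDataSkippingIndexAndCondition> merged_indices;

for (const auto & index_helper : index_helpers)
{
    if (index_helper->isMergeable())
    {
        /// All mergeable indices of one type and granularity share a single condition.
        auto [it, inserted] = merged_indices.try_emplace({index_helper->index.type, index_helper->getGranularity()});
        if (inserted)
            it->second.condition = index_helper->createIndexMergedCondition(query_info, metadata_snapshot);

        it->second.addIndex(index_helper);  /// the condition accumulates each index's hypothesis
    }
    else
    {
        /// Non-mergeable indices keep the one-condition-per-index path.
        auto condition = index_helper->createIndexCondition(query_info, context);
        if (!condition->alwaysUnknownOrTrue())
            useful_indices.emplace_back(index_helper, condition);
    }
}
```

The pay-off shows up in `filterMarksUsingMergedIndex`: one granule is read per index of the group and the whole vector is handed to `mayBeTrueOnGranule`, so a hypothesis condition can reason about all hypotheses and constraints at once instead of each index pruning independently.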
@@ -126,7 +160,14 @@ struct IMergeTreeIndex virtual MergeTreeIndexAggregatorPtr createIndexAggregator() const = 0; virtual MergeTreeIndexConditionPtr createIndexCondition( - const SelectQueryInfo & query_info, ContextPtr context) const = 0; + const SelectQueryInfo & query_info, ContextPtr context) const = 0; + + virtual MergeTreeIndexMergedConditionPtr createIndexMergedCondition( + const SelectQueryInfo & /*query_info*/, StorageMetadataPtr /*storage_metadata*/) const + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "MergedCondition is not implemented for index of type {}", index.type); + } Names getColumnsRequiredForIndexCalc() const { return index.expression->getRequiredColumns(); } @@ -177,4 +218,7 @@ void bloomFilterIndexValidator(const IndexDescription & index, bool attach); MergeTreeIndexPtr bloomFilterIndexCreatorNew(const IndexDescription & index); void bloomFilterIndexValidatorNew(const IndexDescription & index, bool attach); +MergeTreeIndexPtr hypothesisIndexCreator(const IndexDescription & index); +void hypothesisIndexValidator(const IndexDescription & index, bool attach); + } diff --git a/src/Storages/MergeTree/MergeTreeMutationEntry.cpp b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp index 2aefb3df2be..0f71742fb09 100644 --- a/src/Storages/MergeTree/MergeTreeMutationEntry.cpp +++ b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -10,7 +11,39 @@ namespace DB { -MergeTreeMutationEntry::MergeTreeMutationEntry(MutationCommands commands_, DiskPtr disk_, const String & path_prefix_, Int64 tmp_number) +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + +String MergeTreeMutationEntry::versionToFileName(UInt64 block_number_) +{ + assert(block_number_); + return fmt::format("mutation_{}.txt", block_number_); +} + +UInt64 MergeTreeMutationEntry::tryParseFileName(const String & file_name_) +{ + UInt64 maybe_block_number = 0; + ReadBufferFromString file_name_buf(file_name_); + if (!checkString("mutation_", file_name_buf)) + return 0; + if (!tryReadIntText(maybe_block_number, file_name_buf)) + return 0; + if (!checkString(".txt", file_name_buf)) + return 0; + assert(maybe_block_number); + return maybe_block_number; +} + +UInt64 MergeTreeMutationEntry::parseFileName(const String & file_name_) +{ + if (UInt64 maybe_block_number = tryParseFileName(file_name_)) + return maybe_block_number; + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot parse mutation version from file name, expected 'mutation_<UInt64>.txt', got '{}'", file_name_); +} + +MergeTreeMutationEntry::MergeTreeMutationEntry(MutationCommands commands_, DiskPtr disk_, const String & path_prefix_, UInt64 tmp_number) : create_time(time(nullptr)) , commands(std::move(commands_)) , disk(std::move(disk_)) @@ -35,10 +68,11 @@ MergeTreeMutationEntry::MergeTreeMutationEntry(MutationCommands commands_, DiskP } } -void MergeTreeMutationEntry::commit(Int64 block_number_) +void MergeTreeMutationEntry::commit(UInt64 block_number_) { + assert(block_number_); block_number = block_number_; - String new_file_name = "mutation_" + toString(block_number) + ".txt"; + String new_file_name = versionToFileName(block_number); disk->moveFile(path_prefix + file_name, path_prefix + new_file_name); is_temp = false; file_name = new_file_name; @@ -62,10 +96,7 @@ MergeTreeMutationEntry::MergeTreeMutationEntry(DiskPtr disk_, const String & pat , file_name(file_name_) , is_temp(false) { - ReadBufferFromString file_name_buf(file_name); - file_name_buf >> "mutation_" >> 
block_number >> ".txt"; - assertEOF(file_name_buf); - + block_number = parseFileName(file_name); auto buf = disk->readFile(path_prefix + file_name); *buf >> "format version: 1\n"; diff --git a/src/Storages/MergeTree/MergeTreeMutationEntry.h b/src/Storages/MergeTree/MergeTreeMutationEntry.h index e01ce4320b3..7554a03836e 100644 --- a/src/Storages/MergeTree/MergeTreeMutationEntry.h +++ b/src/Storages/MergeTree/MergeTreeMutationEntry.h @@ -21,7 +21,7 @@ struct MergeTreeMutationEntry String file_name; bool is_temp = false; - Int64 block_number = 0; + UInt64 block_number = 0; String latest_failed_part; MergeTreePartInfo latest_failed_part_info; @@ -29,15 +29,19 @@ struct MergeTreeMutationEntry String latest_fail_reason; /// Create a new entry and write it to a temporary file. - MergeTreeMutationEntry(MutationCommands commands_, DiskPtr disk, const String & path_prefix_, Int64 tmp_number); + MergeTreeMutationEntry(MutationCommands commands_, DiskPtr disk, const String & path_prefix_, UInt64 tmp_number); MergeTreeMutationEntry(const MergeTreeMutationEntry &) = delete; MergeTreeMutationEntry(MergeTreeMutationEntry &&) = default; /// Commit entry and rename it to a permanent file. - void commit(Int64 block_number_); + void commit(UInt64 block_number_); void removeFile(); + static String versionToFileName(UInt64 block_number_); + static UInt64 tryParseFileName(const String & file_name_); + static UInt64 parseFileName(const String & file_name_); + /// Load an existing entry. MergeTreeMutationEntry(DiskPtr disk_, const String & path_prefix_, const String & file_name_); diff --git a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp index f000b43f61a..1f8642db886 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp @@ -71,6 +71,9 @@ MergeTreeReaderCompact::MergeTreeReaderCompact( if (buffer_size) settings.read_settings = settings.read_settings.adjustBufferSize(buffer_size); + if (!settings.read_settings.local_fs_buffer_size || !settings.read_settings.remote_fs_buffer_size) + throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Cannot read to empty buffer."); + const String full_data_path = data_part->getFullRelativePath() + MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION; if (uncompressed_cache) { diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 3401eb46e87..decc72df14c 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -58,6 +58,8 @@ struct Settings; M(UInt64, non_replicated_deduplication_window, 0, "How many last blocks of hashes should be kept on disk (0 - disabled).", 0) \ M(UInt64, max_parts_to_merge_at_once, 100, "Max amount of parts which can be merged at once (0 - disabled). Doesn't affect OPTIMIZE FINAL query.", 0) \ M(UInt64, merge_selecting_sleep_ms, 5000, "Sleep time for merge selecting when no part selected, a lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters", 0) \ + M(UInt64, merge_tree_clear_old_temporary_directories_interval_seconds, 60, "The period of executing the clear old temporary directories operation in background.", 0) \ + M(UInt64, merge_tree_clear_old_parts_interval_seconds, 1, "The period of executing the clear old parts operation in background.", 0) \ \ /** Inserts settings. 
*/ \ M(UInt64, parts_to_delay_insert, 150, "If table contains at least that many active parts in single partition, artificially slow down insert into table.", 0) \ diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index a32eecd4a49..7a85791d172 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -14,7 +14,6 @@ #include #include - namespace DB { diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 36ce3f25744..accf167f5ff 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -16,6 +16,8 @@ #include #include #include +#include + namespace CurrentMetrics { diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index e94e0b903b5..cc9a142c65c 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -10,11 +10,6 @@ #include -namespace CurrentMetrics -{ - extern const Metric BackgroundPoolTask; -} - namespace DB { @@ -842,35 +837,16 @@ void ReplicatedMergeTreeQueue::updateMutations(zkutil::ZooKeeperPtr zookeeper, C LOG_TRACE(log, "Adding mutation {} for partition {} for all block numbers less than {}", entry->znode_name, partition_id, block_num); } - /// Initialize `mutation.parts_to_do`. First we need to mutate all parts in `current_parts`. - Strings current_parts_to_mutate = getPartNamesToMutate(*entry, current_parts, drop_ranges); - for (const String & current_part_to_mutate : current_parts_to_mutate) + /// Initialize `mutation.parts_to_do`. + /// We need to mutate all parts in `current_parts` and all parts that will appear after queue entries execution. + /// So, we need to mutate all parts in virtual_parts (with the corresponding block numbers). + Strings virtual_parts_to_mutate = getPartNamesToMutate(*entry, virtual_parts, drop_ranges); + for (const String & current_part_to_mutate : virtual_parts_to_mutate) { assert(MergeTreePartInfo::fromPartName(current_part_to_mutate, format_version).level < MergeTreePartInfo::MAX_LEVEL); mutation.parts_to_do.add(current_part_to_mutate); } - /// And next we would need to mutate all parts with getDataVersion() greater than - /// mutation block number that would appear as a result of executing the queue. - for (const auto & queue_entry : queue) - { - for (const String & produced_part_name : queue_entry->getVirtualPartNames(format_version)) - { - auto part_info = MergeTreePartInfo::fromPartName(produced_part_name, format_version); - - /// Oddly enough, getVirtualPartNames() may return _virtual_ part name. - /// Such parts do not exist and will never appear, so we should not add virtual parts to parts_to_do list. - /// Fortunately, it's easy to distinguish virtual parts from normal parts by part level. - /// See StorageReplicatedMergeTree::getFakePartCoveringAllPartsInPartition(...) 
- if (part_info.isFakeDropRangePart()) - continue; - - auto it = entry->block_numbers.find(part_info.partition_id); - if (it != entry->block_numbers.end() && it->second > part_info.getDataVersion()) - mutation.parts_to_do.add(produced_part_name); - } - } - if (mutation.parts_to_do.size() == 0) { some_mutations_are_probably_done = true; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index c29ae873c5c..3bb592dcdcb 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -6,6 +6,7 @@ #include #include #include +#include namespace ProfileEvents diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index 982acfe62a4..e2f0fc761b1 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -684,8 +684,8 @@ static StoragePtr create(const StorageFactory::Arguments & args) auto minmax_columns = metadata.getColumnsRequiredForPartitionKey(); auto primary_key_asts = metadata.primary_key.expression_list_ast->children; - metadata.minmax_count_projection.emplace( - ProjectionDescription::getMinMaxCountProjection(args.columns, minmax_columns, primary_key_asts, args.getContext())); + metadata.minmax_count_projection.emplace(ProjectionDescription::getMinMaxCountProjection( + args.columns, metadata.partition_key.expression_list_ast, minmax_columns, primary_key_asts, args.getContext())); if (args.storage_def->sample_by) metadata.sampling_key = KeyDescription::getKeyFromAST(args.storage_def->sample_by->ptr(), metadata.columns, args.getContext()); @@ -707,9 +707,11 @@ static StoragePtr create(const StorageFactory::Arguments & args) metadata.projections.add(std::move(projection)); } + auto constraints = metadata.constraints.getConstraints(); if (args.query.columns_list && args.query.columns_list->constraints) for (auto & constraint : args.query.columns_list->constraints->children) - metadata.constraints.constraints.push_back(constraint); + constraints.push_back(constraint); + metadata.constraints = ConstraintsDescription(constraints); auto column_ttl_asts = args.columns.getColumnTTLs(); for (const auto & [name, ast] : column_ttl_asts) @@ -764,8 +766,8 @@ static StoragePtr create(const StorageFactory::Arguments & args) auto minmax_columns = metadata.getColumnsRequiredForPartitionKey(); auto primary_key_asts = metadata.primary_key.expression_list_ast->children; - metadata.minmax_count_projection.emplace( - ProjectionDescription::getMinMaxCountProjection(args.columns, minmax_columns, primary_key_asts, args.getContext())); + metadata.minmax_count_projection.emplace(ProjectionDescription::getMinMaxCountProjection( + args.columns, metadata.partition_key.expression_list_ast, minmax_columns, primary_key_asts, args.getContext())); const auto * ast = engine_args[arg_num]->as(); if (ast && ast->value.getType() == Field::Types::UInt64) diff --git a/src/Storages/PartitionedSink.cpp b/src/Storages/PartitionedSink.cpp index 1d13cbe5f94..5e8f2a9e132 100644 --- a/src/Storages/PartitionedSink.cpp +++ b/src/Storages/PartitionedSink.cpp @@ -13,6 +13,8 @@ #include +#include + namespace DB { diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 670148e9baa..56cd58c4c03 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ 
b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -10,6 +10,7 @@ #include #include #include +#include namespace DB diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index f771b2239ef..84afe740091 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -369,8 +369,8 @@ ASTPtr StorageMaterializedPostgreSQL::getCreateNestedTableQuery(PostgreSQLTableS auto create_table_query = std::make_shared<ASTCreateQuery>(); auto table_id = getStorageID(); - create_table_query->table = getNestedTableName(); - create_table_query->database = table_id.database_name; + create_table_query->setTable(getNestedTableName()); + create_table_query->setDatabase(table_id.database_name); if (is_materialized_postgresql_database) create_table_query->uuid = table_id.uuid; diff --git a/src/Storages/ProjectionsDescription.cpp b/src/Storages/ProjectionsDescription.cpp index dc0598de72f..c7d242476a1 100644 --- a/src/Storages/ProjectionsDescription.cpp +++ b/src/Storages/ProjectionsDescription.cpp @@ -15,6 +15,7 @@ #include #include #include +#include namespace DB @@ -61,6 +62,7 @@ ProjectionDescription ProjectionDescription::clone() const other.key_size = key_size; other.is_minmax_count_projection = is_minmax_count_projection; other.primary_key_max_column_name = primary_key_max_column_name; + other.partition_value_indices = partition_value_indices; return other; } @@ -175,6 +177,7 @@ ProjectionDescription::getProjectionFromAST(const ASTPtr & definition_ast, const ProjectionDescription ProjectionDescription::getMinMaxCountProjection( const ColumnsDescription & columns, + const ASTPtr & partition_columns, const Names & minmax_columns, const ASTs & primary_key_asts, ContextPtr query_context) @@ -197,6 +200,9 @@ ProjectionDescription ProjectionDescription::getMinMaxCountProjection( select_expression_list->children.push_back(makeASTFunction("count")); select_query->setExpression(ASTProjectionSelectQuery::Expression::SELECT, std::move(select_expression_list)); + if (partition_columns) + select_query->setExpression(ASTProjectionSelectQuery::Expression::GROUP_BY, partition_columns->clone()); + result.definition_ast = select_query; result.name = MINMAX_COUNT_PROJECTION_NAME; result.query_ast = select_query->cloneToASTSelect(); @@ -207,12 +213,36 @@ ProjectionDescription ProjectionDescription::getMinMaxCountProjection( result.query_ast, query_context, storage, {}, SelectQueryOptions{QueryProcessingStage::WithMergeableState}.modify().ignoreAlias()); result.required_columns = select.getRequiredColumns(); result.sample_block = select.getSampleBlock(); - /// If we have primary key and it's not in minmax_columns, it will be used as one additional minmax columns. 
- if (!primary_key_asts.empty() && result.sample_block.columns() == 2 * (minmax_columns.size() + 1) + 1) + + std::map<String, size_t> partition_column_name_to_value_index; + if (partition_columns) { - /// min(p1), max(p1), min(p2), max(p2), ..., min(k1), max(k1), count() - /// ^ - /// size - 2 + for (auto i : collections::range(partition_columns->children.size())) + partition_column_name_to_value_index[partition_columns->children[i]->getColumnNameWithoutAlias()] = i; + } + + const auto & analysis_result = select.getAnalysisResult(); + if (analysis_result.need_aggregate) + { + for (const auto & key : select.getQueryAnalyzer()->aggregationKeys()) + { + result.sample_block_for_keys.insert({nullptr, key.type, key.name}); + auto it = partition_column_name_to_value_index.find(key.name); + if (it == partition_column_name_to_value_index.end()) + throw Exception("minmax_count projection can only have keys about partition columns. It's a bug", ErrorCodes::LOGICAL_ERROR); + result.partition_value_indices.push_back(it->second); + } + } + + /// If we have primary key and it's not in minmax_columns, it will be used as one additional minmax columns. + if (!primary_key_asts.empty() + && result.sample_block.columns() + == 2 * (minmax_columns.size() + 1) /* minmax columns */ + 1 /* count() */ + + result.partition_value_indices.size() /* partition_columns */) + { + /// partition_expr1, partition_expr2, ..., min(p1), max(p1), min(p2), max(p2), ..., min(k1), max(k1), count() + /// ^ + /// size - 2 result.primary_key_max_column_name = *(result.sample_block.getNames().cend() - 2); } result.type = ProjectionDescription::Type::Aggregate; @@ -250,7 +280,7 @@ Block ProjectionDescription::calculate(const Block & block, ContextPtr context) Block ret; executor.pull(ret); if (executor.pull(ret)) - throw Exception("Projection cannot increase the number of rows in a block", ErrorCodes::LOGICAL_ERROR); + throw Exception("Projection cannot increase the number of rows in a block. It's a bug", ErrorCodes::LOGICAL_ERROR); return ret; } diff --git a/src/Storages/ProjectionsDescription.h b/src/Storages/ProjectionsDescription.h index 7c254182ba4..960e94e22f4 100644 --- a/src/Storages/ProjectionsDescription.h +++ b/src/Storages/ProjectionsDescription.h @@ -61,12 +61,22 @@ struct ProjectionDescription /// If a primary key expression is used in the minmax_count projection, store the name of max expression. String primary_key_max_column_name; + /// Stores partition value indices of partition value row. It's needed because identical + /// partition columns will appear only once in projection block, but every column will have a + /// value in the partition value row. This vector holds the biggest value index of given + /// partition columns. 
+ std::vector<size_t> partition_value_indices; + /// Parse projection from definition AST static ProjectionDescription getProjectionFromAST(const ASTPtr & definition_ast, const ColumnsDescription & columns, ContextPtr query_context); static ProjectionDescription getMinMaxCountProjection( - const ColumnsDescription & columns, const Names & minmax_columns, const ASTs & primary_key_asts, ContextPtr query_context); + const ColumnsDescription & columns, + const ASTPtr & partition_columns, + const Names & minmax_columns, + const ASTs & primary_key_asts, + ContextPtr query_context); ProjectionDescription() = default; diff --git a/src/Storages/RabbitMQ/RabbitMQSink.cpp b/src/Storages/RabbitMQ/RabbitMQSink.cpp index ce569afb99b..0f0708cc2d3 100644 --- a/src/Storages/RabbitMQ/RabbitMQSink.cpp +++ b/src/Storages/RabbitMQ/RabbitMQSink.cpp @@ -47,7 +47,7 @@ void RabbitMQSink::consume(Chunk chunk) void RabbitMQSink::onFinish() { - format->doWriteSuffix(); + format->finalize(); if (buffer) buffer->updateMaxWait(); diff --git a/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.cpp b/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.cpp index f6b12708e81..9b4f32d93df 100644 --- a/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.cpp +++ b/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/StorageExecutable.cpp b/src/Storages/StorageExecutable.cpp index 16647d0b60f..51ecfc1e884 100644 --- a/src/Storages/StorageExecutable.cpp +++ b/src/Storages/StorageExecutable.cpp @@ -21,6 +21,8 @@ #include #include +#include + namespace DB { diff --git a/src/Storages/StorageFactory.cpp b/src/Storages/StorageFactory.cpp index ba2ac3f72a3..1aa6ae98e7a 100644 --- a/src/Storages/StorageFactory.cpp +++ b/src/Storages/StorageFactory.cpp @@ -201,7 +201,7 @@ StoragePtr StorageFactory::get( .storage_def = storage_def, .query = query, .relative_data_path = relative_data_path, - .table_id = StorageID(query.database, query.table, query.uuid), + .table_id = StorageID(query.getDatabase(), query.getTable(), query.uuid), .local_context = local_context, .context = context, .columns = columns, diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 4a1eac2a39e..fd7a3c77241 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -621,26 +621,21 @@ public: naked_buffer = std::make_unique<WriteBufferFromFile>(paths[0], DBMS_DEFAULT_BUFFER_SIZE, flags); } - /// In case of CSVWithNames we have already written prefix. - if (naked_buffer->size()) - prefix_written = true; + /// In case of formats with prefixes, if the file is not empty we have already written the prefix. 
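+ /// (For example, appending to a non-empty CSVWithNames file must not emit the header row a second time.)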
+ bool do_not_write_prefix = naked_buffer->size(); write_buf = wrapWriteBufferWithCompressionMethod(std::move(naked_buffer), compression_method, 3); writer = FormatFactory::instance().getOutputFormatParallelIfPossible(format_name, *write_buf, metadata_snapshot->getSampleBlock(), context, {}, format_settings); + + if (do_not_write_prefix) + writer->doNotWritePrefix(); } String getName() const override { return "StorageFileSink"; } - void onStart() override - { - if (!prefix_written) - writer->doWritePrefix(); - prefix_written = true; - } - void consume(Chunk chunk) override { writer->write(getHeader().cloneWithColumns(chunk.detachColumns())); @@ -648,7 +643,7 @@ public: void onFinish() override { - writer->doWriteSuffix(); + writer->finalize(); } // void flush() override @@ -662,7 +657,6 @@ private: std::unique_ptr write_buf; OutputFormatPtr writer; - bool prefix_written{false}; int table_fd; bool use_table_fd; diff --git a/src/Storages/StorageMaterializedMySQL.cpp b/src/Storages/StorageMaterializedMySQL.cpp index c214540151a..922dff2faf7 100644 --- a/src/Storages/StorageMaterializedMySQL.cpp +++ b/src/Storages/StorageMaterializedMySQL.cpp @@ -48,8 +48,8 @@ Pipe StorageMaterializedMySQL::read( size_t max_block_size, unsigned int num_streams) { - /// If the background synchronization thread has exception. - rethrowSyncExceptionIfNeed(database); + if (const auto * db = typeid_cast(database)) + db->rethrowExceptionIfNeeded(); return readFinalFromNestedStorage(nested_storage, column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams); @@ -57,8 +57,9 @@ Pipe StorageMaterializedMySQL::read( NamesAndTypesList StorageMaterializedMySQL::getVirtuals() const { - /// If the background synchronization thread has exception. - rethrowSyncExceptionIfNeed(database); + if (const auto * db = typeid_cast(database)) + db->rethrowExceptionIfNeeded(); + return nested_storage->getVirtuals(); } diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index e7de3010c33..210e6548465 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -105,8 +105,8 @@ StorageMaterializedView::StorageMaterializedView( /// We will create a query to create an internal table. 
auto create_context = Context::createCopy(local_context); auto manual_create_query = std::make_shared<ASTCreateQuery>(); - manual_create_query->database = getStorageID().database_name; - manual_create_query->table = generateInnerTableName(getStorageID()); + manual_create_query->setDatabase(getStorageID().database_name); + manual_create_query->setTable(generateInnerTableName(getStorageID())); manual_create_query->uuid = query.to_inner_uuid; auto new_columns_list = std::make_shared<ASTColumns>(); @@ -119,7 +119,7 @@ StorageMaterializedView::StorageMaterializedView( create_interpreter.setInternal(true); create_interpreter.execute(); - target_table_id = DatabaseCatalog::instance().getTable({manual_create_query->database, manual_create_query->table}, getContext())->getStorageID(); + target_table_id = DatabaseCatalog::instance().getTable({manual_create_query->getDatabase(), manual_create_query->getTable()}, getContext())->getStorageID(); } if (!select.select_table_id.empty()) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 06fbfafc9b6..23fd129e157 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -180,6 +180,28 @@ QueryProcessingStage::Enum StorageMerge::getQueryProcessingStage( } +SelectQueryInfo StorageMerge::getModifiedQueryInfo( + const SelectQueryInfo & query_info, ContextPtr modified_context, const StorageID & current_storage_id, bool is_merge_engine) +{ + SelectQueryInfo modified_query_info = query_info; + modified_query_info.query = query_info.query->clone(); + + /// Original query could contain JOIN but we need only the first joined table and its columns. + auto & modified_select = modified_query_info.query->as<ASTSelectQuery &>(); + TreeRewriterResult new_analyzer_res = *modified_query_info.syntax_analyzer_result; + removeJoin(modified_select, new_analyzer_res, modified_context); + modified_query_info.syntax_analyzer_result = std::make_shared<TreeRewriterResult>(std::move(new_analyzer_res)); + + if (!is_merge_engine) + { + VirtualColumnUtils::rewriteEntityInAst(modified_query_info.query, "_table", current_storage_id.table_name); + VirtualColumnUtils::rewriteEntityInAst(modified_query_info.query, "_database", current_storage_id.database_name); + } + + return modified_query_info; +} + + Pipe StorageMerge::read( const Names & column_names, const StorageMetadataPtr & metadata_snapshot, @@ -222,10 +244,12 @@ Pipe StorageMerge::read( = getSelectedTables(local_context, query_info.query, has_database_virtual_column, has_table_virtual_column); if (selected_tables.empty()) + { + auto modified_query_info = getModifiedQueryInfo(query_info, modified_context, getStorageID(), false); /// FIXME: do we support sampling in this case? 
return createSources( {}, - query_info, + modified_query_info, processed_stage, max_block_size, header, @@ -236,6 +260,7 @@ Pipe StorageMerge::read( 0, has_database_virtual_column, has_table_virtual_column); + } size_t tables_count = selected_tables.size(); Float64 num_streams_multiplier @@ -264,7 +289,6 @@ Pipe StorageMerge::read( } auto sample_block = getInMemoryMetadataPtr()->getSampleBlock(); - Names required_columns; for (const auto & table : selected_tables) { @@ -283,12 +307,16 @@ Pipe StorageMerge::read( auto storage_metadata_snapshot = storage->getInMemoryMetadataPtr(); auto storage_columns = storage_metadata_snapshot->getColumns(); - if (processed_stage == QueryProcessingStage::FetchColumns && !storage_columns.getAliases().empty()) - { - auto syntax_result = TreeRewriter(local_context).analyzeSelect(query_info.query, TreeRewriterResult({}, storage, storage_metadata_snapshot)); - ASTPtr required_columns_expr_list = std::make_shared(); + auto modified_query_info = getModifiedQueryInfo(query_info, modified_context, storage->getStorageID(), storage->as()); + auto syntax_result = TreeRewriter(local_context).analyzeSelect(modified_query_info.query, TreeRewriterResult({}, storage, storage_metadata_snapshot)); + Names column_names_as_aliases; + bool with_aliases = processed_stage == QueryProcessingStage::FetchColumns && !storage_columns.getAliases().empty(); + if (with_aliases) + { + ASTPtr required_columns_expr_list = std::make_shared(); ASTPtr column_expr; + for (const auto & column : real_column_names) { const auto column_default = storage_columns.getDefault(column); @@ -314,21 +342,24 @@ Pipe StorageMerge::read( required_columns_expr_list->children.emplace_back(std::move(column_expr)); } - syntax_result = TreeRewriter(local_context).analyze(required_columns_expr_list, storage_columns.getAllPhysical(), - storage, storage_metadata_snapshot); + syntax_result = TreeRewriter(local_context).analyze( + required_columns_expr_list, storage_columns.getAllPhysical(), storage, storage_metadata_snapshot); auto alias_actions = ExpressionAnalyzer(required_columns_expr_list, syntax_result, local_context).getActionsDAG(true); - required_columns = alias_actions->getRequiredColumns().getNames(); + + column_names_as_aliases = alias_actions->getRequiredColumns().getNames(); + if (column_names_as_aliases.empty()) + column_names_as_aliases.push_back(ExpressionActions::getSmallestColumn(storage_metadata_snapshot->getColumns().getAllPhysical())); } auto source_pipe = createSources( storage_metadata_snapshot, - query_info, + modified_query_info, processed_stage, max_block_size, header, aliases, table, - required_columns.empty() ? real_column_names : required_columns, + column_names_as_aliases.empty() ? real_column_names : column_names_as_aliases, modified_context, current_streams, has_database_virtual_column, @@ -350,7 +381,7 @@ Pipe StorageMerge::read( Pipe StorageMerge::createSources( const StorageMetadataPtr & metadata_snapshot, - SelectQueryInfo & query_info, + SelectQueryInfo & modified_query_info, const QueryProcessingStage::Enum & processed_stage, const UInt64 max_block_size, const Block & header, @@ -364,19 +395,8 @@ Pipe StorageMerge::createSources( bool concat_streams) { const auto & [database_name, storage, struct_lock, table_name] = storage_with_lock; - SelectQueryInfo modified_query_info = query_info; - modified_query_info.query = query_info.query->clone(); - - /// Original query could contain JOIN but we need only the first joined table and its columns. 
auto & modified_select = modified_query_info.query->as(); - TreeRewriterResult new_analyzer_res = *query_info.syntax_analyzer_result; - removeJoin(modified_select, new_analyzer_res, modified_context); - modified_query_info.syntax_analyzer_result = std::make_shared(std::move(new_analyzer_res)); - - VirtualColumnUtils::rewriteEntityInAst(modified_query_info.query, "_table", table_name); - VirtualColumnUtils::rewriteEntityInAst(modified_query_info.query, "_database", database_name); - Pipe pipe; if (!storage) @@ -710,27 +730,30 @@ void StorageMerge::convertingSourceStream( if (!where_expression) return; - for (size_t column_index : collections::range(0, header.columns())) + if (processed_stage > QueryProcessingStage::FetchColumns) { - ColumnWithTypeAndName header_column = header.getByPosition(column_index); - ColumnWithTypeAndName before_column = before_block_header.getByName(header_column.name); - /// If the processed_stage greater than FetchColumns and the block structure between streams is different. - /// the where expression maybe invalid because of convertingBlockInputStream. - /// So we need to throw exception. - if (!header_column.type->equals(*before_column.type.get()) && processed_stage > QueryProcessingStage::FetchColumns) + for (size_t column_index : collections::range(0, header.columns())) { - NamesAndTypesList source_columns = metadata_snapshot->getSampleBlock().getNamesAndTypesList(); - auto virtual_column = *getVirtuals().tryGetByName("_table"); - source_columns.emplace_back(NameAndTypePair{virtual_column.name, virtual_column.type}); - auto syntax_result = TreeRewriter(local_context).analyze(where_expression, source_columns); - ExpressionActionsPtr actions = ExpressionAnalyzer{where_expression, syntax_result, local_context}.getActions(false, false); - Names required_columns = actions->getRequiredColumns(); - - for (const auto & required_column : required_columns) + ColumnWithTypeAndName header_column = header.getByPosition(column_index); + ColumnWithTypeAndName before_column = before_block_header.getByName(header_column.name); + /// If the processed_stage greater than FetchColumns and the block structure between streams is different. + /// the where expression maybe invalid because of convertingBlockInputStream. + /// So we need to throw exception. 
+ if (!header_column.type->equals(*before_column.type.get())) { - NamesAndTypesList source_columns = metadata_snapshot->getSampleBlock().getNamesAndTypesList(); - auto virtual_column = *getVirtuals().tryGetByName("_table"); - source_columns.emplace_back(NameAndTypePair{virtual_column.name, virtual_column.type}); - auto syntax_result = TreeRewriter(local_context).analyze(where_expression, source_columns); - ExpressionActionsPtr actions = ExpressionAnalyzer{where_expression, syntax_result, local_context}.getActions(false, false); - Names required_columns = actions->getRequiredColumns(); - - for (const auto & required_column : required_columns) + { - if (required_column == header_column.name) - throw Exception("Block structure mismatch in Merge Storage: different types:\n" + before_block_header.dumpStructure() - + "\n" + header.dumpStructure(), ErrorCodes::LOGICAL_ERROR); + NamesAndTypesList source_columns = metadata_snapshot->getSampleBlock().getNamesAndTypesList(); + auto virtual_column = *getVirtuals().tryGetByName("_table"); + source_columns.emplace_back(NameAndTypePair{virtual_column.name, virtual_column.type}); + auto syntax_result = TreeRewriter(local_context).analyze(where_expression, source_columns); + ExpressionActionsPtr actions = ExpressionAnalyzer{where_expression, syntax_result, local_context}.getActions(false, false); + Names required_columns = actions->getRequiredColumns(); + + for (const auto & required_column : required_columns) + { + if (required_column == header_column.name) + throw Exception("Block structure mismatch in Merge Storage: different types:\n" + before_block_header.dumpStructure() + + "\n" + header.dumpStructure(), ErrorCodes::LOGICAL_ERROR); + } } } } diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index 52a91747433..56adeab9279 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -129,6 +129,9 @@ protected: const Block & header, const StorageMetadataPtr & metadata_snapshot, const Aliases & aliases, ContextPtr context, ASTPtr & query, Pipe & pipe, QueryProcessingStage::Enum processed_stage); + + static SelectQueryInfo getModifiedQueryInfo( + const SelectQueryInfo & query_info, ContextPtr modified_context, const StorageID & current_storage_id, bool is_merge_engine); }; } diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 7322d57fed2..8e352a74d33 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -411,8 +411,9 @@ Int64 StorageMergeTree::startMutation(const MutationCommands & commands, String version = increment.get(); entry.commit(version); mutation_file_name = entry.file_name; - auto insertion = current_mutations_by_id.emplace(mutation_file_name, std::move(entry)); - current_mutations_by_version.emplace(version, insertion.first->second); + bool inserted = current_mutations_by_version.try_emplace(version, std::move(entry)).second; + if (!inserted) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mutation {} already exists, it's a bug", version); LOG_INFO(log, "Added mutation: {}", mutation_file_name); } @@ -617,16 +618,18 @@ std::vector<MergeTreeMutationStatus> StorageMergeTree::getMutationsStatus() cons CancellationCode StorageMergeTree::killMutation(const String & mutation_id) { LOG_TRACE(log, "Killing mutation {}", mutation_id); + UInt64 mutation_version = MergeTreeMutationEntry::tryParseFileName(mutation_id); + if (!mutation_version) + return CancellationCode::NotFound; std::optional<MergeTreeMutationEntry> to_kill; { std::lock_guard lock(currently_processing_in_background_mutex); - auto it = current_mutations_by_id.find(mutation_id); - if (it != current_mutations_by_id.end()) + auto it = current_mutations_by_version.find(mutation_version); + if (it != current_mutations_by_version.end()) { to_kill.emplace(std::move(it->second)); - current_mutations_by_id.erase(it); - current_mutations_by_version.erase(to_kill->block_number); + current_mutations_by_version.erase(it); } } @@ -667,10 +670,11 @@ void StorageMergeTree::loadMutations() if (startsWith(it->name(), "mutation_")) { MergeTreeMutationEntry entry(disk, path, it->name()); - Int64 block_number = 
entry.block_number; + UInt64 block_number = entry.block_number; LOG_DEBUG(log, "Loading mutation: {} entry, commands size: {}", it->name(), entry.commands.size()); - auto insertion = current_mutations_by_id.emplace(it->name(), std::move(entry)); - current_mutations_by_version.emplace(block_number, insertion.first->second); + auto inserted = current_mutations_by_version.try_emplace(block_number, std::move(entry)).second; + if (!inserted) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mutation {} already exists, it's a bug", block_number); } else if (startsWith(it->name(), "tmp_mutation_")) { @@ -680,7 +684,7 @@ void StorageMergeTree::loadMutations() } if (!current_mutations_by_version.empty()) - increment.value = std::max(Int64(increment.value.load()), current_mutations_by_version.rbegin()->first); + increment.value = std::max(increment.value.load(), current_mutations_by_version.rbegin()->first); } std::shared_ptr StorageMergeTree::selectPartsToMerge( @@ -1014,30 +1018,31 @@ bool StorageMergeTree::scheduleDataProcessingJob(BackgroundJobsAssignee & assign bool scheduled = false; if (time_after_previous_cleanup_temporary_directories.compareAndRestartDeferred( - getContext()->getSettingsRef().merge_tree_clear_old_temporary_directories_interval_seconds)) + getSettings()->merge_tree_clear_old_temporary_directories_interval_seconds)) { assignee.scheduleCommonTask(ExecutableLambdaAdapter::create( [this, share_lock] () { - clearOldTemporaryDirectories(merger_mutator, getSettings()->temporary_directories_lifetime.totalSeconds()); - return true; - }, common_assignee_trigger, getStorageID())); + return clearOldTemporaryDirectories(merger_mutator, getSettings()->temporary_directories_lifetime.totalSeconds()); + }, common_assignee_trigger, getStorageID()), /* need_trigger */ false); scheduled = true; } if (auto lock = time_after_previous_cleanup_parts.compareAndRestartDeferred( - getContext()->getSettingsRef().merge_tree_clear_old_parts_interval_seconds)) + getSettings()->merge_tree_clear_old_parts_interval_seconds)) { assignee.scheduleCommonTask(ExecutableLambdaAdapter::create( [this, share_lock] () { /// All use relative_data_path which changes during rename /// so execute under share lock. 
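Both the startMutation and loadMutations hunks above converge on the same idiom: std::map::try_emplace moves the entry only when the key is new, and its .second result doubles as duplicate detection. A minimal sketch of the pattern:

    #include <map>
    #include <stdexcept>
    #include <string>
    #include <utility>

    std::map<unsigned long long, std::string> mutations_by_version;

    void addMutation(unsigned long long version, std::string entry)
    {
        // try_emplace does not move `entry` unless the key is actually inserted;
        // .second is false when a mutation with this version is already registered.
        bool inserted = mutations_by_version.try_emplace(version, std::move(entry)).second;
        if (!inserted)
            throw std::logic_error("Mutation " + std::to_string(version) + " already exists, it's a bug");
    }

Compared with the old emplace-into-two-maps dance, a colliding version now surfaces as a LOGICAL_ERROR instead of silently landing as a second multimap entry.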
- clearOldPartsFromFilesystem(); - clearOldWriteAheadLogs(); - clearOldMutations(); - clearEmptyParts(); - return true; - }, common_assignee_trigger, getStorageID())); + size_t cleared_count = 0; + cleared_count += clearOldPartsFromFilesystem(); + cleared_count += clearOldWriteAheadLogs(); + cleared_count += clearOldMutations(); + cleared_count += clearEmptyParts(); + return cleared_count; + /// TODO maybe take into account number of cleared objects when calculating backoff + }, common_assignee_trigger, getStorageID()), /* need_trigger */ false); scheduled = true; } @@ -1055,18 +1060,18 @@ Int64 StorageMergeTree::getCurrentMutationVersion( return it->first; } -void StorageMergeTree::clearOldMutations(bool truncate) +size_t StorageMergeTree::clearOldMutations(bool truncate) { const auto settings = getSettings(); if (!truncate && !settings->finished_mutations_to_keep) - return; + return 0; std::vector mutations_to_delete; { std::lock_guard lock(currently_processing_in_background_mutex); if (!truncate && current_mutations_by_version.size() <= settings->finished_mutations_to_keep) - return; + return 0; auto end_it = current_mutations_by_version.end(); auto begin_it = current_mutations_by_version.begin(); @@ -1079,7 +1084,7 @@ void StorageMergeTree::clearOldMutations(bool truncate) size_t done_count = std::distance(begin_it, end_it); if (done_count <= settings->finished_mutations_to_keep) - return; + return 0; to_delete_count = done_count - settings->finished_mutations_to_keep; } @@ -1088,7 +1093,6 @@ void StorageMergeTree::clearOldMutations(bool truncate) for (size_t i = 0; i < to_delete_count; ++i) { mutations_to_delete.push_back(std::move(it->second)); - current_mutations_by_id.erase(mutations_to_delete.back().file_name); it = current_mutations_by_version.erase(it); } } @@ -1098,6 +1102,8 @@ void StorageMergeTree::clearOldMutations(bool truncate) LOG_TRACE(log, "Removing mutation: {}", mutation.file_name); mutation.removeFile(); } + + return mutations_to_delete.size(); } bool StorageMergeTree::optimize( diff --git a/src/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h index 8ed4b707b34..6d6ee785ad0 100644 --- a/src/Storages/StorageMergeTree.h +++ b/src/Storages/StorageMergeTree.h @@ -131,9 +131,7 @@ private: /// This set have to be used with `currently_processing_in_background_mutex`. 
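The cleanup calls above now return how much they actually cleaned, and the scheduling lambda sums those counts, which lines up with the TODO about feeding cleared-object counts into backoff. A hedged sketch of how a scheduler could use such a return value; the doubling policy is an assumption, not the documented BackgroundJobsAssignee behavior:

    #include <algorithm>
    #include <cstddef>
    #include <functional>

    using CleanupTask = std::function<size_t()>;  // returns how many objects were cleared

    // Resets the delay after a productive run, doubles it after an idle one.
    size_t nextDelayMs(const CleanupTask & task, size_t current_delay_ms)
    {
        const size_t min_delay = 1000, max_delay = 60000;
        size_t cleared = task();
        if (cleared > 0)
            return min_delay;
        return std::min(max_delay, std::max(min_delay, current_delay_ms * 2));
    }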
DataParts currently_merging_mutating_parts; - - std::map current_mutations_by_id; - std::multimap current_mutations_by_version; + std::map current_mutations_by_version; std::atomic shutdown_called {false}; @@ -183,7 +181,7 @@ private: const DataPartPtr & part, std::unique_lock & /* currently_processing_in_background_mutex_lock */) const; - void clearOldMutations(bool truncate = false); + size_t clearOldMutations(bool truncate = false); // Partition helpers void dropPartNoWaitNoThrow(const String & part_name) override; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 63bb8af9148..60b576d8eac 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -60,13 +60,15 @@ #include #include - #include #include #include #include +#include +#include + #include #include #include @@ -75,20 +77,14 @@ #include #include -#include - namespace fs = std::filesystem; namespace ProfileEvents { - extern const Event ReplicatedPartMerges; - extern const Event ReplicatedPartMutations; extern const Event ReplicatedPartFailedFetches; extern const Event ReplicatedPartFetchesOfMerged; extern const Event ObsoleteReplicatedParts; extern const Event ReplicatedPartFetches; - extern const Event DataAfterMergeDiffersFromReplica; - extern const Event DataAfterMutationDiffersFromReplica; extern const Event CreatedLogEntryForMerge; extern const Event NotCreatedLogEntryForMerge; extern const Event CreatedLogEntryForMutation; @@ -2191,12 +2187,6 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) { renameTempPartAndReplace(part_desc->res_part, nullptr, &transaction); getCommitPartOps(ops, part_desc->res_part); - - if (ops.size() > zkutil::MULTI_BATCH_SIZE) - { - zookeeper->multi(ops); - ops.clear(); - } } if (!ops.empty()) @@ -2986,7 +2976,7 @@ bool StorageReplicatedMergeTree::scheduleDataProcessingJob(BackgroundJobsAssigne [this, selected_entry] () mutable { return processQueueEntry(selected_entry); - }, common_assignee_trigger, getStorageID())); + }, common_assignee_trigger, getStorageID()), /* need_trigger */ true); return true; } } @@ -6257,186 +6247,195 @@ void StorageReplicatedMergeTree::replacePartitionFrom( /// NOTE: Some covered parts may be missing in src_all_parts if corresponding log entries are not executed yet. DataPartsVector src_all_parts = src_data.getDataPartsVectorInPartition(MergeTreeDataPartState::Committed, partition_id); - DataPartsVector src_parts; - MutableDataPartsVector dst_parts; - Strings block_id_paths; - Strings part_checksums; - auto zookeeper = getZooKeeper(); - std::vector ephemeral_locks; LOG_DEBUG(log, "Cloning {} parts", src_all_parts.size()); static const String TMP_PREFIX = "tmp_replace_from_"; + auto zookeeper = getZooKeeper(); - String alter_partition_version_path = zookeeper_path + "/alter_partition_version"; - Coordination::Stat alter_partition_version_stat; - zookeeper->get(alter_partition_version_path, &alter_partition_version_stat); - - /// Firstly, generate last block number and compute drop_range - /// NOTE: Even if we make ATTACH PARTITION instead of REPLACE PARTITION drop_range will not be empty, it will contain a block. - /// So, such case has special meaning, if drop_range contains only one block it means that nothing to drop. - /// TODO why not to add normal DROP_RANGE entry to replication queue if `replace` is true? 
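The header change above collapses current_mutations_by_id and current_mutations_by_version into a single version-keyed map; that only works because a mutation's id (its file name) encodes the version, which is what MergeTreeMutationEntry::tryParseFileName exploits in killMutation. A hedged sketch of such a parser, assuming a mutation_<version>.txt naming scheme (the real format may differ):

    #include <cstdint>
    #include <optional>
    #include <string>

    // Parses "mutation_<version>.txt"; returns the version, or nullopt if the name does not match.
    std::optional<uint64_t> tryParseMutationFileName(const std::string & file_name)
    {
        const std::string prefix = "mutation_";
        const std::string suffix = ".txt";
        if (file_name.size() <= prefix.size() + suffix.size()
            || file_name.compare(0, prefix.size(), prefix) != 0
            || file_name.compare(file_name.size() - suffix.size(), suffix.size(), suffix) != 0)
            return std::nullopt;
        try
        {
            return std::stoull(file_name.substr(prefix.size(), file_name.size() - prefix.size() - suffix.size()));
        }
        catch (...)  // stoull throws on empty or non-numeric version parts
        {
            return std::nullopt;
        }
    }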
- MergeTreePartInfo drop_range; - std::optional delimiting_block_lock; - bool partition_was_empty = !getFakePartCoveringAllPartsInPartition(partition_id, drop_range, delimiting_block_lock, true); - if (replace && partition_was_empty) + /// Retry if alter_partition_version changes + for (size_t retry = 0; retry < 1000; ++retry) { - /// Nothing to drop, will just attach new parts - LOG_INFO(log, "Partition {} was empty, REPLACE PARTITION will work as ATTACH PARTITION FROM", drop_range.partition_id); - replace = false; - } + DataPartsVector src_parts; + MutableDataPartsVector dst_parts; + Strings block_id_paths; + Strings part_checksums; + std::vector ephemeral_locks; + String alter_partition_version_path = zookeeper_path + "/alter_partition_version"; + Coordination::Stat alter_partition_version_stat; + zookeeper->get(alter_partition_version_path, &alter_partition_version_stat); - if (!replace) - { - /// It's ATTACH PARTITION FROM, not REPLACE PARTITION. We have to reset drop range - drop_range = makeDummyDropRangeForMovePartitionOrAttachPartitionFrom(partition_id); - } - - assert(replace == !LogEntry::ReplaceRangeEntry::isMovePartitionOrAttachFrom(drop_range)); - - String drop_range_fake_part_name = getPartNamePossiblyFake(format_version, drop_range); - - for (const auto & src_part : src_all_parts) - { - /// We also make some kind of deduplication to avoid duplicated parts in case of ATTACH PARTITION - /// Assume that merges in the partition are quite rare - /// Save deduplication block ids with special prefix replace_partition - - if (!canReplacePartition(src_part)) - throw Exception( - "Cannot replace partition '" + partition_id + "' because part '" + src_part->name + "' has inconsistent granularity with table", - ErrorCodes::LOGICAL_ERROR); - - String hash_hex = src_part->checksums.getTotalChecksumHex(); - - if (replace) - LOG_INFO(log, "Trying to replace {} with hash_hex {}", src_part->name, hash_hex); - else - LOG_INFO(log, "Trying to attach {} with hash_hex {}", src_part->name, hash_hex); - - String block_id_path = replace ? "" : (fs::path(zookeeper_path) / "blocks" / (partition_id + "_replace_from_" + hash_hex)); - - auto lock = allocateBlockNumber(partition_id, zookeeper, block_id_path); - if (!lock) + /// Firstly, generate last block number and compute drop_range + /// NOTE: Even if we make ATTACH PARTITION instead of REPLACE PARTITION drop_range will not be empty, it will contain a block. + /// So, such case has special meaning, if drop_range contains only one block it means that nothing to drop. + /// TODO why not to add normal DROP_RANGE entry to replication queue if `replace` is true? 
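The new for (size_t retry = 0; retry < 1000; ++retry) wrapper turns the whole REPLACE PARTITION commit into optimistic concurrency control: snapshot alter_partition_version, rebuild every ZooKeeper op from scratch, and retry only when the version compare-and-set fails with ZBADVERSION. A compilable sketch of that control flow; the two client calls are stubs standing in for zkutil, not the real API:

    #include <cstddef>
    #include <stdexcept>

    enum class Code { OK, BADVERSION, OTHER };

    int readAlterPartitionVersion() { return 0; }           // stub: get() on the version node
    Code tryMultiWithVersionCas(int) { return Code::OK; }   // stub: tryMulti() ending in a version set

    void commitWithRetries()
    {
        for (size_t retry = 0; retry < 1000; ++retry)
        {
            int version = readAlterPartitionVersion();       // snapshot the version node
            Code code = tryMultiWithVersionCas(version);     // atomic batch incl. the version CAS
            if (code == Code::BADVERSION)
                continue;                                    // a concurrent ALTER won; rebuild everything
            if (code != Code::OK)
                throw std::runtime_error("unexpected ZooKeeper error");
            return;
        }
        throw std::runtime_error("Cannot assign ALTER PARTITION, too many concurrent alters");
    }

Everything mutable (ephemeral locks, cloned parts, the log entry) is declared inside the loop body, so each retry starts from a clean slate.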
+ MergeTreePartInfo drop_range; + std::optional delimiting_block_lock; + bool partition_was_empty = !getFakePartCoveringAllPartsInPartition(partition_id, drop_range, delimiting_block_lock, true); + if (replace && partition_was_empty) { - LOG_INFO(log, "Part {} (hash {}) has been already attached", src_part->name, hash_hex); - continue; + /// Nothing to drop, will just attach new parts + LOG_INFO(log, "Partition {} was empty, REPLACE PARTITION will work as ATTACH PARTITION FROM", drop_range.partition_id); + replace = false; } - UInt64 index = lock->getNumber(); - MergeTreePartInfo dst_part_info(partition_id, index, index, src_part->info.level); - auto dst_part = cloneAndLoadDataPartOnSameDisk(src_part, TMP_PREFIX, dst_part_info, metadata_snapshot); - - src_parts.emplace_back(src_part); - dst_parts.emplace_back(dst_part); - ephemeral_locks.emplace_back(std::move(*lock)); - block_id_paths.emplace_back(block_id_path); - part_checksums.emplace_back(hash_hex); - } - - ReplicatedMergeTreeLogEntryData entry; - { - auto src_table_id = src_data.getStorageID(); - entry.type = ReplicatedMergeTreeLogEntryData::REPLACE_RANGE; - entry.source_replica = replica_name; - entry.create_time = time(nullptr); - entry.replace_range_entry = std::make_shared(); - - auto & entry_replace = *entry.replace_range_entry; - entry_replace.drop_range_part_name = drop_range_fake_part_name; - entry_replace.from_database = src_table_id.database_name; - entry_replace.from_table = src_table_id.table_name; - for (const auto & part : src_parts) - entry_replace.src_part_names.emplace_back(part->name); - for (const auto & part : dst_parts) - entry_replace.new_part_names.emplace_back(part->name); - for (const String & checksum : part_checksums) - entry_replace.part_names_checksums.emplace_back(checksum); - entry_replace.columns_version = -1; - } - - /// Remove deduplication block_ids of replacing parts - if (replace) - clearBlocksInPartition(*zookeeper, drop_range.partition_id, drop_range.max_block, drop_range.max_block); - - DataPartsVector parts_to_remove; - Coordination::Responses op_results; - - try - { - Coordination::Requests ops; - for (size_t i = 0; i < dst_parts.size(); ++i) + if (!replace) { - getCommitPartOps(ops, dst_parts[i], block_id_paths[i]); - ephemeral_locks[i].getUnlockOps(ops); - - if (ops.size() > zkutil::MULTI_BATCH_SIZE) - { - /// It is unnecessary to add parts to working set until we commit log entry - zookeeper->multi(ops); - ops.clear(); - } + /// It's ATTACH PARTITION FROM, not REPLACE PARTITION. 
We have to reset drop range + drop_range = makeDummyDropRangeForMovePartitionOrAttachPartitionFrom(partition_id); } - if (auto txn = query_context->getZooKeeperMetadataTransaction()) - txn->moveOpsTo(ops); + assert(replace == !LogEntry::ReplaceRangeEntry::isMovePartitionOrAttachFrom(drop_range)); - delimiting_block_lock->getUnlockOps(ops); - /// Check and update version to avoid race with DROP_RANGE - ops.emplace_back(zkutil::makeSetRequest(alter_partition_version_path, "", alter_partition_version_stat.version)); - /// Just update version, because merges assignment relies on it - ops.emplace_back(zkutil::makeSetRequest(fs::path(zookeeper_path) / "log", "", -1)); - ops.emplace_back(zkutil::makeCreateRequest(fs::path(zookeeper_path) / "log/log-", entry.toString(), zkutil::CreateMode::PersistentSequential)); + String drop_range_fake_part_name = getPartNamePossiblyFake(format_version, drop_range); - Transaction transaction(*this); + for (const auto & src_part : src_all_parts) { - auto data_parts_lock = lockParts(); + /// We also make some kind of deduplication to avoid duplicated parts in case of ATTACH PARTITION + /// Assume that merges in the partition are quite rare + /// Save deduplication block ids with special prefix replace_partition - for (MutableDataPartPtr & part : dst_parts) - renameTempPartAndReplace(part, nullptr, &transaction, data_parts_lock); - } + if (!canReplacePartition(src_part)) + throw Exception( + "Cannot replace partition '" + partition_id + "' because part '" + src_part->name + "' has inconsistent granularity with table", + ErrorCodes::LOGICAL_ERROR); - Coordination::Error code = zookeeper->tryMulti(ops, op_results); - if (code == Coordination::Error::ZOK) - delimiting_block_lock->assumeUnlocked(); - else if (code == Coordination::Error::ZBADVERSION) - throw Exception(ErrorCodes::CANNOT_ASSIGN_ALTER, "Cannot assign ALTER PARTITION, because another ALTER PARTITION query was concurrently executed"); - else - zkutil::KeeperMultiException::check(code, ops, op_results); + String hash_hex = src_part->checksums.getTotalChecksumHex(); - { - auto data_parts_lock = lockParts(); - - transaction.commit(&data_parts_lock); if (replace) - parts_to_remove = removePartsInRangeFromWorkingSet(drop_range, true, data_parts_lock); + LOG_INFO(log, "Trying to replace {} with hash_hex {}", src_part->name, hash_hex); + else + LOG_INFO(log, "Trying to attach {} with hash_hex {}", src_part->name, hash_hex); + + String block_id_path = replace ? "" : (fs::path(zookeeper_path) / "blocks" / (partition_id + "_replace_from_" + hash_hex)); + + auto lock = allocateBlockNumber(partition_id, zookeeper, block_id_path); + if (!lock) + { + LOG_INFO(log, "Part {} (hash {}) has been already attached", src_part->name, hash_hex); + continue; + } + + UInt64 index = lock->getNumber(); + MergeTreePartInfo dst_part_info(partition_id, index, index, src_part->info.level); + auto dst_part = cloneAndLoadDataPartOnSameDisk(src_part, TMP_PREFIX, dst_part_info, metadata_snapshot); + + src_parts.emplace_back(src_part); + dst_parts.emplace_back(dst_part); + ephemeral_locks.emplace_back(std::move(*lock)); + block_id_paths.emplace_back(block_id_path); + part_checksums.emplace_back(hash_hex); } - PartLog::addNewParts(getContext(), dst_parts, watch.elapsed()); - } - catch (...) 
- { - PartLog::addNewParts(getContext(), dst_parts, watch.elapsed(), ExecutionStatus::fromCurrentException()); - throw; + ReplicatedMergeTreeLogEntryData entry; + { + auto src_table_id = src_data.getStorageID(); + entry.type = ReplicatedMergeTreeLogEntryData::REPLACE_RANGE; + entry.source_replica = replica_name; + entry.create_time = time(nullptr); + entry.replace_range_entry = std::make_shared(); + + auto & entry_replace = *entry.replace_range_entry; + entry_replace.drop_range_part_name = drop_range_fake_part_name; + entry_replace.from_database = src_table_id.database_name; + entry_replace.from_table = src_table_id.table_name; + for (const auto & part : src_parts) + entry_replace.src_part_names.emplace_back(part->name); + for (const auto & part : dst_parts) + entry_replace.new_part_names.emplace_back(part->name); + for (const String & checksum : part_checksums) + entry_replace.part_names_checksums.emplace_back(checksum); + entry_replace.columns_version = -1; + } + + /// Remove deduplication block_ids of replacing parts + if (replace) + clearBlocksInPartition(*zookeeper, drop_range.partition_id, drop_range.max_block, drop_range.max_block); + + DataPartsVector parts_to_remove; + Coordination::Responses op_results; + + try + { + Coordination::Requests ops; + for (size_t i = 0; i < dst_parts.size(); ++i) + { + getCommitPartOps(ops, dst_parts[i], block_id_paths[i]); + ephemeral_locks[i].getUnlockOps(ops); + } + + if (auto txn = query_context->getZooKeeperMetadataTransaction()) + txn->moveOpsTo(ops); + + delimiting_block_lock->getUnlockOps(ops); + /// Check and update version to avoid race with DROP_RANGE + ops.emplace_back(zkutil::makeSetRequest(alter_partition_version_path, "", alter_partition_version_stat.version)); + /// Just update version, because merges assignment relies on it + ops.emplace_back(zkutil::makeSetRequest(fs::path(zookeeper_path) / "log", "", -1)); + ops.emplace_back(zkutil::makeCreateRequest(fs::path(zookeeper_path) / "log/log-", entry.toString(), zkutil::CreateMode::PersistentSequential)); + + Transaction transaction(*this); + { + auto data_parts_lock = lockParts(); + + for (MutableDataPartPtr & part : dst_parts) + renameTempPartAndReplace(part, nullptr, &transaction, data_parts_lock); + } + + Coordination::Error code = zookeeper->tryMulti(ops, op_results); + if (code == Coordination::Error::ZOK) + delimiting_block_lock->assumeUnlocked(); + else if (code == Coordination::Error::ZBADVERSION) + { + /// Cannot retry automatically, because some zookeeper ops were lost on the first attempt. Will retry on DDLWorker-level. + if (query_context->getZooKeeperMetadataTransaction()) + throw Exception( + "Cannot execute alter, because alter partition version was suddenly changed due to concurrent alter", + ErrorCodes::CANNOT_ASSIGN_ALTER); + continue; + } + else + zkutil::KeeperMultiException::check(code, ops, op_results); + + { + auto data_parts_lock = lockParts(); + + transaction.commit(&data_parts_lock); + if (replace) + parts_to_remove = removePartsInRangeFromWorkingSet(drop_range, true, data_parts_lock); + } + + PartLog::addNewParts(getContext(), dst_parts, watch.elapsed()); + } + catch (...) 
+ { + PartLog::addNewParts(getContext(), dst_parts, watch.elapsed(), ExecutionStatus::fromCurrentException()); + throw; + } + + String log_znode_path = dynamic_cast(*op_results.back()).path_created; + entry.znode_name = log_znode_path.substr(log_znode_path.find_last_of('/') + 1); + + for (auto & lock : ephemeral_locks) + lock.assumeUnlocked(); + + /// Forcibly remove replaced parts from ZooKeeper + tryRemovePartsFromZooKeeperWithRetries(parts_to_remove); + + /// Speedup removing of replaced parts from filesystem + parts_to_remove.clear(); + cleanup_thread.wakeup(); + + lock2.reset(); + lock1.reset(); + + waitForLogEntryToBeProcessedIfNecessary(entry, query_context); + + return; } - String log_znode_path = dynamic_cast(*op_results.back()).path_created; - entry.znode_name = log_znode_path.substr(log_znode_path.find_last_of('/') + 1); - - for (auto & lock : ephemeral_locks) - lock.assumeUnlocked(); - - /// Forcibly remove replaced parts from ZooKeeper - tryRemovePartsFromZooKeeperWithRetries(parts_to_remove); - - /// Speedup removing of replaced parts from filesystem - parts_to_remove.clear(); - cleanup_thread.wakeup(); - - lock2.reset(); - lock1.reset(); - - waitForLogEntryToBeProcessedIfNecessary(entry, query_context); + throw Exception( + ErrorCodes::CANNOT_ASSIGN_ALTER, "Cannot assign ALTER PARTITION, because another ALTER PARTITION query was concurrently executed"); } void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_table, const ASTPtr & partition, ContextPtr query_context) @@ -6464,199 +6463,201 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta /// A range for log entry to remove parts from the source table (myself). auto zookeeper = getZooKeeper(); - String alter_partition_version_path = zookeeper_path + "/alter_partition_version"; - Coordination::Stat alter_partition_version_stat; - zookeeper->get(alter_partition_version_path, &alter_partition_version_stat); - - MergeTreePartInfo drop_range; - std::optional delimiting_block_lock; - getFakePartCoveringAllPartsInPartition(partition_id, drop_range, delimiting_block_lock, true); - String drop_range_fake_part_name = getPartNamePossiblyFake(format_version, drop_range); - - DataPartPtr covering_part; - DataPartsVector src_all_parts; + /// Retry if alter_partition_version changes + for (size_t retry = 0; retry < 1000; ++retry) { - /// NOTE: Some covered parts may be missing in src_all_parts if corresponding log entries are not executed yet. - auto parts_lock = src_data.lockParts(); - src_all_parts = src_data.getActivePartsToReplace(drop_range, drop_range_fake_part_name, covering_part, parts_lock); - } + String alter_partition_version_path = zookeeper_path + "/alter_partition_version"; + Coordination::Stat alter_partition_version_stat; + zookeeper->get(alter_partition_version_path, &alter_partition_version_stat); - if (covering_part) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Got part {} covering drop range {}, it's a bug", - covering_part->name, drop_range_fake_part_name); + MergeTreePartInfo drop_range; + std::optional delimiting_block_lock; + getFakePartCoveringAllPartsInPartition(partition_id, drop_range, delimiting_block_lock, true); + String drop_range_fake_part_name = getPartNamePossiblyFake(format_version, drop_range); - /// After allocating block number for drop_range we must ensure that it does not intersect block numbers - /// allocated by concurrent REPLACE query. 
- /// We could check it in multi-request atomically with creation of DROP_RANGE entry in source table log, - /// but it's better to check it here and fail as early as possible (before we have done something to destination table). - Coordination::Error version_check_code = zookeeper->trySet(alter_partition_version_path, "", alter_partition_version_stat.version); - if (version_check_code != Coordination::Error::ZOK) - throw Exception(ErrorCodes::CANNOT_ASSIGN_ALTER, "Cannot DROP PARTITION in {} after copying partition to {}, " - "because another ALTER PARTITION query was concurrently executed", - getStorageID().getFullTableName(), dest_table_storage->getStorageID().getFullTableName()); - - DataPartsVector src_parts; - MutableDataPartsVector dst_parts; - Strings block_id_paths; - Strings part_checksums; - std::vector ephemeral_locks; - - LOG_DEBUG(log, "Cloning {} parts", src_all_parts.size()); - - static const String TMP_PREFIX = "tmp_move_from_"; - - /// Clone parts into destination table. - String dest_alter_partition_version_path = dest_table_storage->zookeeper_path + "/alter_partition_version"; - Coordination::Stat dest_alter_partition_version_stat; - zookeeper->get(dest_alter_partition_version_path, &dest_alter_partition_version_stat); - for (const auto & src_part : src_all_parts) - { - if (!dest_table_storage->canReplacePartition(src_part)) - throw Exception( - "Cannot move partition '" + partition_id + "' because part '" + src_part->name + "' has inconsistent granularity with table", - ErrorCodes::LOGICAL_ERROR); - - String hash_hex = src_part->checksums.getTotalChecksumHex(); - String block_id_path; - - auto lock = dest_table_storage->allocateBlockNumber(partition_id, zookeeper, block_id_path); - if (!lock) + DataPartPtr covering_part; + DataPartsVector src_all_parts; { - LOG_INFO(log, "Part {} (hash {}) has been already attached", src_part->name, hash_hex); - continue; + /// NOTE: Some covered parts may be missing in src_all_parts if corresponding log entries are not executed yet. + auto parts_lock = src_data.lockParts(); + src_all_parts = src_data.getActivePartsToReplace(drop_range, drop_range_fake_part_name, covering_part, parts_lock); } - UInt64 index = lock->getNumber(); - MergeTreePartInfo dst_part_info(partition_id, index, index, src_part->info.level); - auto dst_part = dest_table_storage->cloneAndLoadDataPartOnSameDisk(src_part, TMP_PREFIX, dst_part_info, dest_metadata_snapshot); + if (covering_part) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Got part {} covering drop range {}, it's a bug", + covering_part->name, drop_range_fake_part_name); - src_parts.emplace_back(src_part); - dst_parts.emplace_back(dst_part); - ephemeral_locks.emplace_back(std::move(*lock)); - block_id_paths.emplace_back(block_id_path); - part_checksums.emplace_back(hash_hex); - } + /// After allocating block number for drop_range we must ensure that it does not intersect block numbers + /// allocated by concurrent REPLACE query. + /// We could check it in multi-request atomically with creation of DROP_RANGE entry in source table log, + /// but it's better to check it here and fail as early as possible (before we have done something to destination table). 
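The version check being restored below uses the same versioned-set trick in fail-fast form: bump the source table's alter_partition_version with the expected version before any destructive work on the destination, so a concurrent ALTER aborts the move early. Isolated, the fence looks roughly like this (stub in place of the real client):

    #include <stdexcept>

    enum class SetResult { OK, BADVERSION };

    // Stub for a versioned set on the alter_partition_version node; not the zkutil API.
    SetResult trySetWithVersion(const char *, int) { return SetResult::OK; }

    void fenceAgainstConcurrentAlter(int observed_version)
    {
        // Bumping the node only succeeds if nobody changed it since we read it.
        if (trySetWithVersion("/table/alter_partition_version", observed_version) != SetResult::OK)
            throw std::runtime_error("another ALTER PARTITION query was concurrently executed");
    }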
+ Coordination::Error version_check_code = zookeeper->trySet(alter_partition_version_path, "", alter_partition_version_stat.version); + if (version_check_code != Coordination::Error::ZOK) + throw Exception(ErrorCodes::CANNOT_ASSIGN_ALTER, "Cannot DROP PARTITION in {} after copying partition to {}, " + "because another ALTER PARTITION query was concurrently executed", + getStorageID().getFullTableName(), dest_table_storage->getStorageID().getFullTableName()); - ReplicatedMergeTreeLogEntryData entry_delete; - { - entry_delete.type = LogEntry::DROP_RANGE; - entry_delete.source_replica = replica_name; - entry_delete.new_part_name = drop_range_fake_part_name; - entry_delete.detach = false; //-V1048 - entry_delete.create_time = time(nullptr); - } + DataPartsVector src_parts; + MutableDataPartsVector dst_parts; + Strings block_id_paths; + Strings part_checksums; + std::vector ephemeral_locks; - ReplicatedMergeTreeLogEntryData entry; - { - MergeTreePartInfo drop_range_dest = makeDummyDropRangeForMovePartitionOrAttachPartitionFrom(partition_id); + LOG_DEBUG(log, "Cloning {} parts", src_all_parts.size()); - entry.type = ReplicatedMergeTreeLogEntryData::REPLACE_RANGE; - entry.source_replica = dest_table_storage->replica_name; - entry.create_time = time(nullptr); - entry.replace_range_entry = std::make_shared(); + static const String TMP_PREFIX = "tmp_move_from_"; - auto & entry_replace = *entry.replace_range_entry; - entry_replace.drop_range_part_name = getPartNamePossiblyFake(format_version, drop_range_dest); - entry_replace.from_database = src_data_id.database_name; - entry_replace.from_table = src_data_id.table_name; - for (const auto & part : src_parts) - entry_replace.src_part_names.emplace_back(part->name); - for (const auto & part : dst_parts) - entry_replace.new_part_names.emplace_back(part->name); - for (const String & checksum : part_checksums) - entry_replace.part_names_checksums.emplace_back(checksum); - entry_replace.columns_version = -1; - } - - clearBlocksInPartition(*zookeeper, drop_range.partition_id, drop_range.max_block, drop_range.max_block); - - DataPartsVector parts_to_remove; - Coordination::Responses op_results; - - try - { - Coordination::Requests ops; - for (size_t i = 0; i < dst_parts.size(); ++i) + /// Clone parts into destination table. 
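Unlike the ATTACH path in replacePartitionFrom, which derives a deduplication znode under blocks/ from the part's total checksum, the move path leaves block_id_path empty so every cloned part gets a fresh block number. A small helper expressing that rule; the path layout is copied from the diff, the rest is illustrative:

    #include <string>

    // REPLACE and MOVE drop the old data anyway, so they skip deduplication (empty path).
    // ATTACH derives a stable dedup znode from the part's total checksum, so re-attaching
    // the same data maps onto the same block id and is rejected by allocateBlockNumber.
    std::string makeDedupBlockIdPath(const std::string & zookeeper_path, const std::string & partition_id,
                                     const std::string & hash_hex, bool deduplicate)
    {
        if (!deduplicate)
            return {};
        return zookeeper_path + "/blocks/" + partition_id + "_replace_from_" + hash_hex;
    }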
+ String dest_alter_partition_version_path = dest_table_storage->zookeeper_path + "/alter_partition_version"; + Coordination::Stat dest_alter_partition_version_stat; + zookeeper->get(dest_alter_partition_version_path, &dest_alter_partition_version_stat); + for (const auto & src_part : src_all_parts) { - dest_table_storage->getCommitPartOps(ops, dst_parts[i], block_id_paths[i]); - ephemeral_locks[i].getUnlockOps(ops); + if (!dest_table_storage->canReplacePartition(src_part)) + throw Exception( + "Cannot move partition '" + partition_id + "' because part '" + src_part->name + "' has inconsistent granularity with table", + ErrorCodes::LOGICAL_ERROR); - if (ops.size() > zkutil::MULTI_BATCH_SIZE) + String hash_hex = src_part->checksums.getTotalChecksumHex(); + String block_id_path; + + auto lock = dest_table_storage->allocateBlockNumber(partition_id, zookeeper, block_id_path); + if (!lock) { - zookeeper->multi(ops); - ops.clear(); + LOG_INFO(log, "Part {} (hash {}) has been already attached", src_part->name, hash_hex); + continue; } + + UInt64 index = lock->getNumber(); + MergeTreePartInfo dst_part_info(partition_id, index, index, src_part->info.level); + auto dst_part = dest_table_storage->cloneAndLoadDataPartOnSameDisk(src_part, TMP_PREFIX, dst_part_info, dest_metadata_snapshot); + + src_parts.emplace_back(src_part); + dst_parts.emplace_back(dst_part); + ephemeral_locks.emplace_back(std::move(*lock)); + block_id_paths.emplace_back(block_id_path); + part_checksums.emplace_back(hash_hex); } - /// Check and update version to avoid race with DROP_RANGE - ops.emplace_back(zkutil::makeSetRequest(dest_alter_partition_version_path, "", dest_alter_partition_version_stat.version)); - /// Just update version, because merges assignment relies on it - ops.emplace_back(zkutil::makeSetRequest(fs::path(dest_table_storage->zookeeper_path) / "log", "", -1)); - ops.emplace_back(zkutil::makeCreateRequest(fs::path(dest_table_storage->zookeeper_path) / "log/log-", - entry.toString(), zkutil::CreateMode::PersistentSequential)); - + ReplicatedMergeTreeLogEntryData entry_delete; { - Transaction transaction(*dest_table_storage); - - auto src_data_parts_lock = lockParts(); - auto dest_data_parts_lock = dest_table_storage->lockParts(); - - std::mutex mutex; - DataPartsLock lock(mutex); - - for (MutableDataPartPtr & part : dst_parts) - dest_table_storage->renameTempPartAndReplace(part, nullptr, &transaction, lock); - - Coordination::Error code = zookeeper->tryMulti(ops, op_results); - if (code == Coordination::Error::ZBADVERSION) - throw Exception(ErrorCodes::CANNOT_ASSIGN_ALTER, "Cannot assign ALTER PARTITION, because another ALTER PARTITION query was concurrently executed"); - else - zkutil::KeeperMultiException::check(code, ops, op_results); - - parts_to_remove = removePartsInRangeFromWorkingSet(drop_range, true, lock); - transaction.commit(&lock); + entry_delete.type = LogEntry::DROP_RANGE; + entry_delete.source_replica = replica_name; + entry_delete.new_part_name = drop_range_fake_part_name; + entry_delete.detach = false; //-V1048 + entry_delete.create_time = time(nullptr); } - PartLog::addNewParts(getContext(), dst_parts, watch.elapsed()); - } - catch (...) 
- { - PartLog::addNewParts(getContext(), dst_parts, watch.elapsed(), ExecutionStatus::fromCurrentException()); - throw; + ReplicatedMergeTreeLogEntryData entry; + { + MergeTreePartInfo drop_range_dest = makeDummyDropRangeForMovePartitionOrAttachPartitionFrom(partition_id); + + entry.type = ReplicatedMergeTreeLogEntryData::REPLACE_RANGE; + entry.source_replica = dest_table_storage->replica_name; + entry.create_time = time(nullptr); + entry.replace_range_entry = std::make_shared(); + + auto & entry_replace = *entry.replace_range_entry; + entry_replace.drop_range_part_name = getPartNamePossiblyFake(format_version, drop_range_dest); + entry_replace.from_database = src_data_id.database_name; + entry_replace.from_table = src_data_id.table_name; + for (const auto & part : src_parts) + entry_replace.src_part_names.emplace_back(part->name); + for (const auto & part : dst_parts) + entry_replace.new_part_names.emplace_back(part->name); + for (const String & checksum : part_checksums) + entry_replace.part_names_checksums.emplace_back(checksum); + entry_replace.columns_version = -1; + } + + clearBlocksInPartition(*zookeeper, drop_range.partition_id, drop_range.max_block, drop_range.max_block); + + DataPartsVector parts_to_remove; + Coordination::Responses op_results; + + try + { + Coordination::Requests ops; + for (size_t i = 0; i < dst_parts.size(); ++i) + { + dest_table_storage->getCommitPartOps(ops, dst_parts[i], block_id_paths[i]); + ephemeral_locks[i].getUnlockOps(ops); + } + + /// Check and update version to avoid race with DROP_RANGE + ops.emplace_back(zkutil::makeSetRequest(dest_alter_partition_version_path, "", dest_alter_partition_version_stat.version)); + /// Just update version, because merges assignment relies on it + ops.emplace_back(zkutil::makeSetRequest(fs::path(dest_table_storage->zookeeper_path) / "log", "", -1)); + ops.emplace_back(zkutil::makeCreateRequest(fs::path(dest_table_storage->zookeeper_path) / "log/log-", + entry.toString(), zkutil::CreateMode::PersistentSequential)); + + { + Transaction transaction(*dest_table_storage); + + auto src_data_parts_lock = lockParts(); + auto dest_data_parts_lock = dest_table_storage->lockParts(); + + std::mutex mutex; + DataPartsLock lock(mutex); + + for (MutableDataPartPtr & part : dst_parts) + dest_table_storage->renameTempPartAndReplace(part, nullptr, &transaction, lock); + + Coordination::Error code = zookeeper->tryMulti(ops, op_results); + if (code == Coordination::Error::ZBADVERSION) + continue; + else + zkutil::KeeperMultiException::check(code, ops, op_results); + + parts_to_remove = removePartsInRangeFromWorkingSet(drop_range, true, lock); + transaction.commit(&lock); + } + + PartLog::addNewParts(getContext(), dst_parts, watch.elapsed()); + } + catch (...) 
+ { + PartLog::addNewParts(getContext(), dst_parts, watch.elapsed(), ExecutionStatus::fromCurrentException()); + throw; + } + + String log_znode_path = dynamic_cast(*op_results.back()).path_created; + entry.znode_name = log_znode_path.substr(log_znode_path.find_last_of('/') + 1); + + for (auto & lock : ephemeral_locks) + lock.assumeUnlocked(); + + tryRemovePartsFromZooKeeperWithRetries(parts_to_remove); + + parts_to_remove.clear(); + cleanup_thread.wakeup(); + lock2.reset(); + + dest_table_storage->waitForLogEntryToBeProcessedIfNecessary(entry, query_context); + + /// Create DROP_RANGE for the source table + Coordination::Requests ops_src; + ops_src.emplace_back(zkutil::makeCreateRequest( + fs::path(zookeeper_path) / "log/log-", entry_delete.toString(), zkutil::CreateMode::PersistentSequential)); + /// Just update version, because merges assignment relies on it + ops_src.emplace_back(zkutil::makeSetRequest(fs::path(zookeeper_path) / "log", "", -1)); + delimiting_block_lock->getUnlockOps(ops_src); + + op_results = zookeeper->multi(ops_src); + + log_znode_path = dynamic_cast(*op_results.front()).path_created; + entry_delete.znode_name = log_znode_path.substr(log_znode_path.find_last_of('/') + 1); + + lock1.reset(); + waitForLogEntryToBeProcessedIfNecessary(entry_delete, query_context); + + /// Cleaning possibly stored information about parts from /quorum/last_part node in ZooKeeper. + cleanLastPartNode(partition_id); + + return; } - String log_znode_path = dynamic_cast(*op_results.back()).path_created; - entry.znode_name = log_znode_path.substr(log_znode_path.find_last_of('/') + 1); - - for (auto & lock : ephemeral_locks) - lock.assumeUnlocked(); - - tryRemovePartsFromZooKeeperWithRetries(parts_to_remove); - - parts_to_remove.clear(); - cleanup_thread.wakeup(); - lock2.reset(); - - dest_table_storage->waitForLogEntryToBeProcessedIfNecessary(entry, query_context); - - /// Create DROP_RANGE for the source table - Coordination::Requests ops_src; - ops_src.emplace_back(zkutil::makeCreateRequest( - fs::path(zookeeper_path) / "log/log-", entry_delete.toString(), zkutil::CreateMode::PersistentSequential)); - /// Just update version, because merges assignment relies on it - ops_src.emplace_back(zkutil::makeSetRequest(fs::path(zookeeper_path) / "log", "", -1)); - delimiting_block_lock->getUnlockOps(ops_src); - - op_results = zookeeper->multi(ops_src); - - log_znode_path = dynamic_cast(*op_results.front()).path_created; - entry_delete.znode_name = log_znode_path.substr(log_znode_path.find_last_of('/') + 1); - - lock1.reset(); - waitForLogEntryToBeProcessedIfNecessary(entry_delete, query_context); - - /// Cleaning possibly stored information about parts from /quorum/last_part node in ZooKeeper. 
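Both the REPLACE_RANGE and DROP_RANGE branches recover the sequential entry name from the znode path that ZooKeeper reports for the created node; the idiom is small enough to isolate:

    #include <string>

    // e.g. "/clickhouse/tables/t/log/log-0000000042" -> "log-0000000042".
    // If there is no '/', find_last_of returns npos and npos + 1 wraps to 0,
    // so the whole input is returned unchanged.
    std::string znodeNameFromPath(const std::string & path_created)
    {
        return path_created.substr(path_created.find_last_of('/') + 1);
    }

With PersistentSequential creation the server appends the sequence number, so the final name is only known from path_created after the multi-request succeeds.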
- cleanLastPartNode(partition_id); + throw Exception(ErrorCodes::CANNOT_ASSIGN_ALTER, "Cannot assign ALTER PARTITION, because another ALTER PARTITION query was concurrently executed"); } void StorageReplicatedMergeTree::movePartitionToShard( @@ -6984,68 +6985,80 @@ bool StorageReplicatedMergeTree::dropPartImpl( bool StorageReplicatedMergeTree::dropAllPartsInPartition( zkutil::ZooKeeper & zookeeper, String & partition_id, LogEntry & entry, ContextPtr query_context, bool detach) { - String alter_partition_version_path = zookeeper_path + "/alter_partition_version"; - Coordination::Stat alter_partition_version_stat; - zookeeper.get(alter_partition_version_path, &alter_partition_version_stat); - - MergeTreePartInfo drop_range_info; - - /// It would prevent other replicas from assigning merges which intersect locked block number. - std::optional delimiting_block_lock; - - if (!getFakePartCoveringAllPartsInPartition(partition_id, drop_range_info, delimiting_block_lock)) + /// Retry if alter_partition_version changes + for (size_t retry = 0; retry < 1000; ++retry) { - LOG_INFO(log, "Will not drop partition {}, it is empty.", partition_id); - return false; + String alter_partition_version_path = zookeeper_path + "/alter_partition_version"; + Coordination::Stat alter_partition_version_stat; + zookeeper.get(alter_partition_version_path, &alter_partition_version_stat); + + MergeTreePartInfo drop_range_info; + + /// It would prevent other replicas from assigning merges which intersect locked block number. + std::optional delimiting_block_lock; + + if (!getFakePartCoveringAllPartsInPartition(partition_id, drop_range_info, delimiting_block_lock)) + { + LOG_INFO(log, "Will not drop partition {}, it is empty.", partition_id); + return false; + } + + clearBlocksInPartition(zookeeper, partition_id, drop_range_info.min_block, drop_range_info.max_block); + + String drop_range_fake_part_name = getPartNamePossiblyFake(format_version, drop_range_info); + + LOG_DEBUG(log, "Disabled merges covered by range {}", drop_range_fake_part_name); + + /// Finally, having achieved the necessary invariants, you can put an entry in the log. + entry.type = LogEntry::DROP_RANGE; + entry.source_replica = replica_name; + entry.new_part_name = drop_range_fake_part_name; + entry.detach = detach; + entry.create_time = time(nullptr); + + Coordination::Requests ops; + + ops.emplace_back(zkutil::makeCreateRequest(fs::path(zookeeper_path) / "log/log-", entry.toString(), + zkutil::CreateMode::PersistentSequential)); + + /// Check and update version to avoid race with REPLACE_RANGE. + /// Otherwise new parts covered by drop_range_info may appear after execution of current DROP_RANGE entry + /// as a result of execution of concurrently created REPLACE_RANGE entry. + ops.emplace_back(zkutil::makeSetRequest(alter_partition_version_path, "", alter_partition_version_stat.version)); + + /// Just update version, because merges assignment relies on it + ops.emplace_back(zkutil::makeSetRequest(fs::path(zookeeper_path) / "log", "", -1)); + delimiting_block_lock->getUnlockOps(ops); + + if (auto txn = query_context->getZooKeeperMetadataTransaction()) + txn->moveOpsTo(ops); + + Coordination::Responses responses; + Coordination::Error code = zookeeper.tryMulti(ops, responses); + + if (code == Coordination::Error::ZOK) + delimiting_block_lock->assumeUnlocked(); + else if (code == Coordination::Error::ZBADVERSION) + { + /// Cannot retry automatically, because some zookeeper ops were lost on the first attempt. Will retry on DDLWorker-level. 
+ if (query_context->getZooKeeperMetadataTransaction()) + throw Exception( + "Cannot execute alter, because alter partition version was suddenly changed due to concurrent alter", + ErrorCodes::CANNOT_ASSIGN_ALTER); + continue; + } + else + zkutil::KeeperMultiException::check(code, ops, responses); + + String log_znode_path = dynamic_cast(*responses.front()).path_created; + entry.znode_name = log_znode_path.substr(log_znode_path.find_last_of('/') + 1); + + getContext()->getMergeList().cancelInPartition(getStorageID(), partition_id, drop_range_info.max_block); + + return true; } - - clearBlocksInPartition(zookeeper, partition_id, drop_range_info.min_block, drop_range_info.max_block); - - String drop_range_fake_part_name = getPartNamePossiblyFake(format_version, drop_range_info); - - LOG_DEBUG(log, "Disabled merges covered by range {}", drop_range_fake_part_name); - - /// Finally, having achieved the necessary invariants, you can put an entry in the log. - entry.type = LogEntry::DROP_RANGE; - entry.source_replica = replica_name; - entry.new_part_name = drop_range_fake_part_name; - entry.detach = detach; - entry.create_time = time(nullptr); - - Coordination::Requests ops; - - ops.emplace_back(zkutil::makeCreateRequest(fs::path(zookeeper_path) / "log/log-", entry.toString(), - zkutil::CreateMode::PersistentSequential)); - - /// Check and update version to avoid race with REPLACE_RANGE. - /// Otherwise new parts covered by drop_range_info may appear after execution of current DROP_RANGE entry - /// as a result of execution of concurrently created REPLACE_RANGE entry. - ops.emplace_back(zkutil::makeSetRequest(alter_partition_version_path, "", alter_partition_version_stat.version)); - - /// Just update version, because merges assignment relies on it - ops.emplace_back(zkutil::makeSetRequest(fs::path(zookeeper_path) / "log", "", -1)); - delimiting_block_lock->getUnlockOps(ops); - - if (auto txn = query_context->getZooKeeperMetadataTransaction()) - txn->moveOpsTo(ops); - - Coordination::Responses responses; - Coordination::Error code = zookeeper.tryMulti(ops, responses); - - if (code == Coordination::Error::ZOK) - delimiting_block_lock->assumeUnlocked(); - else if (code == Coordination::Error::ZBADVERSION) - throw Exception(ErrorCodes::CANNOT_ASSIGN_ALTER, - "Cannot assign ALTER PARTITION because another ALTER PARTITION query was concurrently executed"); - else - zkutil::KeeperMultiException::check(code, ops, responses); - - String log_znode_path = dynamic_cast(*responses.front()).path_created; - entry.znode_name = log_znode_path.substr(log_znode_path.find_last_of('/') + 1); - - getContext()->getMergeList().cancelInPartition(getStorageID(), partition_id, drop_range_info.max_block); - - return true; + throw Exception(ErrorCodes::CANNOT_ASSIGN_ALTER, + "Cannot assign ALTER PARTITION because another ALTER PARTITION query was concurrently executed"); } diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index 80011cde077..3f08dee62b6 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -321,11 +321,6 @@ public: void consume(Chunk chunk) override { - if (is_first_chunk) - { - writer->doWritePrefix(); - is_first_chunk = false; - } writer->write(getHeader().cloneWithColumns(chunk.detachColumns())); } @@ -333,7 +328,7 @@ public: { try { - writer->doWriteSuffix(); + writer->finalize(); writer->flush(); write_buf->finalize(); } @@ -350,7 +345,6 @@ private: std::optional format_settings; std::unique_ptr write_buf; OutputFormatPtr writer; - bool is_first_chunk = true; }; 
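The S3 sink above and the URL sink below both lose their is_first_chunk flag: the output format is now expected to emit its prefix lazily on the first write, and finalize() subsumes the old doWriteSuffix(). A minimal sketch of the slimmed-down sink contract; interfaces are abbreviated and the lazy-prefix behavior is an assumption about the format layer, not shown in this diff:

    #include <memory>

    struct OutputFormat
    {
        virtual void write() = 0;     // assumed to emit the format prefix lazily on first call
        virtual void finalize() = 0;  // replaces doWriteSuffix(): writes the suffix, ends the format
        virtual ~OutputFormat() = default;
    };

    struct Sink
    {
        std::unique_ptr<OutputFormat> writer;

        void consume() { writer->write(); }      // no first-chunk bookkeeping anymore
        void onFinish() { writer->finalize(); }  // buffer flush/finalize omitted for brevity
    };

Moving the prefix/suffix responsibility into the format removes one piece of per-sink state and makes every sink's consume() a plain forwarder.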
diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 66033f7a7d6..527458ab668 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -134,11 +134,12 @@ namespace http_method, callback, timeouts, + credentials, context->getSettingsRef().max_http_get_redirects, - Poco::Net::HTTPBasicCredentials{}, DBMS_DEFAULT_BUFFER_SIZE, context->getReadSettings(), headers, + ReadWriteBufferFromHTTP::Range{}, context->getRemoteHostFilter()), chooseCompressionMethod(request_uri.getPath(), compression_method)); } @@ -202,6 +203,8 @@ namespace std::unique_ptr read_buf; std::unique_ptr pipeline; std::unique_ptr reader; + + Poco::Net::HTTPBasicCredentials credentials{}; }; } @@ -226,18 +229,12 @@ StorageURLSink::StorageURLSink( void StorageURLSink::consume(Chunk chunk) { - if (is_first_chunk) - { - writer->doWritePrefix(); - is_first_chunk = false; - } - writer->write(getHeader().cloneWithColumns(chunk.detachColumns())); } void StorageURLSink::onFinish() { - writer->doWriteSuffix(); + writer->finalize(); writer->flush(); write_buf->finalize(); } @@ -576,6 +573,8 @@ void registerStorageURL(StorageFactory & factory) for (const auto & [header, value] : configuration.headers) { auto value_literal = value.safeGet(); + if (header == "Range") + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Range headers are not allowed"); headers.emplace_back(std::make_pair(header, value_literal)); } diff --git a/src/Storages/StorageURL.h b/src/Storages/StorageURL.h index a79100c8d70..cf72352a183 100644 --- a/src/Storages/StorageURL.h +++ b/src/Storages/StorageURL.h @@ -110,8 +110,6 @@ public: private: std::unique_ptr write_buf; OutputFormatPtr writer; - - bool is_first_chunk = true; }; class StorageURL : public shared_ptr_helper, public IStorageURLBase diff --git a/src/Storages/System/IStorageSystemOneBlock.h b/src/Storages/System/IStorageSystemOneBlock.h index 9b51bd73903..33086498730 100644 --- a/src/Storages/System/IStorageSystemOneBlock.h +++ b/src/Storages/System/IStorageSystemOneBlock.h @@ -61,6 +61,8 @@ public: return Pipe(std::make_shared(sample_block, std::move(chunk))); } + bool isSystemStorage() const override { return true; } + static NamesAndAliases getNamesAndAliases() { return {}; } }; diff --git a/src/Storages/System/StorageSystemColumns.h b/src/Storages/System/StorageSystemColumns.h index adcbf384ca7..dc184b1ae42 100644 --- a/src/Storages/System/StorageSystemColumns.h +++ b/src/Storages/System/StorageSystemColumns.h @@ -26,6 +26,8 @@ public: size_t max_block_size, unsigned num_streams) override; + bool isSystemStorage() const override { return true; } + protected: StorageSystemColumns(const StorageID & table_id_); }; diff --git a/src/Storages/System/StorageSystemDataSkippingIndices.h b/src/Storages/System/StorageSystemDataSkippingIndices.h index 72f66b025cf..d86890f5e27 100644 --- a/src/Storages/System/StorageSystemDataSkippingIndices.h +++ b/src/Storages/System/StorageSystemDataSkippingIndices.h @@ -23,6 +23,8 @@ public: size_t max_block_size, unsigned num_streams) override; + bool isSystemStorage() const override { return true; } + protected: StorageSystemDataSkippingIndices(const StorageID & table_id_); }; diff --git a/src/Storages/System/StorageSystemDetachedParts.h b/src/Storages/System/StorageSystemDetachedParts.h index 9350e758d93..ece9d495500 100644 --- a/src/Storages/System/StorageSystemDetachedParts.h +++ b/src/Storages/System/StorageSystemDetachedParts.h @@ -18,6 +18,7 @@ class StorageSystemDetachedParts final : friend struct shared_ptr_helper; public: 
std::string getName() const override { return "SystemDetachedParts"; } + bool isSystemStorage() const override { return true; } protected: explicit StorageSystemDetachedParts(const StorageID & table_id_); diff --git a/src/Storages/System/StorageSystemDisks.h b/src/Storages/System/StorageSystemDisks.h index d6908a295d4..2541dedd8fc 100644 --- a/src/Storages/System/StorageSystemDisks.h +++ b/src/Storages/System/StorageSystemDisks.h @@ -29,6 +29,8 @@ public: size_t max_block_size, unsigned num_streams) override; + bool isSystemStorage() const override { return true; } + protected: StorageSystemDisks(const StorageID & table_id_); }; diff --git a/src/Storages/System/StorageSystemGrants.cpp b/src/Storages/System/StorageSystemGrants.cpp index d443830ee0a..f55145ccfc7 100644 --- a/src/Storages/System/StorageSystemGrants.cpp +++ b/src/Storages/System/StorageSystemGrants.cpp @@ -17,7 +17,6 @@ namespace DB { -using EntityType = IAccessEntity::Type; NamesAndTypesList StorageSystemGrants::getNamesAndTypes() { @@ -58,7 +57,7 @@ void StorageSystemGrants::fillData(MutableColumns & res_columns, ContextPtr cont auto & column_grant_option = assert_cast(*res_columns[column_index++]).getData(); auto add_row = [&](const String & grantee_name, - EntityType grantee_type, + AccessEntityType grantee_type, AccessType access_type, const String * database, const String * table, @@ -66,14 +65,14 @@ void StorageSystemGrants::fillData(MutableColumns & res_columns, ContextPtr cont bool is_partial_revoke, bool grant_option) { - if (grantee_type == EntityType::USER) + if (grantee_type == AccessEntityType::USER) { column_user_name.insertData(grantee_name.data(), grantee_name.length()); column_user_name_null_map.push_back(false); column_role_name.insertDefault(); column_role_name_null_map.push_back(true); } - else if (grantee_type == EntityType::ROLE) + else if (grantee_type == AccessEntityType::ROLE) { column_user_name.insertDefault(); column_user_name_null_map.push_back(true); @@ -123,7 +122,7 @@ void StorageSystemGrants::fillData(MutableColumns & res_columns, ContextPtr cont }; auto add_rows = [&](const String & grantee_name, - IAccessEntity::Type grantee_type, + AccessEntityType grantee_type, const AccessRightsElements & elements) { for (const auto & element : elements) diff --git a/src/Storages/System/StorageSystemNumbers.cpp b/src/Storages/System/StorageSystemNumbers.cpp index 136c2489be2..4aed5098bd1 100644 --- a/src/Storages/System/StorageSystemNumbers.cpp +++ b/src/Storages/System/StorageSystemNumbers.cpp @@ -149,7 +149,14 @@ Pipe StorageSystemNumbers::read( UInt64 max_counter = offset + *limit; for (size_t i = 0; i < num_streams; ++i) - pipe.addSource(std::make_shared(state, max_block_size, max_counter)); + { + auto source = std::make_shared(state, max_block_size, max_counter); + + if (i == 0) + source->addTotalRowsApprox(*limit); + + pipe.addSource(std::move(source)); + } return pipe; } diff --git a/src/Storages/System/StorageSystemNumbers.h b/src/Storages/System/StorageSystemNumbers.h index da16dc73ca8..32105bb055d 100644 --- a/src/Storages/System/StorageSystemNumbers.h +++ b/src/Storages/System/StorageSystemNumbers.h @@ -39,6 +39,7 @@ public: unsigned num_streams) override; bool hasEvenlyDistributedRead() const override { return true; } + bool isSystemStorage() const override { return true; } private: bool multithreaded; diff --git a/src/Storages/System/StorageSystemOne.h b/src/Storages/System/StorageSystemOne.h index 3fe0c91113c..cc1d5e05b75 100644 --- a/src/Storages/System/StorageSystemOne.h +++ 
b/src/Storages/System/StorageSystemOne.h @@ -30,6 +30,8 @@ public: size_t max_block_size, unsigned num_streams) override; + bool isSystemStorage() const override { return true; } + protected: explicit StorageSystemOne(const StorageID & table_id_); }; diff --git a/src/Storages/System/StorageSystemPartsBase.h b/src/Storages/System/StorageSystemPartsBase.h index aec52e6b410..87247f96b24 100644 --- a/src/Storages/System/StorageSystemPartsBase.h +++ b/src/Storages/System/StorageSystemPartsBase.h @@ -67,6 +67,8 @@ public: NamesAndTypesList getVirtuals() const override; + bool isSystemStorage() const override { return true; } + private: bool hasStateColumn(const Names & column_names, const StorageMetadataPtr & metadata_snapshot) const; diff --git a/src/Storages/System/StorageSystemQuotaLimits.cpp b/src/Storages/System/StorageSystemQuotaLimits.cpp index 330b9935b48..c98e060a62f 100644 --- a/src/Storages/System/StorageSystemQuotaLimits.cpp +++ b/src/Storages/System/StorageSystemQuotaLimits.cpp @@ -15,27 +15,22 @@ namespace DB { -using ResourceAmount = Quota::ResourceAmount; -using ResourceType = Quota::ResourceType; -using ResourceTypeInfo = Quota::ResourceTypeInfo; -constexpr auto MAX_RESOURCE_TYPE = Quota::MAX_RESOURCE_TYPE; - namespace { - void addValue(IColumn & out_column, NullMap & out_column_null_map, ResourceAmount amount, const ResourceTypeInfo & type_info) + void addValue(IColumn & out_column, NullMap & out_column_null_map, QuotaValue value, const QuotaTypeInfo & type_info) { out_column_null_map.push_back(false); if (type_info.output_as_float) - static_cast(out_column).getData().push_back(double(amount) / type_info.output_denominator); + static_cast(out_column).getData().push_back(double(value) / type_info.output_denominator); else - static_cast(out_column).getData().push_back(amount / type_info.output_denominator); + static_cast(out_column).getData().push_back(value / type_info.output_denominator); } - void addValue(IColumn & out_column, NullMap & out_column_null_map, std::optional amount, const ResourceTypeInfo & type_info) + void addValue(IColumn & out_column, NullMap & out_column_null_map, std::optional value, const QuotaTypeInfo & type_info) { - if (amount) - addValue(out_column, out_column_null_map, *amount, type_info); + if (value) + addValue(out_column, out_column_null_map, *value, type_info); else { out_column_null_map.push_back(true); @@ -53,9 +48,9 @@ NamesAndTypesList StorageSystemQuotaLimits::getNamesAndTypes() {"is_randomized_interval", std::make_shared()}, }; - for (auto resource_type : collections::range(MAX_RESOURCE_TYPE)) + for (auto quota_type : collections::range(QuotaType::MAX)) { - const auto & type_info = ResourceTypeInfo::get(resource_type); + const auto & type_info = QuotaTypeInfo::get(quota_type); String column_name = "max_" + type_info.name; DataTypePtr data_type; if (type_info.output_as_float) @@ -80,12 +75,13 @@ void StorageSystemQuotaLimits::fillData(MutableColumns & res_columns, ContextPtr auto & column_duration = assert_cast(*res_columns[column_index++]).getData(); auto & column_is_randomized_interval = assert_cast(*res_columns[column_index++]).getData(); - IColumn * column_max[MAX_RESOURCE_TYPE]; - NullMap * column_max_null_map[MAX_RESOURCE_TYPE]; - for (auto resource_type : collections::range(MAX_RESOURCE_TYPE)) + IColumn * column_max[static_cast(QuotaType::MAX)]; + NullMap * column_max_null_map[static_cast(QuotaType::MAX)]; + for (auto quota_type : collections::range(QuotaType::MAX)) { - column_max[resource_type] = 
&assert_cast(*res_columns[column_index]).getNestedColumn(); - column_max_null_map[resource_type] = &assert_cast(*res_columns[column_index++]).getNullMapData(); + auto quota_type_i = static_cast(quota_type); + column_max[quota_type_i] = &assert_cast(*res_columns[column_index]).getNestedColumn(); + column_max_null_map[quota_type_i] = &assert_cast(*res_columns[column_index++]).getNullMapData(); } auto add_row = [&](const String & quota_name, const Quota::Limits & limits) @@ -94,10 +90,11 @@ void StorageSystemQuotaLimits::fillData(MutableColumns & res_columns, ContextPtr column_duration.push_back(limits.duration.count()); column_is_randomized_interval.push_back(limits.randomize_interval); - for (auto resource_type : collections::range(MAX_RESOURCE_TYPE)) + for (auto quota_type : collections::range(QuotaType::MAX)) { - const auto & type_info = ResourceTypeInfo::get(resource_type); - addValue(*column_max[resource_type], *column_max_null_map[resource_type], limits.max[resource_type], type_info); + auto quota_type_i = static_cast(quota_type); + const auto & type_info = QuotaTypeInfo::get(quota_type); + addValue(*column_max[quota_type_i], *column_max_null_map[quota_type_i], limits.max[quota_type_i], type_info); } }; diff --git a/src/Storages/System/StorageSystemQuotaUsage.cpp b/src/Storages/System/StorageSystemQuotaUsage.cpp index a08f6686030..54f403803d6 100644 --- a/src/Storages/System/StorageSystemQuotaUsage.cpp +++ b/src/Storages/System/StorageSystemQuotaUsage.cpp @@ -15,27 +15,22 @@ namespace DB { -using ResourceAmount = Quota::ResourceAmount; -using ResourceType = Quota::ResourceType; -using ResourceTypeInfo = Quota::ResourceTypeInfo; -constexpr auto MAX_RESOURCE_TYPE = Quota::MAX_RESOURCE_TYPE; - namespace { - void addValue(IColumn & out_column, NullMap & out_column_null_map, ResourceAmount amount, const ResourceTypeInfo & type_info) + void addValue(IColumn & out_column, NullMap & out_column_null_map, QuotaValue value, const QuotaTypeInfo & type_info) { out_column_null_map.push_back(false); if (type_info.output_as_float) - static_cast(out_column).getData().push_back(double(amount) / type_info.output_denominator); + static_cast(out_column).getData().push_back(double(value) / type_info.output_denominator); else - static_cast(out_column).getData().push_back(amount / type_info.output_denominator); + static_cast(out_column).getData().push_back(value / type_info.output_denominator); } - void addValue(IColumn & out_column, NullMap & out_column_null_map, std::optional amount, const ResourceTypeInfo & type_info) + void addValue(IColumn & out_column, NullMap & out_column_null_map, std::optional value, const QuotaTypeInfo & type_info) { - if (amount) - addValue(out_column, out_column_null_map, *amount, type_info); + if (value) + addValue(out_column, out_column_null_map, *value, type_info); else { out_column_null_map.push_back(true); @@ -64,9 +59,9 @@ NamesAndTypesList StorageSystemQuotaUsage::getNamesAndTypesImpl(bool add_column_ names_and_types.push_back({"end_time", std::make_shared(std::make_shared())}); names_and_types.push_back({"duration", std::make_shared(std::make_shared())}); - for (auto resource_type : collections::range(MAX_RESOURCE_TYPE)) + for (auto quota_type : collections::range(QuotaType::MAX)) { - const auto & type_info = ResourceTypeInfo::get(resource_type); + const auto & type_info = QuotaTypeInfo::get(quota_type); String column_name = type_info.name; DataTypePtr data_type; if (type_info.output_as_float) @@ -113,16 +108,17 @@ void StorageSystemQuotaUsage::fillDataImpl( auto & 
column_duration = assert_cast(assert_cast(*res_columns[column_index]).getNestedColumn()); auto & column_duration_null_map = assert_cast(*res_columns[column_index++]).getNullMapData(); - IColumn * column_usage[MAX_RESOURCE_TYPE]; - NullMap * column_usage_null_map[MAX_RESOURCE_TYPE]; - IColumn * column_max[MAX_RESOURCE_TYPE]; - NullMap * column_max_null_map[MAX_RESOURCE_TYPE]; - for (auto resource_type : collections::range(MAX_RESOURCE_TYPE)) + IColumn * column_usage[static_cast(QuotaType::MAX)]; + NullMap * column_usage_null_map[static_cast(QuotaType::MAX)]; + IColumn * column_max[static_cast(QuotaType::MAX)]; + NullMap * column_max_null_map[static_cast(QuotaType::MAX)]; + for (auto quota_type : collections::range(QuotaType::MAX)) { - column_usage[resource_type] = &assert_cast(*res_columns[column_index]).getNestedColumn(); - column_usage_null_map[resource_type] = &assert_cast(*res_columns[column_index++]).getNullMapData(); - column_max[resource_type] = &assert_cast(*res_columns[column_index]).getNestedColumn(); - column_max_null_map[resource_type] = &assert_cast(*res_columns[column_index++]).getNullMapData(); + auto quota_type_i = static_cast(quota_type); + column_usage[quota_type_i] = &assert_cast(*res_columns[column_index]).getNestedColumn(); + column_usage_null_map[quota_type_i] = &assert_cast(*res_columns[column_index++]).getNullMapData(); + column_max[quota_type_i] = &assert_cast(*res_columns[column_index]).getNestedColumn(); + column_max_null_map[quota_type_i] = &assert_cast(*res_columns[column_index++]).getNullMapData(); } std::optional current_quota_id; @@ -148,12 +144,13 @@ void StorageSystemQuotaUsage::fillDataImpl( column_end_time_null_map.push_back(true); column_duration.insertDefault(); column_duration_null_map.push_back(true); - for (auto resource_type : collections::range(MAX_RESOURCE_TYPE)) + for (auto quota_type : collections::range(QuotaType::MAX)) { - column_usage[resource_type]->insertDefault(); - column_usage_null_map[resource_type]->push_back(true); - column_max[resource_type]->insertDefault(); - column_max_null_map[resource_type]->push_back(true); + auto quota_type_i = static_cast(quota_type); + column_usage[quota_type_i]->insertDefault(); + column_usage_null_map[quota_type_i]->push_back(true); + column_max[quota_type_i]->insertDefault(); + column_max_null_map[quota_type_i]->push_back(true); } return; } @@ -168,11 +165,12 @@ void StorageSystemQuotaUsage::fillDataImpl( column_end_time_null_map.push_back(false); column_duration_null_map.push_back(false); - for (auto resource_type : collections::range(Quota::MAX_RESOURCE_TYPE)) + for (auto quota_type : collections::range(QuotaType::MAX)) { - const auto & type_info = ResourceTypeInfo::get(resource_type); - addValue(*column_max[resource_type], *column_max_null_map[resource_type], interval->max[resource_type], type_info); - addValue(*column_usage[resource_type], *column_usage_null_map[resource_type], interval->used[resource_type], type_info); + auto quota_type_i = static_cast(quota_type); + const auto & type_info = QuotaTypeInfo::get(quota_type); + addValue(*column_max[quota_type_i], *column_max_null_map[quota_type_i], interval->max[quota_type_i], type_info); + addValue(*column_usage[quota_type_i], *column_usage_null_map[quota_type_i], interval->used[quota_type_i], type_info); } }; diff --git a/src/Storages/System/StorageSystemQuotas.cpp b/src/Storages/System/StorageSystemQuotas.cpp index 2294af87fed..fa262f22d2c 100644 --- a/src/Storages/System/StorageSystemQuotas.cpp +++ b/src/Storages/System/StorageSystemQuotas.cpp @@ 
-19,16 +19,13 @@ namespace DB { namespace { - using KeyType = Quota::KeyType; - using KeyTypeInfo = Quota::KeyTypeInfo; - DataTypeEnum8::Values getKeyTypeEnumValues() { DataTypeEnum8::Values enum_values; - for (auto key_type : collections::range(KeyType::MAX)) + for (auto key_type : collections::range(QuotaKeyType::MAX)) { - const auto & type_info = KeyTypeInfo::get(key_type); - if ((key_type != KeyType::NONE) && type_info.base_types.empty()) + const auto & type_info = QuotaKeyTypeInfo::get(key_type); + if ((key_type != QuotaKeyType::NONE) && type_info.base_types.empty()) enum_values.push_back({type_info.name, static_cast(key_type)}); } return enum_values; @@ -76,16 +73,16 @@ void StorageSystemQuotas::fillData(MutableColumns & res_columns, ContextPtr cont const UUID & id, const String & storage_name, const std::vector & all_limits, - KeyType key_type, + QuotaKeyType key_type, const RolesOrUsersSet & apply_to) { column_name.insertData(name.data(), name.length()); column_id.push_back(id.toUnderType()); column_storage.insertData(storage_name.data(), storage_name.length()); - if (key_type != KeyType::NONE) + if (key_type != QuotaKeyType::NONE) { - const auto & type_info = KeyTypeInfo::get(key_type); + const auto & type_info = QuotaKeyTypeInfo::get(key_type); for (auto base_type : type_info.base_types) column_key_types.push_back(static_cast(base_type)); if (type_info.base_types.empty()) diff --git a/src/Storages/System/StorageSystemReplicas.h b/src/Storages/System/StorageSystemReplicas.h index ca36eef28c1..cf457efe250 100644 --- a/src/Storages/System/StorageSystemReplicas.h +++ b/src/Storages/System/StorageSystemReplicas.h @@ -27,6 +27,8 @@ public: size_t max_block_size, unsigned num_streams) override; + bool isSystemStorage() const override { return true; } + protected: StorageSystemReplicas(const StorageID & table_id_); }; diff --git a/src/Storages/System/StorageSystemRoleGrants.cpp b/src/Storages/System/StorageSystemRoleGrants.cpp index 080c73726bc..94ee28cfe83 100644 --- a/src/Storages/System/StorageSystemRoleGrants.cpp +++ b/src/Storages/System/StorageSystemRoleGrants.cpp @@ -15,8 +15,6 @@ namespace DB { -using EntityType = IAccessEntity::Type; - NamesAndTypesList StorageSystemRoleGrants::getNamesAndTypes() { @@ -48,19 +46,19 @@ void StorageSystemRoleGrants::fillData(MutableColumns & res_columns, ContextPtr auto & column_admin_option = assert_cast(*res_columns[column_index++]).getData(); auto add_row = [&](const String & grantee_name, - IAccessEntity::Type grantee_type, + AccessEntityType grantee_type, const String & granted_role_name, bool is_default, bool with_admin_option) { - if (grantee_type == EntityType::USER) + if (grantee_type == AccessEntityType::USER) { column_user_name.insertData(grantee_name.data(), grantee_name.length()); column_user_name_null_map.push_back(false); column_role_name.insertDefault(); column_role_name_null_map.push_back(true); } - else if (grantee_type == EntityType::ROLE) + else if (grantee_type == AccessEntityType::ROLE) { column_user_name.insertDefault(); column_user_name_null_map.push_back(true); @@ -76,7 +74,7 @@ void StorageSystemRoleGrants::fillData(MutableColumns & res_columns, ContextPtr }; auto add_rows = [&](const String & grantee_name, - IAccessEntity::Type grantee_type, + AccessEntityType grantee_type, const GrantedRoles & granted_roles, const RolesOrUsersSet * default_roles) { diff --git a/src/Storages/System/StorageSystemRowPolicies.cpp b/src/Storages/System/StorageSystemRowPolicies.cpp index 202ec5078d8..455d715d5da 100644 --- 
a/src/Storages/System/StorageSystemRowPolicies.cpp +++ b/src/Storages/System/StorageSystemRowPolicies.cpp @@ -19,10 +19,6 @@ namespace DB { -using ConditionTypeInfo = RowPolicy::ConditionTypeInfo; -constexpr auto MAX_CONDITION_TYPE = RowPolicy::MAX_CONDITION_TYPE; - - NamesAndTypesList StorageSystemRowPolicies::getNamesAndTypes() { NamesAndTypesList names_and_types{ @@ -34,9 +30,9 @@ NamesAndTypesList StorageSystemRowPolicies::getNamesAndTypes() {"storage", std::make_shared()}, }; - for (auto type : collections::range(MAX_CONDITION_TYPE)) + for (auto filter_type : collections::range(RowPolicyFilterType::MAX)) { - const String & column_name = ConditionTypeInfo::get(type).name; + const String & column_name = RowPolicyFilterTypeInfo::get(filter_type).name; names_and_types.push_back({column_name, std::make_shared(std::make_shared())}); } @@ -66,12 +62,13 @@ void StorageSystemRowPolicies::fillData(MutableColumns & res_columns, ContextPtr auto & column_id = assert_cast(*res_columns[column_index++]).getData(); auto & column_storage = assert_cast(*res_columns[column_index++]); - ColumnString * column_condition[MAX_CONDITION_TYPE]; - NullMap * column_condition_null_map[MAX_CONDITION_TYPE]; - for (auto condition_type : collections::range(MAX_CONDITION_TYPE)) + ColumnString * column_filter[static_cast(RowPolicyFilterType::MAX)]; + NullMap * column_filter_null_map[static_cast(RowPolicyFilterType::MAX)]; + for (auto filter_type : collections::range(RowPolicyFilterType::MAX)) { - column_condition[condition_type] = &assert_cast(assert_cast(*res_columns[column_index]).getNestedColumn()); - column_condition_null_map[condition_type] = &assert_cast(*res_columns[column_index++]).getNullMapData(); + auto filter_type_i = static_cast(filter_type); + column_filter[filter_type_i] = &assert_cast(assert_cast(*res_columns[column_index]).getNestedColumn()); + column_filter_null_map[filter_type_i] = &assert_cast(*res_columns[column_index++]).getNullMapData(); } auto & column_is_restrictive = assert_cast(*res_columns[column_index++]).getData(); @@ -82,32 +79,33 @@ void StorageSystemRowPolicies::fillData(MutableColumns & res_columns, ContextPtr auto & column_apply_to_except_offsets = assert_cast(*res_columns[column_index++]).getOffsets(); auto add_row = [&](const String & name, - const RowPolicy::NameParts & name_parts, + const RowPolicyName & full_name, const UUID & id, const String & storage_name, - const std::array & conditions, + const std::array(RowPolicyFilterType::MAX)> & filters, bool is_restrictive, const RolesOrUsersSet & apply_to) { column_name.insertData(name.data(), name.length()); - column_short_name.insertData(name_parts.short_name.data(), name_parts.short_name.length()); - column_database.insertData(name_parts.database.data(), name_parts.database.length()); - column_table.insertData(name_parts.table_name.data(), name_parts.table_name.length()); + column_short_name.insertData(full_name.short_name.data(), full_name.short_name.length()); + column_database.insertData(full_name.database.data(), full_name.database.length()); + column_table.insertData(full_name.table_name.data(), full_name.table_name.length()); column_id.push_back(id.toUnderType()); column_storage.insertData(storage_name.data(), storage_name.length()); - for (auto condition_type : collections::range(MAX_CONDITION_TYPE)) + for (auto filter_type : collections::range(RowPolicyFilterType::MAX)) { - const String & condition = conditions[condition_type]; - if (condition.empty()) + auto filter_type_i = static_cast(filter_type); + const String & filter 
= filters[filter_type_i]; + if (filter.empty()) { - column_condition[condition_type]->insertDefault(); - column_condition_null_map[condition_type]->push_back(true); + column_filter[filter_type_i]->insertDefault(); + column_filter_null_map[filter_type_i]->push_back(true); } else { - column_condition[condition_type]->insertData(condition.data(), condition.length()); - column_condition_null_map[condition_type]->push_back(false); + column_filter[filter_type_i]->insertData(filter.data(), filter.length()); + column_filter_null_map[filter_type_i]->push_back(false); } } @@ -134,7 +132,7 @@ void StorageSystemRowPolicies::fillData(MutableColumns & res_columns, ContextPtr if (!storage) continue; - add_row(policy->getName(), policy->getNameParts(), id, storage->getStorageName(), policy->conditions, policy->isRestrictive(), policy->to_roles); + add_row(policy->getName(), policy->getFullName(), id, storage->getStorageName(), policy->filters, policy->isRestrictive(), policy->to_roles); } } } diff --git a/src/Storages/System/StorageSystemSettingsProfileElements.cpp b/src/Storages/System/StorageSystemSettingsProfileElements.cpp index b2991baf9cb..8013a3f2e9e 100644 --- a/src/Storages/System/StorageSystemSettingsProfileElements.cpp +++ b/src/Storages/System/StorageSystemSettingsProfileElements.cpp @@ -16,8 +16,6 @@ namespace DB { -using EntityType = IAccessEntity::Type; - NamesAndTypesList StorageSystemSettingsProfileElements::getNamesAndTypes() { @@ -66,7 +64,7 @@ void StorageSystemSettingsProfileElements::fillData(MutableColumns & res_columns auto & column_inherit_profile = assert_cast(assert_cast(*res_columns[i]).getNestedColumn()); auto & column_inherit_profile_null_map = assert_cast(*res_columns[i++]).getNullMapData(); - auto add_rows_for_single_element = [&](const String & owner_name, EntityType owner_type, const SettingsProfileElement & element, size_t & index) + auto add_rows_for_single_element = [&](const String & owner_name, AccessEntityType owner_type, const SettingsProfileElement & element, size_t & index) { size_t old_num_rows = column_profile_name.size(); size_t new_num_rows = old_num_rows + 1; @@ -133,19 +131,19 @@ void StorageSystemSettingsProfileElements::fillData(MutableColumns & res_columns { switch (owner_type) { - case EntityType::SETTINGS_PROFILE: + case AccessEntityType::SETTINGS_PROFILE: { column_profile_name.insertData(owner_name.data(), owner_name.length()); column_profile_name_null_map.push_back(false); break; } - case EntityType::USER: + case AccessEntityType::USER: { column_user_name.insertData(owner_name.data(), owner_name.length()); column_user_name_null_map.push_back(false); break; } - case EntityType::ROLE: + case AccessEntityType::ROLE: { column_role_name.insertData(owner_name.data(), owner_name.length()); column_role_name_null_map.push_back(false); @@ -162,7 +160,7 @@ void StorageSystemSettingsProfileElements::fillData(MutableColumns & res_columns } }; - auto add_rows = [&](const String & owner_name, IAccessEntity::Type owner_type, const SettingsProfileElements & elements) + auto add_rows = [&](const String & owner_name, AccessEntityType owner_type, const SettingsProfileElements & elements) { size_t index = 0; for (const auto & element : elements) diff --git a/src/Storages/System/StorageSystemStoragePolicies.h b/src/Storages/System/StorageSystemStoragePolicies.h index ad8b507b7c0..f202299db1f 100644 --- a/src/Storages/System/StorageSystemStoragePolicies.h +++ b/src/Storages/System/StorageSystemStoragePolicies.h @@ -29,6 +29,8 @@ public: size_t max_block_size, unsigned 
num_streams) override; + bool isSystemStorage() const override { return true; } + protected: StorageSystemStoragePolicies(const StorageID & table_id_); }; diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index f32a609077f..4daf0a68f1d 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -59,6 +59,10 @@ StorageSystemTables::StorageSystemTables(const StorageID & table_id_) {"lifetime_bytes", std::make_shared(std::make_shared())}, {"comment", std::make_shared()}, {"has_own_data", std::make_shared()}, + {"loading_dependencies_database", std::make_shared(std::make_shared())}, + {"loading_dependencies_table", std::make_shared(std::make_shared())}, + {"loading_dependent_database", std::make_shared(std::make_shared())}, + {"loading_dependent_table", std::make_shared(std::make_shared())}, }, { {"table", std::make_shared(), "name"} })); @@ -213,53 +217,10 @@ protected: if (columns_mask[src_index++]) res_columns[res_index++]->insert(table.second->getName()); - // as_select - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // partition_key - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // sorting_key - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // primary_key - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // sampling_key - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // storage_policy - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // total_rows - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // total_bytes - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // lifetime_rows - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // lifetime_bytes - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // comment - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); - - // has_own_data - if (columns_mask[src_index++]) - res_columns[res_index++]->insertDefault(); + /// Fill the rest columns with defaults + while (src_index < columns_mask.size()) + if (columns_mask[src_index++]) + res_columns[res_index++]->insertDefault(); } } @@ -507,6 +468,42 @@ protected: else res_columns[res_index++]->insertDefault(); } + + if (columns_mask[src_index] || columns_mask[src_index + 1] || columns_mask[src_index + 2] || columns_mask[src_index + 3]) + { + DependenciesInfo info = DatabaseCatalog::instance().getLoadingDependenciesInfo({database_name, table_name}); + + Array loading_dependencies_databases; + Array loading_dependencies_tables; + loading_dependencies_databases.reserve(info.dependencies.size()); + loading_dependencies_tables.reserve(info.dependencies.size()); + for (auto && dependency : info.dependencies) + { + loading_dependencies_databases.push_back(std::move(dependency.database)); + loading_dependencies_tables.push_back(std::move(dependency.table)); + } + + Array loading_dependent_databases; + Array loading_dependent_tables; + loading_dependent_databases.reserve(info.dependencies.size()); + loading_dependent_tables.reserve(info.dependencies.size()); + for (auto && dependent : info.dependent_database_objects) + { + loading_dependent_databases.push_back(std::move(dependent.database)); + loading_dependent_tables.push_back(std::move(dependent.table)); + } + + if 
(columns_mask[src_index++]) + res_columns[res_index++]->insert(loading_dependencies_databases); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(loading_dependencies_tables); + + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(loading_dependent_databases); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(loading_dependent_tables); + + } } } diff --git a/src/Storages/System/StorageSystemTables.h b/src/Storages/System/StorageSystemTables.h index 6424d623830..808dc862e8d 100644 --- a/src/Storages/System/StorageSystemTables.h +++ b/src/Storages/System/StorageSystemTables.h @@ -27,6 +27,8 @@ public: size_t max_block_size, unsigned num_streams) override; + bool isSystemStorage() const override { return true; } + protected: StorageSystemTables(const StorageID & table_id_); }; diff --git a/src/Storages/System/StorageSystemZeros.h b/src/Storages/System/StorageSystemZeros.h index e40b9c3993a..f5b2bb43117 100644 --- a/src/Storages/System/StorageSystemZeros.h +++ b/src/Storages/System/StorageSystemZeros.h @@ -30,6 +30,7 @@ public: unsigned num_streams) override; bool hasEvenlyDistributedRead() const override { return true; } + bool isSystemStorage() const override { return true; } private: bool multithreaded; diff --git a/src/Storages/System/attachInformationSchemaTables.cpp b/src/Storages/System/attachInformationSchemaTables.cpp index cfe5de1dc41..803e9d55dac 100644 --- a/src/Storages/System/attachInformationSchemaTables.cpp +++ b/src/Storages/System/attachInformationSchemaTables.cpp @@ -31,14 +31,14 @@ static void createInformationSchemaView(ContextMutablePtr context, IDatabase & d DBMS_DEFAULT_MAX_QUERY_SIZE, DBMS_DEFAULT_MAX_PARSER_DEPTH); auto & ast_create = ast->as(); - assert(view_name == ast_create.table); + assert(view_name == ast_create.getTable()); if (is_uppercase) - ast_create.table = Poco::toUpper(view_name); + ast_create.setTable(Poco::toUpper(view_name)); StoragePtr view = createTableFromAST(ast_create, database.getDatabaseName(), database.getTableDataPath(ast_create), context, true).second; - database.createTable(context, ast_create.table, view, ast); + database.createTable(context, ast_create.getTable(), view, ast); } catch (...) 
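The StorageSystemTables change above collapses twelve per-column default fills into one mask-driven loop. As a minimal illustration of that columns_mask pattern (a Python sketch with invented names, not ClickHouse code): only the source columns the query actually requested get a slot in res_columns, and any requested column the current code path cannot compute is filled with a default:

    def fill_columns(columns_mask, producers, res_columns):
        """Append one row. columns_mask[i] says whether source column i was
        requested; producers cover the leading columns that can be computed."""
        src_index = 0
        res_index = 0
        for produce in producers:
            if columns_mask[src_index]:
                res_columns[res_index].append(produce())
                res_index += 1
            src_index += 1
        # Fill the rest of the requested columns with defaults,
        # mirroring the new while-loop above.
        while src_index < len(columns_mask):
            if columns_mask[src_index]:
                res_columns[res_index].append(None)  # stand-in for insertDefault()
                res_index += 1
            src_index += 1

The payoff is the same as in the C++: adding a new trailing column, such as the four loading_dependencies columns introduced here, no longer requires touching the default-fill code.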
{ diff --git a/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp index 30fe38f4936..023ced35a6b 100644 --- a/src/Storages/System/attachSystemTables.cpp +++ b/src/Storages/System/attachSystemTables.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include #include @@ -80,92 +81,92 @@ namespace DB { -void attachSystemTablesLocal(IDatabase & system_database) +void attachSystemTablesLocal(ContextPtr context, IDatabase & system_database) { - attach(system_database, "one"); - attach(system_database, "numbers", false); - attach(system_database, "numbers_mt", true); - attach(system_database, "zeros", false); - attach(system_database, "zeros_mt", true); - attach(system_database, "databases"); - attach(system_database, "tables"); - attach(system_database, "columns"); - attach(system_database, "functions"); - attach(system_database, "events"); - attach(system_database, "settings"); - attach>(system_database, "merge_tree_settings"); - attach>(system_database, "replicated_merge_tree_settings"); - attach(system_database, "build_options"); - attach(system_database, "formats"); - attach(system_database, "table_functions"); - attach(system_database, "aggregate_function_combinators"); - attach(system_database, "data_type_families"); - attach(system_database, "collations"); - attach(system_database, "table_engines"); - attach(system_database, "contributors"); - attach(system_database, "users"); - attach(system_database, "roles"); - attach(system_database, "grants"); - attach(system_database, "role_grants"); - attach(system_database, "current_roles"); - attach(system_database, "enabled_roles"); - attach(system_database, "settings_profiles"); - attach(system_database, "settings_profile_elements"); - attach(system_database, "row_policies"); - attach(system_database, "quotas"); - attach(system_database, "quota_limits"); - attach(system_database, "quota_usage"); - attach(system_database, "quotas_usage"); - attach(system_database, "user_directories"); - attach(system_database, "privileges"); - attach(system_database, "errors"); - attach(system_database, "warnings"); - attach(system_database, "data_skipping_indices"); - attach(system_database, "licenses"); - attach(system_database, "time_zones"); + attach(context, system_database, "one"); + attach(context, system_database, "numbers", false); + attach(context, system_database, "numbers_mt", true); + attach(context, system_database, "zeros", false); + attach(context, system_database, "zeros_mt", true); + attach(context, system_database, "databases"); + attach(context, system_database, "tables"); + attach(context, system_database, "columns"); + attach(context, system_database, "functions"); + attach(context, system_database, "events"); + attach(context, system_database, "settings"); + attach>(context, system_database, "merge_tree_settings"); + attach>(context, system_database, "replicated_merge_tree_settings"); + attach(context, system_database, "build_options"); + attach(context, system_database, "formats"); + attach(context, system_database, "table_functions"); + attach(context, system_database, "aggregate_function_combinators"); + attach(context, system_database, "data_type_families"); + attach(context, system_database, "collations"); + attach(context, system_database, "table_engines"); + attach(context, system_database, "contributors"); + attach(context, system_database, "users"); + attach(context, system_database, "roles"); + attach(context, system_database, "grants"); + attach(context, system_database, 
"role_grants"); + attach(context, system_database, "current_roles"); + attach(context, system_database, "enabled_roles"); + attach(context, system_database, "settings_profiles"); + attach(context, system_database, "settings_profile_elements"); + attach(context, system_database, "row_policies"); + attach(context, system_database, "quotas"); + attach(context, system_database, "quota_limits"); + attach(context, system_database, "quota_usage"); + attach(context, system_database, "quotas_usage"); + attach(context, system_database, "user_directories"); + attach(context, system_database, "privileges"); + attach(context, system_database, "errors"); + attach(context, system_database, "warnings"); + attach(context, system_database, "data_skipping_indices"); + attach(context, system_database, "licenses"); + attach(context, system_database, "time_zones"); #ifdef OS_LINUX - attach(system_database, "stack_trace"); + attach(context, system_database, "stack_trace"); #endif #if USE_ROCKSDB - attach(system_database, "rocksdb"); + attach(context, system_database, "rocksdb"); #endif } -void attachSystemTablesServer(IDatabase & system_database, bool has_zookeeper) +void attachSystemTablesServer(ContextPtr context, IDatabase & system_database, bool has_zookeeper) { - attachSystemTablesLocal(system_database); + attachSystemTablesLocal(context, system_database); - attach(system_database, "parts"); - attach(system_database, "projection_parts"); - attach(system_database, "detached_parts"); - attach(system_database, "parts_columns"); - attach(system_database, "projection_parts_columns"); - attach(system_database, "disks"); - attach(system_database, "storage_policies"); - attach(system_database, "processes"); - attach(system_database, "metrics"); - attach(system_database, "merges"); - attach(system_database, "mutations"); - attach(system_database, "replicas"); - attach(system_database, "replication_queue"); - attach(system_database, "distributed_ddl_queue"); - attach(system_database, "distribution_queue"); - attach(system_database, "dictionaries"); - attach(system_database, "models"); - attach(system_database, "clusters"); - attach(system_database, "graphite_retentions"); - attach(system_database, "macros"); - attach(system_database, "replicated_fetches"); - attach(system_database, "part_moves_between_shards"); - attach(system_database, "asynchronous_inserts"); + attach(context, system_database, "parts"); + attach(context, system_database, "projection_parts"); + attach(context, system_database, "detached_parts"); + attach(context, system_database, "parts_columns"); + attach(context, system_database, "projection_parts_columns"); + attach(context, system_database, "disks"); + attach(context, system_database, "storage_policies"); + attach(context, system_database, "processes"); + attach(context, system_database, "metrics"); + attach(context, system_database, "merges"); + attach(context, system_database, "mutations"); + attach(context, system_database, "replicas"); + attach(context, system_database, "replication_queue"); + attach(context, system_database, "distributed_ddl_queue"); + attach(context, system_database, "distribution_queue"); + attach(context, system_database, "dictionaries"); + attach(context, system_database, "models"); + attach(context, system_database, "clusters"); + attach(context, system_database, "graphite_retentions"); + attach(context, system_database, "macros"); + attach(context, system_database, "replicated_fetches"); + attach(context, system_database, "part_moves_between_shards"); + 
attach(context, system_database, "asynchronous_inserts"); if (has_zookeeper) - attach(system_database, "zookeeper"); + attach(context, system_database, "zookeeper"); } -void attachSystemTablesAsync(IDatabase & system_database, AsynchronousMetrics & async_metrics) +void attachSystemTablesAsync(ContextPtr context, IDatabase & system_database, AsynchronousMetrics & async_metrics) { - attach(system_database, "asynchronous_metrics", async_metrics); + attach(context, system_database, "asynchronous_metrics", async_metrics); } } diff --git a/src/Storages/System/attachSystemTables.h b/src/Storages/System/attachSystemTables.h index 71570506a1a..4c1a79f84dd 100644 --- a/src/Storages/System/attachSystemTables.h +++ b/src/Storages/System/attachSystemTables.h @@ -1,17 +1,16 @@ #pragma once #include - +#include namespace DB { -class Context; class AsynchronousMetrics; class IDatabase; -void attachSystemTablesServer(IDatabase & system_database, bool has_zookeeper); -void attachSystemTablesLocal(IDatabase & system_database); -void attachSystemTablesAsync(IDatabase & system_database, AsynchronousMetrics & async_metrics); +void attachSystemTablesServer(ContextPtr context, IDatabase & system_database, bool has_zookeeper); +void attachSystemTablesLocal(ContextPtr context, IDatabase & system_database); +void attachSystemTablesAsync(ContextPtr context, IDatabase & system_database, AsynchronousMetrics & async_metrics); } diff --git a/src/Storages/System/attachSystemTablesImpl.h b/src/Storages/System/attachSystemTablesImpl.h index 1fdf677699a..4f83a0a4fda 100644 --- a/src/Storages/System/attachSystemTablesImpl.h +++ b/src/Storages/System/attachSystemTablesImpl.h @@ -7,14 +7,14 @@ namespace DB { template -void attach(IDatabase & system_database, const String & table_name, StorageArgs && ... args) +void attach(ContextPtr context, IDatabase & system_database, const String & table_name, StorageArgs && ... 
args)
 {
     assert(system_database.getDatabaseName() == DatabaseCatalog::SYSTEM_DATABASE);
     if (system_database.getUUID() == UUIDHelpers::Nil)
     {
         /// Attach to Ordinary database
         auto table_id = StorageID(DatabaseCatalog::SYSTEM_DATABASE, table_name);
-        system_database.attachTable(table_name, StorageT::create(table_id, std::forward<StorageArgs>(args)...));
+        system_database.attachTable(context, table_name, StorageT::create(table_id, std::forward<StorageArgs>(args)...));
     }
     else
     {
@@ -23,7 +23,7 @@ void attach(IDatabase & system_database, const String & table_name, StorageArgs
         /// and path is actually not used
         auto table_id = StorageID(DatabaseCatalog::SYSTEM_DATABASE, table_name, UUIDHelpers::generateV4());
         String path = "store/" + DatabaseCatalog::getPathForUUID(table_id.uuid);
-        system_database.attachTable(table_name, StorageT::create(table_id, std::forward<StorageArgs>(args)...), path);
+        system_database.attachTable(context, table_name, StorageT::create(table_id, std::forward<StorageArgs>(args)...), path);
     }
 }
 
diff --git a/src/Storages/tests/gtest_transform_query_for_external_database.cpp b/src/Storages/tests/gtest_transform_query_for_external_database.cpp
index ae9ec8745fd..f161400630b 100644
--- a/src/Storages/tests/gtest_transform_query_for_external_database.cpp
+++ b/src/Storages/tests/gtest_transform_query_for_external_database.cpp
@@ -84,6 +84,7 @@ private:
             const auto & table_name = tab.table.table;
             const auto & db_name = tab.table.database;
             database->attachTable(
+                context,
                 table_name,
                 StorageMemory::create(
                     StorageID(db_name, table_name), ColumnsDescription{getColumns()}, ConstraintsDescription{}, String{}));
diff --git a/src/TableFunctions/ITableFunctionXDBC.cpp b/src/TableFunctions/ITableFunctionXDBC.cpp
index d5ad2b73552..e9d33acc6de 100644
--- a/src/TableFunctions/ITableFunctionXDBC.cpp
+++ b/src/TableFunctions/ITableFunctionXDBC.cpp
@@ -82,7 +82,8 @@ ColumnsDescription ITableFunctionXDBC::getActualTableStructure(ContextPtr contex
 
     columns_info_uri.addQueryParameter("external_table_functions_use_nulls", Poco::NumberFormatter::format(use_nulls));
 
-    ReadWriteBufferFromHTTP buf(columns_info_uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(context));
+    Poco::Net::HTTPBasicCredentials credentials{};
+    ReadWriteBufferFromHTTP buf(columns_info_uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(context), credentials);
 
     std::string columns_info;
     readStringBinary(columns_info, buf);
diff --git a/src/TableFunctions/TableFunctionURL.cpp b/src/TableFunctions/TableFunctionURL.cpp
index 4df1e1d4982..c3ea30f800f 100644
--- a/src/TableFunctions/TableFunctionURL.cpp
+++ b/src/TableFunctions/TableFunctionURL.cpp
@@ -68,6 +68,8 @@ StoragePtr TableFunctionURL::getStorage(
     for (const auto & [header, value] : configuration.headers)
     {
         auto value_literal = value.safeGet<String>();
+        if (header == "Range")
+            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Range headers are not allowed");
         headers.emplace_back(std::make_pair(header, value_literal));
     }
 
diff --git a/tests/ci/ast_fuzzer_check.py b/tests/ci/ast_fuzzer_check.py
index 8cf46f608cb..d842d484841 100644
--- a/tests/ci/ast_fuzzer_check.py
+++ b/tests/ci/ast_fuzzer_check.py
@@ -4,7 +4,6 @@ import logging
 import subprocess
 import os
 import json
-import time
 import sys
 
 from github import Github
@@ -12,43 +11,15 @@ from github import Github
 from s3_helper import S3Helper
 from get_robot_token import get_best_robot_token
 from pr_info import PRInfo
+from ci_config import build_config_to_string
+from build_download_helper import get_build_config_for_check, 
get_build_urls +from docker_pull_helper import get_image_with_version +from commit_status_helper import post_commit_status +from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse +from stopwatch import Stopwatch - -DOWNLOAD_RETRIES_COUNT = 5 IMAGE_NAME = 'clickhouse/fuzzer' -def get_build_urls(build_config_str, reports_path): - for root, _, files in os.walk(reports_path): - for f in files: - if build_config_str in f : - logging.info("Found build report json %s", f) - with open(os.path.join(root, f), 'r', encoding='utf-8') as file_handler: - build_report = json.load(file_handler) - return build_report['build_urls'] - return [] - -def get_build_config(build_number, repo_path): - ci_config_path = os.path.join(repo_path, "tests/ci/ci_config.json") - with open(ci_config_path, 'r', encoding='utf-8') as ci_config: - config_dict = json.load(ci_config) - return config_dict['build_config'][build_number] - -def build_config_to_string(build_config): - if build_config["package-type"] == "performance": - return "performance" - - return "_".join([ - build_config['compiler'], - build_config['build-type'] if build_config['build-type'] else "relwithdebuginfo", - build_config['sanitizer'] if build_config['sanitizer'] else "none", - build_config['bundled'], - build_config['splitted'], - "tidy" if build_config['tidy'] == "enable" else "notidy", - "with_coverage" if build_config['with_coverage'] else "without_coverage", - build_config['package-type'], - ]) - - def get_run_command(pr_number, sha, download_url, workspace_path, image): return f'docker run --network=host --volume={workspace_path}:/workspace ' \ '--cap-add syslog --cap-add sys_admin ' \ @@ -62,12 +33,14 @@ def get_commit(gh, commit_sha): if __name__ == "__main__": logging.basicConfig(level=logging.INFO) + + stopwatch = Stopwatch() + temp_path = os.getenv("TEMP_PATH", os.path.abspath(".")) repo_path = os.getenv("REPO_COPY", os.path.abspath("../../")) reports_path = os.getenv("REPORTS_PATH", "./reports") check_name = sys.argv[1] - build_number = int(sys.argv[2]) if not os.path.exists(temp_path): os.makedirs(temp_path) @@ -79,30 +52,12 @@ if __name__ == "__main__": gh = Github(get_best_robot_token()) - images_path = os.path.join(temp_path, 'changed_images.json') + docker_image = get_image_with_version(temp_path, IMAGE_NAME) - docker_image = IMAGE_NAME - if os.path.exists(images_path): - logging.info("Images file exists") - with open(images_path, 'r', encoding='utf-8') as images_fd: - images = json.load(images_fd) - logging.info("Got images %s", images) - if IMAGE_NAME in images: - docker_image += ':' + images[IMAGE_NAME] - - for i in range(10): - try: - logging.info("Pulling image %s", docker_image) - subprocess.check_output(f"docker pull {docker_image}", stderr=subprocess.STDOUT, shell=True) - break - except Exception as ex: - time.sleep(i * 3) - logging.info("Got execption pulling docker %s", ex) - else: - raise Exception(f"Cannot pull dockerhub for image docker pull {docker_image}") - - build_config = get_build_config(build_number, repo_path) + build_config = get_build_config_for_check(check_name) + print(build_config) build_config_str = build_config_to_string(build_config) + print(build_config_str) urls = get_build_urls(build_config_str, reports_path) if not urls: raise Exception("No build URLs found") @@ -152,7 +107,7 @@ if __name__ == "__main__": logging.info("Exception uploading file %s text %s", f, ex) paths[f] = '' - report_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}" 
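The report_url rewrite here follows a pattern applied throughout these scripts: links are assembled from the environment variables GitHub Actions exports instead of a hard-coded github.com/ClickHouse/ClickHouse prefix, so runs on forks or GitHub Enterprise produce correct links. A minimal sketch of the idea (the helper name and fallback values are invented for illustration):

    import os

    def actions_run_url():
        """Link to the current workflow run from Actions-provided variables."""
        server = os.getenv('GITHUB_SERVER_URL', 'https://github.com')
        repo = os.getenv('GITHUB_REPOSITORY', 'ClickHouse/ClickHouse')
        run_id = os.getenv('GITHUB_RUN_ID', '0')
        return f'{server}/{repo}/actions/runs/{run_id}'

The same substitution appears below in build_report_check.py for branch_url, commit_url, and task_url.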
+ report_url = f"{os.getenv('GITHUB_SERVER_URL')}/{os.getenv('GITHUB_REPOSITORY')}/actions/runs/{os.getenv('GITHUB_RUN_ID')}" if paths['runlog.log']: report_url = paths['runlog.log'] if paths['main.log']: @@ -175,7 +130,15 @@ if __name__ == "__main__": status = 'failure' description = 'Task failed: $?=' + str(retcode) + if 'fail' in status: + test_result = [(description, 'FAIL')] + else: + test_result = [(description, 'OK')] + + ch_helper = ClickHouseHelper() + + prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_result, status, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, check_name) + logging.info("Result: '%s', '%s', '%s'", status, description, report_url) print(f"::notice ::Report url: {report_url}") - commit = get_commit(gh, pr_info.sha) - commit.create_status(context=check_name, description=description, state=status, target_url=report_url) + post_commit_status(gh, pr_info.sha, check_name, description, status, report_url) diff --git a/tests/ci/build_check.py b/tests/ci/build_check.py index fbb0fc11c74..1ba5589965c 100644 --- a/tests/ci/build_check.py +++ b/tests/ci/build_check.py @@ -11,9 +11,12 @@ from s3_helper import S3Helper from pr_info import PRInfo from get_robot_token import get_best_robot_token from version_helper import get_version_from_repo, update_version_local +from ccache_utils import get_ccache_if_not_exists, upload_ccache +from ci_config import build_config_to_string, CI_CONFIG +from docker_pull_helper import get_image_with_version -def get_build_config(build_check_name, build_number, repo_path): +def get_build_config(build_check_name, build_number): if build_check_name == 'ClickHouse build check (actions)': build_config_name = 'build_config' elif build_check_name == 'ClickHouse special build check (actions)': @@ -21,14 +24,11 @@ def get_build_config(build_check_name, build_number, repo_path): else: raise Exception(f"Unknown build check name {build_check_name}") - ci_config_path = os.path.join(repo_path, "tests/ci/ci_config.json") - with open(ci_config_path, 'r') as ci_config: - config_dict = json.load(ci_config) - return config_dict[build_config_name][build_number] + return CI_CONFIG[build_config_name][build_number] def _can_export_binaries(build_config): - if build_config['package-type'] != 'deb': + if build_config['package_type'] != 'deb': return False if build_config['bundled'] != "bundled": return False @@ -36,18 +36,18 @@ def _can_export_binaries(build_config): return False if build_config['sanitizer'] != '': return True - if build_config['build-type'] != '': + if build_config['build_type'] != '': return True return False -def get_packager_cmd(build_config, packager_path, output_path, build_version, image_version, ccache_path): - package_type = build_config['package-type'] +def get_packager_cmd(build_config, packager_path, output_path, build_version, image_version, ccache_path, pr_info): + package_type = build_config['package_type'] comp = build_config['compiler'] cmd = f"cd {packager_path} && ./packager --output-dir={output_path} --package-type={package_type} --compiler={comp}" - if build_config['build-type']: - cmd += ' --build-type={}'.format(build_config['build-type']) + if build_config['build_type']: + cmd += ' --build-type={}'.format(build_config['build_type']) if build_config['sanitizer']: cmd += ' --sanitizer={}'.format(build_config['sanitizer']) if build_config['bundled'] == 'unbundled': @@ -61,7 +61,8 @@ def get_packager_cmd(build_config, packager_path, output_path, build_version, im cmd += ' 
--ccache_dir={}'.format(ccache_path) if 'alien_pkgs' in build_config and build_config['alien_pkgs']: - cmd += ' --alien-pkgs' + if pr_info == 0 or 'release' in pr_info.labels: + cmd += ' --alien-pkgs rpm tgz' cmd += ' --docker-image-version={}'.format(image_version) cmd += ' --version={}'.format(build_version) @@ -74,7 +75,7 @@ def get_packager_cmd(build_config, packager_path, output_path, build_version, im def get_image_name(build_config): if build_config['bundled'] != 'bundled': return 'clickhouse/unbundled-builder' - elif build_config['package-type'] != 'deb': + elif build_config['package_type'] != 'deb': return 'clickhouse/binary-builder' else: return 'clickhouse/deb-builder' @@ -90,21 +91,6 @@ def build_clickhouse(packager_cmd, logs_path): logging.info("Build failed") return build_log_path, retcode == 0 -def build_config_to_string(build_config): - if build_config["package-type"] == "performance": - return "performance" - - return "_".join([ - build_config['compiler'], - build_config['build-type'] if build_config['build-type'] else "relwithdebuginfo", - build_config['sanitizer'] if build_config['sanitizer'] else "none", - build_config['bundled'], - build_config['splitted'], - "tidy" if build_config['tidy'] == "enable" else "notidy", - "with_coverage" if build_config['with_coverage'] else "without_coverage", - build_config['package-type'], - ]) - if __name__ == "__main__": logging.basicConfig(level=logging.INFO) repo_path = os.getenv("REPO_COPY", os.path.abspath("../../")) @@ -114,7 +100,7 @@ if __name__ == "__main__": build_check_name = sys.argv[1] build_number = int(sys.argv[2]) - build_config = get_build_config(build_check_name, build_number, repo_path) + build_config = get_build_config(build_check_name, build_number) if not os.path.exists(temp_path): os.makedirs(temp_path) @@ -128,27 +114,9 @@ if __name__ == "__main__": gh = Github(get_best_robot_token()) - images_path = os.path.join(os.getenv("IMAGES_PATH", temp_path), 'changed_images.json') image_name = get_image_name(build_config) - image_version = 'latest' - if os.path.exists(images_path): - logging.info("Images file exists") - with open(images_path, 'r') as images_fd: - images = json.load(images_fd) - logging.info("Got images %s", images) - if image_name in images: - image_version = images[image_name] - - for i in range(10): - try: - logging.info("Pulling image %s:%s", image_name, image_version) - subprocess.check_output(f"docker pull {image_name}:{image_version}", stderr=subprocess.STDOUT, shell=True) - break - except Exception as ex: - time.sleep(i * 3) - logging.info("Got execption pulling docker %s", ex) - else: - raise Exception(f"Cannot pull dockerhub for image docker pull {image_name}:{image_version}") + docker_image = get_image_with_version(os.getenv("IMAGES_PATH"), image_name) + image_version = docker_image.version version = get_version_from_repo(repo_path) version.tweak_update() @@ -163,10 +131,16 @@ if __name__ == "__main__": os.makedirs(build_output_path) ccache_path = os.path.join(caches_path, build_name + '_ccache') + s3_helper = S3Helper('https://s3.amazonaws.com') + + logging.info("Will try to fetch cache for our build") + get_ccache_if_not_exists(ccache_path, s3_helper, pr_info.number, temp_path) + if not os.path.exists(ccache_path): + logging.info("cache was not fetched, will create empty dir") os.makedirs(ccache_path) - packager_cmd = get_packager_cmd(build_config, os.path.join(repo_path, "docker/packager"), build_output_path, version.get_version_string(), image_version, ccache_path) + packager_cmd = 
get_packager_cmd(build_config, os.path.join(repo_path, "docker/packager"), build_output_path, version.get_version_string(), image_version, ccache_path, pr_info)
     logging.info("Going to run packager with %s", packager_cmd)
 
     build_clickhouse_log = os.path.join(temp_path, "build_log")
@@ -180,7 +154,10 @@ if __name__ == "__main__":
     subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {ccache_path}", shell=True)
     logging.info("Build finished with %s, log path %s", success, log_path)
 
-    s3_helper = S3Helper('https://s3.amazonaws.com')
+
+    logging.info("Will upload cache")
+    upload_ccache(ccache_path, s3_helper, pr_info.number, temp_path)
+
     s3_path_prefix = str(pr_info.number) + "/" + pr_info.sha + "/" + build_check_name.lower().replace(' ', '_') + "/" + build_name
     if os.path.exists(log_path):
         log_url = s3_helper.upload_build_file_to_s3(log_path, s3_path_prefix + "/" + os.path.basename(log_path))
diff --git a/tests/ci/build_download_helper.py b/tests/ci/build_download_helper.py
new file mode 100644
index 00000000000..2770b737041
--- /dev/null
+++ b/tests/ci/build_download_helper.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+
+import os
+import json
+import logging
+import sys
+import time
+
+import requests
+
+from ci_config import CI_CONFIG, build_config_to_string
+
+DOWNLOAD_RETRIES_COUNT = 5
+
+def get_build_config_for_check(check_name):
+    return CI_CONFIG["tests_config"][check_name]['required_build_properties']
+
+def get_build_urls(build_config_str, reports_path):
+    for root, _, files in os.walk(reports_path):
+        for f in files:
+            if build_config_str in f:
+                logging.info("Found build report json %s", f)
+                with open(os.path.join(root, f), 'r', encoding='utf-8') as file_handler:
+                    build_report = json.load(file_handler)
+                    return build_report['build_urls']
+    return []
+
+def download_build_with_progress(url, path):
+    logging.info("Downloading from %s to temp path %s", url, path)
+    for i in range(DOWNLOAD_RETRIES_COUNT):
+        try:
+            with open(path, 'wb') as f:
+                response = requests.get(url, stream=True)
+                response.raise_for_status()
+                total_length = response.headers.get('content-length')
+                if total_length is None or int(total_length) == 0:
+                    logging.info("No content-length, will download file without progress")
+                    f.write(response.content)
+                else:
+                    dl = 0
+                    total_length = int(total_length)
+                    logging.info("Content length is %ld bytes", total_length)
+                    for data in response.iter_content(chunk_size=4096):
+                        dl += len(data)
+                        f.write(data)
+                        if sys.stdout.isatty():
+                            done = int(50 * dl / total_length)
+                            percent = int(100 * float(dl) / total_length)
+                            eq_str = '=' * done
+                            space_str = ' ' * (50 - done)
+                            sys.stdout.write(f"\r[{eq_str}{space_str}] {percent}%")
+                            sys.stdout.flush()
+            break
+        except Exception as ex:
+            sys.stdout.write("\n")
+            time.sleep(3)
+            logging.info("Exception while downloading %s, retry %s", ex, i + 1)
+            if os.path.exists(path):
+                os.remove(path)
+    else:
+        raise Exception(f"Cannot download build from {url}, all retries exceeded")
+
+    sys.stdout.write("\n")
+    logging.info("Downloading finished")
+
+
+def download_builds(result_path, build_urls, filter_fn):
+    for url in build_urls:
+        if filter_fn(url):
+            fname = os.path.basename(url.replace('%2B', '+').replace('%20', ' '))
+            logging.info("Will download %s to %s", fname, result_path)
+            download_build_with_progress(url, os.path.join(result_path, fname))
+
+def download_builds_filter(check_name, reports_path, result_path, filter_fn=lambda _: True):
+    build_config = get_build_config_for_check(check_name)
+    print(build_config)
+    build_config_str = 
build_config_to_string(build_config) + print(build_config_str) + urls = get_build_urls(build_config_str, reports_path) + print(urls) + + if not urls: + raise Exception("No build URLs found") + + download_builds(result_path, urls, filter_fn) + +def download_all_deb_packages(check_name, reports_path, result_path): + download_builds_filter(check_name, reports_path, result_path, lambda x: x.endswith('deb')) + +def download_shared_build(check_name, reports_path, result_path): + download_builds_filter(check_name, reports_path, result_path, lambda x: x.endswith('shared_build.tgz')) + +def download_unit_tests(check_name, reports_path, result_path): + download_builds_filter(check_name, reports_path, result_path, lambda x: x.endswith('unit_tests_dbms')) + +def download_clickhouse_binary(check_name, reports_path, result_path): + download_builds_filter(check_name, reports_path, result_path, lambda x: x.endswith('clickhouse')) diff --git a/tests/ci/build_report_check.py b/tests/ci/build_report_check.py index e060ce39a0d..402db7c2740 100644 --- a/tests/ci/build_report_check.py +++ b/tests/ci/build_report_check.py @@ -9,6 +9,7 @@ from report import create_build_html_report from s3_helper import S3Helper from get_robot_token import get_best_robot_token from pr_info import PRInfo +from commit_status_helper import get_commit class BuildResult(): def __init__(self, compiler, build_type, sanitizer, bundled, splitted, status, elapsed_seconds, with_coverage): @@ -36,16 +37,11 @@ def group_by_artifacts(build_urls): groups['binary'].append(url) return groups -def get_commit(gh, commit_sha): - repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")) - commit = repo.get_commit(commit_sha) - return commit - def process_report(build_report): build_config = build_report['build_config'] build_result = BuildResult( compiler=build_config['compiler'], - build_type=build_config['build-type'], + build_type=build_config['build_type'], sanitizer=build_config['sanitizer'], bundled=build_config['bundled'], splitted=build_config['splitted'], @@ -114,13 +110,13 @@ if __name__ == "__main__": pr_info = PRInfo(event) - branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master" + branch_url = f"{os.getenv('GITHUB_SERVER_URL')}/{os.getenv('GITHUB_REPOSITORY')}/commits/master" branch_name = "master" if pr_info.number != 0: branch_name = "PR #{}".format(pr_info.number) - branch_url = "https://github.com/ClickHouse/ClickHouse/pull/" + str(pr_info.number) - commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{pr_info.sha}" - task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID', '0')}" + branch_url = f"{os.getenv('GITHUB_SERVER_URL')}/{os.getenv('GITHUB_REPOSITORY')}/pull/{pr_info.number}" + commit_url = f"{os.getenv('GITHUB_SERVER_URL')}/{os.getenv('GITHUB_REPOSITORY')}/commit/{pr_info.sha}" + task_url = f"{os.getenv('GITHUB_SERVER_URL')}/{os.getenv('GITHUB_REPOSITORY')}/actions/runs/{os.getenv('GITHUB_RUN_ID', '0')}" report = create_build_html_report( build_check_name, build_results, diff --git a/tests/ci/ccache_utils.py b/tests/ci/ccache_utils.py new file mode 100644 index 00000000000..f21f1a8c965 --- /dev/null +++ b/tests/ci/ccache_utils.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python3 + +import logging +import time +import sys +import os +import shutil +from pathlib import Path + +import requests + +from compress_files import decompress_fast, compress_fast + +DOWNLOAD_RETRIES_COUNT = 5 + +def dowload_file_with_progress(url, path): + 
logging.info("Downloading from %s to temp path %s", url, path) + for i in range(DOWNLOAD_RETRIES_COUNT): + try: + with open(path, 'wb') as f: + response = requests.get(url, stream=True) + response.raise_for_status() + total_length = response.headers.get('content-length') + if total_length is None or int(total_length) == 0: + logging.info("No content-length, will download file without progress") + f.write(response.content) + else: + dl = 0 + total_length = int(total_length) + logging.info("Content length is %ld bytes", total_length) + for data in response.iter_content(chunk_size=4096): + dl += len(data) + f.write(data) + if sys.stdout.isatty(): + done = int(50 * dl / total_length) + percent = int(100 * float(dl) / total_length) + eq_str = '=' * done + space_str = ' ' * (50 - done) + sys.stdout.write(f"\r[{eq_str}{space_str}] {percent}%") + sys.stdout.flush() + break + except Exception as ex: + sys.stdout.write("\n") + time.sleep(3) + logging.info("Exception while downloading %s, retry %s", ex, i + 1) + if os.path.exists(path): + os.remove(path) + else: + raise Exception(f"Cannot download dataset from {url}, all retries exceeded") + + sys.stdout.write("\n") + logging.info("Downloading finished") + + +def get_ccache_if_not_exists(path_to_ccache_dir, s3_helper, current_pr_number, temp_path): + ccache_name = os.path.basename(path_to_ccache_dir) + cache_found = False + prs_to_check = [current_pr_number] + if current_pr_number != 0: + prs_to_check.append(0) + for pr_number in prs_to_check: + logging.info("Searching cache for pr %s", pr_number) + s3_path_prefix = str(pr_number) + "/ccaches" + objects = s3_helper.list_prefix(s3_path_prefix) + logging.info("Found %s objects for pr", len(objects)) + for obj in objects: + if ccache_name in obj: + logging.info("Found ccache on path %s", obj) + url = "https://s3.amazonaws.com/clickhouse-builds/" + obj + compressed_cache = os.path.join(temp_path, os.path.basename(obj)) + dowload_file_with_progress(url, compressed_cache) + + path_to_decompress = str(Path(path_to_ccache_dir).parent) + if not os.path.exists(path_to_decompress): + os.makedirs(path_to_decompress) + + if os.path.exists(path_to_ccache_dir): + shutil.rmtree(path_to_ccache_dir) + logging.info("Ccache already exists, removing it") + + logging.info("Decompressing cache to path %s", path_to_decompress) + decompress_fast(compressed_cache, path_to_decompress) + logging.info("Files on path %s", os.listdir(path_to_decompress)) + cache_found = True + break + if cache_found: + break + + if not cache_found: + logging.info("ccache not found anywhere, cannot download anything :(") + if os.path.exists(path_to_ccache_dir): + logging.info("But at least we have some local cache") + else: + logging.info("ccache downloaded") + +def upload_ccache(path_to_ccache_dir, s3_helper, current_pr_number, temp_path): + logging.info("Uploading cache %s for pr %s", path_to_ccache_dir, current_pr_number) + ccache_name = os.path.basename(path_to_ccache_dir) + compressed_cache_path = os.path.join(temp_path, ccache_name + ".tar.gz") + compress_fast(path_to_ccache_dir, compressed_cache_path) + + s3_path = str(current_pr_number) + "/ccaches/" + os.path.basename(compressed_cache_path) + logging.info("Will upload %s to path %s", compressed_cache_path, s3_path) + s3_helper.upload_build_file_to_s3(compressed_cache_path, s3_path) + logging.info("Upload finished") diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py new file mode 100644 index 00000000000..7924a726a2e --- /dev/null +++ b/tests/ci/ci_config.py @@ -0,0 +1,736 @@ 
+#!/usr/bin/env python3 + +CI_CONFIG = { + "build_config": [ + { + "compiler": "clang-13", + "build_type": "", + "sanitizer": "", + "package_type": "deb", + "bundled": "bundled", + "splitted": "unsplitted", + "alien_pkgs": True, + "tidy": "disable", + "with_coverage": False + }, + { + "compiler": "clang-13", + "build_type": "", + "sanitizer": "", + "package_type": "performance", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "disable", + "with_coverage": False + }, + { + "compiler": "gcc-11", + "build_type": "", + "sanitizer": "", + "package_type": "binary", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "disable", + "with_coverage": False + }, + { + "compiler": "clang-13", + "build_type": "", + "sanitizer": "address", + "package_type": "deb", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "disable", + "with_coverage": False + }, + { + "compiler": "clang-13", + "build_type": "", + "sanitizer": "undefined", + "package_type": "deb", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "disable", + "with_coverage": False + }, + { + "compiler": "clang-13", + "build_type": "", + "sanitizer": "thread", + "package_type": "deb", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "disable", + "with_coverage": False + }, + { + "compiler": "clang-13", + "build_type": "", + "sanitizer": "memory", + "package_type": "deb", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "disable", + "with_coverage": False + }, + { + "compiler": "clang-13", + "build_type": "debug", + "sanitizer": "", + "package_type": "deb", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "disable", + "with_coverage": False + }, + { + "compiler": "gcc-11", + "build_type": "", + "sanitizer": "", + "package_type": "deb", + "bundled": "unbundled", + "splitted": "unsplitted", + "tidy": "disable", + "with_coverage": False + }, + { + "compiler": "clang-13", + "build_type": "", + "sanitizer": "", + "package_type": "binary", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "disable", + "with_coverage": False + } + ], + "special_build_config": [ + { + "compiler": "clang-13", + "build_type": "debug", + "sanitizer": "", + "package_type": "deb", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "enable", + "with_coverage": False + }, + { + "compiler": "clang-13", + "build_type": "", + "sanitizer": "", + "package_type": "binary", + "bundled": "bundled", + "splitted": "splitted", + "tidy": "disable", + "with_coverage": False + }, + { + "compiler": "clang-13-darwin", + "build_type": "", + "sanitizer": "", + "package_type": "binary", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "disable", + "with_coverage": False + }, + { + "compiler": "clang-13-aarch64", + "build_type": "", + "sanitizer": "", + "package_type": "binary", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "disable", + "with_coverage": False + }, + { + "compiler": "clang-13-freebsd", + "build_type": "", + "sanitizer": "", + "package_type": "binary", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "disable", + "with_coverage": False + }, + { + "compiler": "clang-13-darwin-aarch64", + "build_type": "", + "sanitizer": "", + "package_type": "binary", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "disable", + "with_coverage": False + }, + { + "compiler": "clang-13-ppc64le", + "build_type": "", + "sanitizer": "", + "package_type": "binary", + "bundled": "bundled", + "splitted": "unsplitted", + "tidy": "disable", + 
"with_coverage": False + } + ], + "tests_config": { + "Stateful tests (address, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "address", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateful tests (thread, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "thread", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateful tests (memory, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "memory", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateful tests (ubsan, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "undefined", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateful tests (debug, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "debug", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateful tests (release, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateful tests (release, DatabaseOrdinary, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateful tests (release, DatabaseReplicated, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateless tests (address, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "address", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateless tests (thread, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "thread", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateless tests (memory, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "memory", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateless tests (ubsan, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "undefined", + 
"bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateless tests (debug, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "debug", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateless tests (release, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateless tests (unbundled, actions)": { + "required_build_properties": { + "compiler": "gcc-11", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "unbundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateless tests (release, wide parts enabled, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateless tests (release, DatabaseOrdinary, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateless tests (release, DatabaseReplicated, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stress test (address, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "address", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stress test (thread, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "thread", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stress test (undefined, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "undefined", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stress test (memory, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "memory", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stress test (debug, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "debug", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Integration tests (asan, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + 
"build_type": "relwithdebuginfo", + "sanitizer": "address", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Integration tests (thread, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "thread", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Integration tests (release, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Integration tests (memory, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "memory", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Integration tests flaky check (asan, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "address", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Compatibility check (actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Split build smoke test (actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "binary", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "splitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Testflows check (actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Unit tests (release-gcc, actions)": { + "required_build_properties": { + "compiler": "gcc-11", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Unit tests (release-clang, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "binary", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Unit tests (asan, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "address", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Unit tests (msan, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "memory", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Unit tests (tsan, actions)": { + "required_build_properties": { + "compiler": "clang-13", + 
"package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "thread", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Unit tests (ubsan, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "thread", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "AST fuzzer (debug, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "debug", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "AST fuzzer (ASan, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "address", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "AST fuzzer (MSan, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "memory", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "AST fuzzer (TSan, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "thread", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "AST fuzzer (UBSan, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "undefined", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Release (actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "Stateless tests flaky check (address, actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "address", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + }, + "ClickHouse Keeper Jepsen (actions)": { + "required_build_properties": { + "compiler": "clang-13", + "package_type": "binary", + "build_type": "relwithdebuginfo", + "sanitizer": "none", + "bundled": "bundled", + "splitted": "unsplitted", + "clang_tidy": "disable", + "with_coverage": False + } + } + } +} + +def build_config_to_string(build_config): + if build_config["package_type"] == "performance": + return "performance" + + return "_".join([ + build_config['compiler'], + build_config['build_type'] if build_config['build_type'] else "relwithdebuginfo", + build_config['sanitizer'] if build_config['sanitizer'] else "none", + build_config['bundled'], + build_config['splitted'], + 'tidy' if 'tidy' in build_config and build_config['tidy'] == 'enable' else 'notidy', + 'with_coverage' if 'with_coverage' in build_config and build_config['with_coverage'] else 'without_coverage', + build_config['package_type'], + ]) diff --git a/tests/ci/clickhouse_helper.py b/tests/ci/clickhouse_helper.py new file mode 100644 
index 00000000000..0b9df6cb868 --- /dev/null +++ b/tests/ci/clickhouse_helper.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python3 +import time +import logging +import json + +import requests +from get_robot_token import get_parameter_from_ssm + +class ClickHouseHelper: + def __init__(self, url=None, user=None, password=None): + if url is None: + url = get_parameter_from_ssm("clickhouse-test-stat-url") + + self.url = url + self.auth = { + 'X-ClickHouse-User': user if user is not None else get_parameter_from_ssm("clickhouse-test-stat-login"), + 'X-ClickHouse-Key': password if password is not None else get_parameter_from_ssm("clickhouse-test-stat-password") + } + + def _insert_json_str_info(self, db, table, json_str): + params = { + 'database': db, + 'query': 'INSERT INTO {table} FORMAT JSONEachRow'.format(table=table), + 'date_time_input_format': 'best_effort', + 'send_logs_level': 'warning', + } + + for i in range(5): + response = requests.post(self.url, params=params, data=json_str, headers=self.auth, verify=False) + + logging.info("Response content '%s'", response.content) + + if response.ok: + break + + error = ( + "Cannot insert data into clickhouse at try " + str(i) + + ": HTTP code " + str(response.status_code) + ": '" + + str(response.text) + "'") + + if response.status_code >= 500: + # A retriable error + time.sleep(1) + continue + + logging.info("Request headers '%s', body '%s'", response.request.headers, response.request.body) + + raise Exception(error) + else: + raise Exception(error) + + def insert_event_into(self, db, table, event): + event_str = json.dumps(event) + self._insert_json_str_info(db, table, event_str) + + def insert_events_into(self, db, table, events): + jsons = [] + for event in events: + jsons.append(json.dumps(event)) + + self._insert_json_str_info(db, table, ','.join(jsons)) + + def _select_and_get_json_each_row(self, db, query): + params = { + 'database': db, + 'query': query, + 'default_format': 'JSONEachRow', + } + for i in range(5): + response = None + try: + response = requests.get(self.url, params=params, headers=self.auth, verify=False) + response.raise_for_status() + return response.text + except Exception as ex: + logging.warning("Cannot fetch with exception %s", str(ex)) + if response is not None: + logging.warning("Response text %s", response.text) + time.sleep(0.1 * i) + + raise Exception("Cannot fetch data from clickhouse") + + def select_json_each_row(self, db, query): + text = self._select_and_get_json_each_row(db, query) + result = [] + for line in text.split('\n'): + if line: + result.append(json.loads(line)) + return result + +def prepare_tests_results_for_clickhouse( + pr_info, test_results, + check_status, check_duration, check_start_time, + report_url, check_name): + + pull_request_url = "https://github.com/ClickHouse/ClickHouse/commits/master" + base_ref = "master" + head_ref = "master" + base_repo = pr_info.repo_full_name + head_repo = pr_info.repo_full_name + if pr_info.number != 0: + pull_request_url = pr_info.pr_html_url + base_ref = pr_info.base_ref + base_repo = pr_info.base_name + head_ref = pr_info.head_ref + head_repo = pr_info.head_name + + common_properties = dict( + pull_request_number=pr_info.number, + commit_sha=pr_info.sha, + commit_url=pr_info.commit_html_url, + check_name=check_name, + check_status=check_status, + check_duration_ms=int(float(check_duration) * 1000), + check_start_time=check_start_time, + report_url=report_url, + pull_request_url=pull_request_url, + base_ref=base_ref, + base_repo=base_repo, + head_ref=head_ref, 
head_repo=head_repo, + task_url=pr_info.task_url, + ) + + # Always publish a total record for all checks. For checks with individual + # tests, also publish a record per test. + result = [common_properties] + for test_result in test_results: + current_row = common_properties.copy() + test_name = test_result[0] + test_status = test_result[1] + + test_time = 0 + if len(test_result) > 2 and test_result[2]: + test_time = test_result[2] + current_row['test_duration_ms'] = int(float(test_time) * 1000) + current_row['test_name'] = test_name + current_row['test_status'] = test_status + result.append(current_row) + + return result + +def mark_flaky_tests(clickhouse_helper, check_name, test_results): + try: + query = """ + SELECT DISTINCT test_name + FROM checks + WHERE + check_start_time BETWEEN now() - INTERVAL 3 DAY AND now() + AND check_name = '{check_name}' + AND (test_status = 'FAIL' OR test_status = 'FLAKY') + AND pull_request_number = 0 + """.format(check_name=check_name) + + tests_data = clickhouse_helper.select_json_each_row('gh-data', query) + master_failed_tests = {row['test_name'] for row in tests_data} + logging.info("Found flaky tests: %s", ', '.join(master_failed_tests)) + + for test_result in test_results: + if test_result[1] == 'FAIL' and test_result[0] in master_failed_tests: + test_result[1] = 'FLAKY' + except Exception as ex: + logging.info("Exception happened during flaky tests fetch %s", ex) diff --git a/tests/ci/commit_status_helper.py b/tests/ci/commit_status_helper.py new file mode 100644 index 00000000000..5bdbf634715 --- /dev/null +++ b/tests/ci/commit_status_helper.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 + +import os + +def get_commit(gh, commit_sha): + repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")) + commit = repo.get_commit(commit_sha) + return commit + +def post_commit_status(gh, sha, check_name, description, state, report_url): + commit = get_commit(gh, sha) + commit.create_status(context=check_name, description=description, state=state, target_url=report_url) diff --git a/tests/ci/compatibility_check.py b/tests/ci/compatibility_check.py new file mode 100644 index 00000000000..b6a8f67aa5f --- /dev/null +++ b/tests/ci/compatibility_check.py @@ -0,0 +1,164 @@ +#!/usr/bin/env python3 + +from distutils.version import StrictVersion +import logging +import os +import json +import subprocess + +from github import Github + +from s3_helper import S3Helper +from get_robot_token import get_best_robot_token +from pr_info import PRInfo +from build_download_helper import download_builds_filter +from upload_result_helper import upload_results +from docker_pull_helper import get_images_with_versions +from commit_status_helper import post_commit_status +from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse +from stopwatch import Stopwatch + +IMAGE_UBUNTU = "clickhouse/test-old-ubuntu" +IMAGE_CENTOS = "clickhouse/test-old-centos" +MAX_GLIBC_VERSION = '2.4' +DOWNLOAD_RETRIES_COUNT = 5 +CHECK_NAME = "Compatibility check (actions)" + +def process_os_check(log_path): + name = os.path.basename(log_path) + with open(log_path, 'r') as log: + line = log.read().split('\n')[0].strip() + if line != 'OK': + return (name, "FAIL") + else: + return (name, "OK") + +def process_glibc_check(log_path): + bad_lines = [] + with open(log_path, 'r') as log: + for line in log: + if line.strip(): + columns = line.strip().split(' ') + symbol_with_glibc = columns[-2] # sysconf@GLIBC_2.2.5 + _, version = 
symbol_with_glibc.split('@GLIBC_') + if version == 'PRIVATE': + bad_lines.append((symbol_with_glibc, "FAIL")) + elif StrictVersion(version) > MAX_GLIBC_VERSION: + bad_lines.append((symbol_with_glibc, "FAIL")) + if not bad_lines: + bad_lines.append(("glibc check", "OK")) + return bad_lines + +def process_result(result_folder, server_log_folder): + summary = process_glibc_check(os.path.join(result_folder, "glibc.log")) + + status = "success" + description = "Compatibility check passed" + if len(summary) > 1 or summary[0][1] != "OK": + status = "failure" + description = "glibc check failed" + + if status == "success": + for operating_system in ("ubuntu:12.04", "centos:5"): + result = process_os_check(os.path.join(result_folder, operating_system)) + if result[1] != "OK": + status = "failure" + description = f"Old {operating_system} failed" + summary += [result] + break + summary += [result] + + server_log_path = os.path.join(server_log_folder, "clickhouse-server.log") + stderr_log_path = os.path.join(server_log_folder, "stderr.log") + client_stderr_log_path = os.path.join(server_log_folder, "clientstderr.log") + result_logs = [] + if os.path.exists(server_log_path): + result_logs.append(server_log_path) + + if os.path.exists(stderr_log_path): + result_logs.append(stderr_log_path) + + if os.path.exists(client_stderr_log_path): + result_logs.append(client_stderr_log_path) + + return status, description, summary, result_logs + + +def get_run_commands(build_path, result_folder, server_log_folder, image_centos, image_ubuntu): + return [ + f"readelf -s {build_path}/usr/bin/clickhouse | grep '@GLIBC_' > {result_folder}/glibc.log", + f"readelf -s {build_path}/usr/bin/clickhouse-odbc-bridge | grep '@GLIBC_' >> {result_folder}/glibc.log", + f"docker run --network=host --volume={build_path}/usr/bin/clickhouse:/clickhouse " \ + f"--volume={build_path}/etc/clickhouse-server:/config " \ + f"--volume={server_log_folder}:/var/log/clickhouse-server {image_ubuntu} > {result_folder}/ubuntu:12.04", + f"docker run --network=host --volume={build_path}/usr/bin/clickhouse:/clickhouse " \ + f"--volume={build_path}/etc/clickhouse-server:/config " \ + f"--volume={server_log_folder}:/var/log/clickhouse-server {image_centos} > {result_folder}/centos:5", + ] + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + + stopwatch = Stopwatch() + + temp_path = os.getenv("TEMP_PATH", os.path.abspath(".")) + repo_path = os.getenv("REPO_COPY", os.path.abspath("../../")) + reports_path = os.getenv("REPORTS_PATH", "./reports") + + with open(os.getenv('GITHUB_EVENT_PATH'), 'r', encoding='utf-8') as event_file: + event = json.load(event_file) + + pr_info = PRInfo(event) + + gh = Github(get_best_robot_token()) + + docker_images = get_images_with_versions(reports_path, [IMAGE_CENTOS, IMAGE_UBUNTU]) + + packages_path = os.path.join(temp_path, "packages") + if not os.path.exists(packages_path): + os.makedirs(packages_path) + + def url_filter(url): + return url.endswith('.deb') and ('clickhouse-common-static_' in url or 'clickhouse-server_' in url) + + download_builds_filter(CHECK_NAME, reports_path, packages_path, url_filter) + + for f in os.listdir(packages_path): + if '.deb' in f: + full_path = os.path.join(packages_path, f) + subprocess.check_call(f"dpkg -x {full_path} {packages_path} && rm {full_path}", shell=True) + + server_log_path = os.path.join(temp_path, "server_log") + if not os.path.exists(server_log_path): + os.makedirs(server_log_path) + + result_path = os.path.join(temp_path, "result_path") + if not 
os.path.exists(result_path): + os.makedirs(result_path) + + run_commands = get_run_commands(packages_path, result_path, server_log_path, docker_images[0], docker_images[1]) + + state = "success" + for run_command in run_commands: + try: + logging.info("Running command %s", run_command) + subprocess.check_call(run_command, shell=True) + except subprocess.CalledProcessError as ex: + logging.info("Exception calling command %s", ex) + state = "failure" + + subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True) + + s3_helper = S3Helper('https://s3.amazonaws.com') + state, description, test_results, additional_logs = process_result(result_path, server_log_path) + + ch_helper = ClickHouseHelper() + mark_flaky_tests(ch_helper, CHECK_NAME, test_results) + + report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, additional_logs, CHECK_NAME) + print(f"::notice ::Report url: {report_url}") + post_commit_status(gh, pr_info.sha, CHECK_NAME, description, state, report_url) + + prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, CHECK_NAME) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git a/tests/ci/docker_images_check.py b/tests/ci/docker_images_check.py index d874ca422c3..24d8dfe336b 100644 --- a/tests/ci/docker_images_check.py +++ b/tests/ci/docker_images_check.py @@ -6,10 +6,13 @@ import os import time import shutil from github import Github -from report import create_test_html_report from s3_helper import S3Helper from pr_info import PRInfo from get_robot_token import get_best_robot_token, get_parameter_from_ssm +from upload_result_helper import upload_results +from commit_status_helper import get_commit +from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse +from stopwatch import Stopwatch NAME = "Push to Dockerhub (actions)" @@ -36,13 +39,17 @@ def get_changed_docker_images(pr_info, repo_path, image_file_path): if image_description['name'].startswith('clickhouse/'): dockerhub_repo_name = 'clickhouse' - for f in files_changed: - if f.startswith(dockerfile_dir): - logging.info( - "Found changed file '%s' which affects docker image '%s' with path '%s'", - f, image_description['name'], dockerfile_dir) - changed_images.append(dockerfile_dir) - break + if 'release' in pr_info.labels: + logging.info("Release PR, will rebuild all images from branch, including %s", dockerfile_dir) + changed_images.append(dockerfile_dir) + else: + for f in files_changed: + if f.startswith(dockerfile_dir): + logging.info( + "Found changed file '%s' which affects docker image '%s' with path '%s'", + f, image_description['name'], dockerfile_dir) + changed_images.append(dockerfile_dir) + break # The order is important: dependents should go later than bases, so that # they are built with updated base versions. 
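The hunk above changes how images are selected for rebuild: a PR carrying the `release` label rebuilds every image, while an ordinary PR rebuilds an image only when some changed file lives under that image's Dockerfile directory. A minimal sketch of the same decision as a standalone predicate (the function name `should_rebuild_image` and its signature are hypothetical, introduced here only for illustration):

```python
# Illustrative sketch only: mirrors the selection logic added to
# get_changed_docker_images; should_rebuild_image is a hypothetical name.
def should_rebuild_image(dockerfile_dir, files_changed, labels):
    # Release PRs rebuild every image, regardless of what changed.
    if 'release' in labels:
        return True
    # Otherwise rebuild only if some changed file lives under the image dir.
    return any(f.startswith(dockerfile_dir) for f in files_changed)

# A change to the fasttest Dockerfile triggers only that image.
assert should_rebuild_image('docker/test/fasttest',
                            ['docker/test/fasttest/Dockerfile'], set())
assert not should_rebuild_image('docker/test/stateless',
                                ['docker/test/fasttest/Dockerfile'], set())
# A release-labeled PR triggers everything, even with no changed files.
assert should_rebuild_image('docker/test/stateless', [], {'release'})
```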
@@ -149,33 +156,11 @@ def process_test_results(s3_client, test_results, s3_path_prefix): processed_test_results.append((test_name, status)) return overall_status, processed_test_results -def upload_results(s3_client, pr_number, commit_sha, test_results): - s3_path_prefix = f"{pr_number}/{commit_sha}/" + NAME.lower().replace(' ', '_') - - branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master" - branch_name = "master" - if pr_number != 0: - branch_name = "PR #{}".format(pr_number) - branch_url = "https://github.com/ClickHouse/ClickHouse/pull/" + str(pr_number) - commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{commit_sha}" - - task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}" - - html_report = create_test_html_report(NAME, test_results, "https://hub.docker.com/u/clickhouse", task_url, branch_url, branch_name, commit_url) - with open('report.html', 'w') as f: - f.write(html_report) - - url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html") - logging.info("Search result in url %s", url) - return url - -def get_commit(gh, commit_sha): - repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")) - commit = repo.get_commit(commit_sha) - return commit - if __name__ == "__main__": logging.basicConfig(level=logging.INFO) + + stopwatch = Stopwatch() + repo_path = os.getenv("GITHUB_WORKSPACE", os.path.abspath("../../")) temp_path = os.path.join(os.getenv("RUNNER_TEMP", os.path.abspath("./temp")), 'docker_images_check') dockerhub_password = get_parameter_from_ssm('dockerhub_robot_password') @@ -211,7 +196,6 @@ if __name__ == "__main__": else: description = "Nothing to update" - if len(description) >= 140: description = description[:136] + "..." 
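The truncation kept at the end of the hunk above exists because GitHub caps the length of a commit-status description; the script budgets 140 characters and reserves room for a trailing `...`. A small sketch of the same rule (the helper name `cap_description` is hypothetical; the 140-character budget comes from the script itself):

```python
# Sketch of the truncation rule used above; cap_description is a
# hypothetical helper, not part of the patch.
def cap_description(description, limit=140):
    if len(description) >= limit:
        # Keep room for the "..." suffix within the limit.
        return description[:limit - 4] + "..."
    return description

assert len(cap_description("x" * 300)) == 139  # 136 chars + "..."
assert cap_description("short") == "short"
```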
@@ -220,14 +204,17 @@ if __name__ == "__main__": s3_path_prefix = str(pr_info.number) + "/" + pr_info.sha + "/" + NAME.lower().replace(' ', '_') status, test_results = process_test_results(s3_helper, images_processing_result, s3_path_prefix) - url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results) - - gh = Github(get_best_robot_token()) - commit = get_commit(gh, pr_info.sha) - commit.create_status(context=NAME, description=description, state=status, target_url=url) + ch_helper = ClickHouseHelper() + url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [], NAME) with open(os.path.join(temp_path, 'changed_images.json'), 'w') as images_file: json.dump(result_images, images_file) print("::notice ::Report url: {}".format(url)) print("::set-output name=url_output::\"{}\"".format(url)) + gh = Github(get_best_robot_token()) + commit = get_commit(gh, pr_info.sha) + commit.create_status(context=NAME, description=description, state=status, target_url=url) + + prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, status, stopwatch.duration_seconds, stopwatch.start_time_str, url, NAME) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git a/tests/ci/docker_pull_helper.py b/tests/ci/docker_pull_helper.py new file mode 100644 index 00000000000..f9804744820 --- /dev/null +++ b/tests/ci/docker_pull_helper.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python3 + +import os +import json +import time +import subprocess +import logging + +class DockerImage: + def __init__(self, name, version=None): + self.name = name + if version is None: + self.version = 'latest' + else: + self.version = version + + def __str__(self): + return f"{self.name}:{self.version}" + +def get_images_with_versions(reports_path, required_images, pull=True): + images_path = None + for root, _, files in os.walk(reports_path): + for f in files: + if f == 'changed_images.json': + images_path = os.path.join(root, 'changed_images.json') + break + + if images_path is not None and os.path.exists(images_path): + logging.info("Images file exists") + with open(images_path, 'r', encoding='utf-8') as images_fd: + images = json.load(images_fd) + logging.info("Got images %s", images) + else: + images = {} + + docker_images = [] + for image_name in required_images: + docker_image = DockerImage(image_name) + if image_name in images: + docker_image.version = images[image_name] + docker_images.append(docker_image) + + if pull: + for docker_image in docker_images: + for i in range(10): + try: + logging.info("Pulling image %s", docker_image) + subprocess.check_output(f"docker pull {docker_image}", stderr=subprocess.STDOUT, shell=True) + break + except Exception as ex: + latest_error = ex + time.sleep(i * 3) + logging.info("Got exception pulling docker %s", ex) + else: + raise Exception(f"Cannot pull image {docker_image} from dockerhub because of {latest_error}") + + return docker_images + +def get_image_with_version(reports_path, image, pull=True): + return get_images_with_versions(reports_path, [image], pull)[0] diff --git a/tests/ci/docs_check.py b/tests/ci/docs_check.py index aece781a703..11ff68e0286 --- a/tests/ci/docs_check.py +++ b/tests/ci/docs_check.py @@ -2,60 +2,26 @@ import logging import subprocess import os -import time import json import sys from github import Github -from report import create_test_html_report from s3_helper import S3Helper from pr_info import PRInfo from get_robot_token import get_best_robot_token +from 
upload_result_helper import upload_results +from docker_pull_helper import get_image_with_version +from commit_status_helper import post_commit_status, get_commit +from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse +from stopwatch import Stopwatch + NAME = "Docs Check (actions)" -def process_logs(s3_client, additional_logs, s3_path_prefix): - additional_urls = [] - for log_path in additional_logs: - if log_path: - additional_urls.append( - s3_client.upload_test_report_to_s3( - log_path, - s3_path_prefix + "/" + os.path.basename(log_path))) - - return additional_urls - -def upload_results(s3_client, pr_number, commit_sha, test_results, additional_files): - s3_path_prefix = f"{pr_number}/{commit_sha}/docs_check" - additional_urls = process_logs(s3_client, additional_files, s3_path_prefix) - - branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master" - branch_name = "master" - if pr_number != 0: - branch_name = f"PR #{pr_number}" - branch_url = f"https://github.com/ClickHouse/ClickHouse/pull/{pr_number}" - commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{commit_sha}" - - task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}" - - raw_log_url = additional_urls[0] - additional_urls.pop(0) - - html_report = create_test_html_report(NAME, test_results, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls) - with open('report.html', 'w', encoding='utf-8') as f: - f.write(html_report) - - url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html") - logging.info("Search result in url %s", url) - return url - -def get_commit(gh, commit_sha): - repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")) - commit = repo.get_commit(commit_sha) - return commit - if __name__ == "__main__": logging.basicConfig(level=logging.INFO) + stopwatch = Stopwatch() + temp_path = os.path.join(os.getenv("TEMP_PATH")) repo_path = os.path.join(os.getenv("REPO_COPY")) @@ -76,27 +42,7 @@ if __name__ == "__main__": if not os.path.exists(temp_path): os.makedirs(temp_path) - images_path = os.path.join(temp_path, 'changed_images.json') - - docker_image = 'clickhouse/docs-check' - if os.path.exists(images_path): - logging.info("Images file exists") - with open(images_path, 'r', encoding='utf-8') as images_fd: - images = json.load(images_fd) - logging.info("Got images %s", images) - if 'clickhouse/docs-check' in images: - docker_image += ':' + images['clickhouse/docs-check'] - - logging.info("Got docker image %s", docker_image) - for i in range(10): - try: - subprocess.check_output(f"docker pull {docker_image}", shell=True) - break - except Exception as ex: - time.sleep(i * 3) - logging.info("Got execption pulling docker %s", ex) - else: - raise Exception(f"Cannot pull dockerhub for image {docker_image}") + docker_image = get_image_with_version(temp_path, 'clickhouse/docs-check') test_output = os.path.join(temp_path, 'docs_check_log') if not os.path.exists(test_output): @@ -143,8 +89,11 @@ if __name__ == "__main__": lines.append(("Non zero exit code", "FAIL")) s3_helper = S3Helper('https://s3.amazonaws.com') + ch_helper = ClickHouseHelper() - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, lines, additional_files) + report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, lines, additional_files, NAME) print("::notice ::Report url: {report_url}") - commit = get_commit(gh, pr_info.sha) - commit.create_status(context=NAME, 
description=description, state=status, target_url=report_url) + post_commit_status(gh, pr_info.sha, NAME, description, status, report_url) + + prepared_events = prepare_tests_results_for_clickhouse(pr_info, lines, status, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, NAME) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git a/tests/ci/docs_release.py b/tests/ci/docs_release.py index 832a3293ae2..6ca45d63858 100644 --- a/tests/ci/docs_release.py +++ b/tests/ci/docs_release.py @@ -1,61 +1,22 @@ -#!/usr/bin/env python3 - #!/usr/bin/env python3 import logging import subprocess import os -import time import json import sys + from github import Github -from report import create_test_html_report + from s3_helper import S3Helper from pr_info import PRInfo from get_robot_token import get_best_robot_token from ssh import SSHKey +from upload_result_helper import upload_results +from docker_pull_helper import get_image_with_version +from commit_status_helper import get_commit NAME = "Docs Release (actions)" -def process_logs(s3_client, additional_logs, s3_path_prefix): - additional_urls = [] - for log_path in additional_logs: - if log_path: - additional_urls.append( - s3_client.upload_test_report_to_s3( - log_path, - s3_path_prefix + "/" + os.path.basename(log_path))) - - return additional_urls - -def upload_results(s3_client, pr_number, commit_sha, test_results, additional_files): - s3_path_prefix = f"{pr_number}/{commit_sha}/docs_release" - additional_urls = process_logs(s3_client, additional_files, s3_path_prefix) - - branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master" - branch_name = "master" - if pr_number != 0: - branch_name = f"PR #{pr_number}" - branch_url = f"https://github.com/ClickHouse/ClickHouse/pull/{pr_number}" - commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{commit_sha}" - - task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}" - - raw_log_url = additional_urls[0] - additional_urls.pop(0) - - html_report = create_test_html_report(NAME, test_results, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls) - with open('report.html', 'w', encoding='utf-8') as f: - f.write(html_report) - - url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html") - logging.info("Search result in url %s", url) - return url - -def get_commit(gh, commit_sha): - repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")) - commit = repo.get_commit(commit_sha) - return commit - if __name__ == "__main__": logging.basicConfig(level=logging.INFO) @@ -79,27 +40,7 @@ if __name__ == "__main__": if not os.path.exists(temp_path): os.makedirs(temp_path) - images_path = os.path.join(temp_path, 'changed_images.json') - - docker_image = 'clickhouse/docs-release' - if os.path.exists(images_path): - logging.info("Images file exists") - with open(images_path, 'r', encoding='utf-8') as images_fd: - images = json.load(images_fd) - logging.info("Got images %s", images) - if 'clickhouse/docs-release' in images: - docker_image += ':' + images['clickhouse/docs-release'] - - logging.info("Got docker image %s", docker_image) - for i in range(10): - try: - subprocess.check_output(f"docker pull {docker_image}", shell=True) - break - except Exception as ex: - time.sleep(i * 3) - logging.info("Got execption pulling docker %s", ex) - else: - raise Exception(f"Cannot pull dockerhub for image {docker_image}") + docker_image = 
get_image_with_version(temp_path, 'clickhouse/docs-release') test_output = os.path.join(temp_path, 'docs_release_log') if not os.path.exists(test_output): @@ -149,7 +90,7 @@ if __name__ == "__main__": s3_helper = S3Helper('https://s3.amazonaws.com') - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, lines, additional_files) + report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, lines, additional_files, NAME) print("::notice ::Report url: {report_url}") commit = get_commit(gh, pr_info.sha) commit.create_status(context=NAME, description=description, state=status, target_url=report_url) diff --git a/tests/ci/fast_test_check.py b/tests/ci/fast_test_check.py index ae5a18613ee..2734102be3f 100644 --- a/tests/ci/fast_test_check.py +++ b/tests/ci/fast_test_check.py @@ -4,13 +4,16 @@ import logging import subprocess import os import json -import time import csv from github import Github from pr_info import PRInfo -from report import create_test_html_report from s3_helper import S3Helper from get_robot_token import get_best_robot_token +from upload_result_helper import upload_results +from docker_pull_helper import get_image_with_version +from commit_status_helper import post_commit_status +from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse +from stopwatch import Stopwatch NAME = 'Fast test (actions)' @@ -47,51 +50,12 @@ def process_results(result_folder): return state, description, test_results, additional_files -def process_logs(s3_client, additional_logs, s3_path_prefix): - additional_urls = [] - for log_path in additional_logs: - if log_path: - additional_urls.append( - s3_client.upload_test_report_to_s3( - log_path, - s3_path_prefix + "/" + os.path.basename(log_path))) - - return additional_urls - - -def upload_results(s3_client, pr_number, commit_sha, test_results, raw_log, additional_files): - additional_files = [raw_log] + additional_files - s3_path_prefix = f"{pr_number}/{commit_sha}/fasttest" - additional_urls = process_logs(s3_client, additional_files, s3_path_prefix) - - branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master" - branch_name = "master" - if pr_number != 0: - branch_name = "PR #{}".format(pr_number) - branch_url = "https://github.com/ClickHouse/ClickHouse/pull/" + str(pr_number) - commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{commit_sha}" - - task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}" - - raw_log_url = additional_urls[0] - additional_urls.pop(0) - - html_report = create_test_html_report(NAME, test_results, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls, True) - with open('report.html', 'w') as f: - f.write(html_report) - - url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html") - logging.info("Search result in url %s", url) - return url - -def get_commit(gh, commit_sha): - repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")) - commit = repo.get_commit(commit_sha) - return commit - if __name__ == "__main__": logging.basicConfig(level=logging.INFO) + + stopwatch = Stopwatch() + temp_path = os.getenv("TEMP_PATH", os.path.abspath(".")) caches_path = os.getenv("CACHES_PATH", temp_path) @@ -105,27 +69,7 @@ if __name__ == "__main__": gh = Github(get_best_robot_token()) - images_path = os.path.join(temp_path, 'changed_images.json') - docker_image = 'clickhouse/fasttest' - if os.path.exists(images_path): - logging.info("Images file 
exists") - with open(images_path, 'r') as images_fd: - images = json.load(images_fd) - logging.info("Got images %s", images) - if 'clickhouse/fasttest' in images: - docker_image += ':' + images['clickhouse/fasttest'] - - logging.info("Got docker image %s", docker_image) - for i in range(10): - try: - subprocess.check_output(f"docker pull {docker_image}", shell=True) - break - except Exception as ex: - time.sleep(i * 3) - logging.info("Got execption pulling docker %s", ex) - else: - raise Exception(f"Cannot pull dockerhub for image {docker_image}") - + docker_image = get_image_with_version(temp_path, 'clickhouse/fasttest') s3_helper = S3Helper('https://s3.amazonaws.com') @@ -189,7 +133,12 @@ if __name__ == "__main__": else: state, description, test_results, additional_logs = process_results(output_path) - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, run_log_path, additional_logs) + ch_helper = ClickHouseHelper() + mark_flaky_tests(ch_helper, NAME, test_results) + + report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [run_log_path] + additional_logs, NAME, True) print("::notice ::Report url: {}".format(report_url)) - commit = get_commit(gh, pr_info.sha) - commit.create_status(context=NAME, description=description, state=state, target_url=report_url) + post_commit_status(gh, pr_info.sha, NAME, description, state, report_url) + + prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, NAME) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git a/tests/ci/finish_check.py b/tests/ci/finish_check.py index 4833f62443c..c38b3c09448 100644 --- a/tests/ci/finish_check.py +++ b/tests/ci/finish_check.py @@ -5,6 +5,7 @@ import os from github import Github from pr_info import PRInfo from get_robot_token import get_best_robot_token +from commit_status_helper import get_commit NAME = 'Run Check (actions)' @@ -23,12 +24,6 @@ def filter_statuses(statuses): filt[status.context] = status return filt - -def get_commit(gh, commit_sha): - repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")) - commit = repo.get_commit(commit_sha) - return commit - if __name__ == "__main__": logging.basicConfig(level=logging.INFO) with open(os.getenv('GITHUB_EVENT_PATH'), 'r') as event_file: @@ -38,7 +33,7 @@ if __name__ == "__main__": gh = Github(get_best_robot_token()) commit = get_commit(gh, pr_info.sha) - url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}" + url = f"{os.getenv('GITHUB_SERVER_URL')}/{os.getenv('GITHUB_REPOSITORY')}/actions/runs/{os.getenv('GITHUB_RUN_ID')}" statuses = filter_statuses(list(commit.get_statuses())) if NAME in statuses and statuses[NAME].state == "pending": commit.create_status(context=NAME, description="All checks finished", state="success", target_url=url) diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index 75a0b31a221..dc91ec07163 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -5,60 +5,20 @@ import logging import subprocess import os import json -import time import sys from github import Github -import requests -from report import create_test_html_report from s3_helper import S3Helper from get_robot_token import get_best_robot_token from pr_info import PRInfo +from build_download_helper import download_all_deb_packages +from upload_result_helper import 
upload_results +from docker_pull_helper import get_image_with_version +from commit_status_helper import post_commit_status, get_commit +from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse +from stopwatch import Stopwatch -DOWNLOAD_RETRIES_COUNT = 5 - -def process_logs(s3_client, additional_logs, s3_path_prefix): - additional_urls = [] - for log_path in additional_logs: - if log_path: - additional_urls.append( - s3_client.upload_test_report_to_s3( - log_path, - s3_path_prefix + "/" + os.path.basename(log_path))) - - return additional_urls - - -def upload_results(s3_client, pr_number, commit_sha, test_results, raw_log, additional_files, check_name): - additional_files = [raw_log] + additional_files - s3_path_prefix = f"{pr_number}/{commit_sha}/" + check_name.lower().replace(' ', '_').replace('(', '_').replace(')', '_').replace(',', '_') - additional_urls = process_logs(s3_client, additional_files, s3_path_prefix) - - branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master" - branch_name = "master" - if pr_number != 0: - branch_name = f"PR #{pr_number}" - branch_url = f"https://github.com/ClickHouse/ClickHouse/pull/{pr_number}" - commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{commit_sha}" - - task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}" - - raw_log_url = additional_urls[0] - additional_urls.pop(0) - - html_report = create_test_html_report(check_name, test_results, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls, True) - with open('report.html', 'w', encoding='utf-8') as f: - f.write(html_report) - - url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html") - logging.info("Search result in url %s", url) - return url - -def get_commit(gh, commit_sha): - repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")) - commit = repo.get_commit(commit_sha) - return commit def get_image_name(check_name): if 'stateless' in check_name.lower(): @@ -68,92 +28,22 @@ def get_image_name(check_name): else: raise Exception(f"Cannot deduce image name based on check name {check_name}") - -def dowload_build_with_progress(url, path): - logging.info("Downloading from %s to temp path %s", url, path) - for i in range(DOWNLOAD_RETRIES_COUNT): - try: - with open(path, 'wb') as f: - response = requests.get(url, stream=True) - response.raise_for_status() - total_length = response.headers.get('content-length') - if total_length is None or int(total_length) == 0: - logging.info("No content-length, will download file without progress") - f.write(response.content) - else: - dl = 0 - total_length = int(total_length) - logging.info("Content length is %ld bytes", total_length) - for data in response.iter_content(chunk_size=4096): - dl += len(data) - f.write(data) - if sys.stdout.isatty(): - done = int(50 * dl / total_length) - percent = int(100 * float(dl) / total_length) - eq_str = '=' * done - space_str = ' ' * (50 - done) - sys.stdout.write(f"\r[{eq_str}{space_str}] {percent}%") - sys.stdout.flush() - break - except Exception as ex: - sys.stdout.write("\n") - time.sleep(3) - logging.info("Exception while downloading %s, retry %s", ex, i + 1) - if os.path.exists(path): - os.remove(path) - else: - raise Exception(f"Cannot download dataset from {url}, all retries exceeded") - - sys.stdout.write("\n") - logging.info("Downloading finished") - - -def download_builds(result_path, build_urls): - for url in build_urls: - if 
url.endswith('.deb'): - fname = os.path.basename(url) - logging.info("Will download %s to %s", fname, result_path) - dowload_build_with_progress(url, os.path.join(result_path, fname)) - - -def get_build_config(build_number, repo_path): - ci_config_path = os.path.join(repo_path, "tests/ci/ci_config.json") - with open(ci_config_path, 'r', encoding='utf-8') as ci_config: - config_dict = json.load(ci_config) - return config_dict['build_config'][build_number] - -def get_build_urls(build_config_str, reports_path): - for root, _, files in os.walk(reports_path): - for f in files: - if build_config_str in f : - logging.info("Found build report json %s", f) - with open(os.path.join(root, f), 'r', encoding='utf-8') as file_handler: - build_report = json.load(file_handler) - return build_report['build_urls'] - return [] - -def build_config_to_string(build_config): - if build_config["package-type"] == "performance": - return "performance" - - return "_".join([ - build_config['compiler'], - build_config['build-type'] if build_config['build-type'] else "relwithdebuginfo", - build_config['sanitizer'] if build_config['sanitizer'] else "none", - build_config['bundled'], - build_config['splitted'], - "tidy" if build_config['tidy'] == "enable" else "notidy", - "with_coverage" if build_config['with_coverage'] else "without_coverage", - build_config['package-type'], - ]) - -def get_run_command(builds_path, result_path, server_log_path, kill_timeout, additional_envs, image): +def get_run_command(builds_path, result_path, server_log_path, kill_timeout, additional_envs, image, flaky_check, tests_to_run): additional_options = ['--hung-check'] additional_options.append('--print-time') + + if tests_to_run: + additional_options += tests_to_run + additional_options_str = '-e ADDITIONAL_OPTIONS="' + ' '.join(additional_options) + '"' envs = [f'-e MAX_RUN_TIME={int(0.9 * kill_timeout)}', '-e S3_URL="https://clickhouse-datasets.s3.amazonaws.com"'] + + if flaky_check: + envs += ['-e NUM_TRIES=100', '-e MAX_RUN_TIME=1800'] + envs += [f'-e {e}' for e in additional_envs] + env_str = ' '.join(envs) return f"docker run --volume={builds_path}:/package_folder " \ @@ -161,6 +51,20 @@ def get_run_command(builds_path, result_path, server_log_path, kill_timeout, add f"--cap-add=SYS_PTRACE {env_str} {additional_options_str} {image}" +def get_tests_to_run(pr_info): + result = set([]) + + if pr_info.changed_files is None: + return [] + + for fpath in pr_info.changed_files: + if 'tests/queries/0_stateless/0' in fpath: + logging.info('File %s changed and seems like stateless test', fpath) + fname = fpath.split('/')[3] + fname_without_ext = os.path.splitext(fname)[0] + result.add(fname_without_ext + '.') + return list(result) + def process_results(result_folder, server_log_path): test_results = [] additional_files = [] @@ -194,13 +98,16 @@ def process_results(result_folder, server_log_path): if __name__ == "__main__": logging.basicConfig(level=logging.INFO) + + stopwatch = Stopwatch() + temp_path = os.getenv("TEMP_PATH", os.path.abspath(".")) repo_path = os.getenv("REPO_COPY", os.path.abspath("../../")) reports_path = os.getenv("REPORTS_PATH", "./reports") check_name = sys.argv[1] - build_number = int(sys.argv[2]) - kill_timeout = int(sys.argv[3]) + kill_timeout = int(sys.argv[2]) + flaky_check = 'flaky' in check_name.lower() if not os.path.exists(temp_path): os.makedirs(temp_path) @@ -208,51 +115,26 @@ if __name__ == "__main__": with open(os.getenv('GITHUB_EVENT_PATH'), 'r', encoding='utf-8') as event_file: event = json.load(event_file) - 
pr_info = PRInfo(event) - gh = Github(get_best_robot_token()) + pr_info = PRInfo(event, need_changed_files=flaky_check) + tests_to_run = [] + if flaky_check: + tests_to_run = get_tests_to_run(pr_info) + if not tests_to_run: + commit = get_commit(gh, pr_info.sha) + commit.create_status(context=check_name, description='No changed stateless tests found', state='success') + sys.exit(0) - for root, _, files in os.walk(reports_path): - for f in files: - if f == 'changed_images.json': - images_path = os.path.join(root, 'changed_images.json') - break - image_name = get_image_name(check_name) - - docker_image = image_name - if images_path and os.path.exists(images_path): - logging.info("Images file exists") - with open(images_path, 'r', encoding='utf-8') as images_fd: - images = json.load(images_fd) - logging.info("Got images %s", images) - if image_name in images: - docker_image += ':' + images[image_name] - else: - logging.info("Images file not found") - - for i in range(10): - try: - logging.info("Pulling image %s", docker_image) - subprocess.check_output(f"docker pull {docker_image}", stderr=subprocess.STDOUT, shell=True) - break - except Exception as ex: - time.sleep(i * 3) - logging.info("Got execption pulling docker %s", ex) - else: - raise Exception(f"Cannot pull dockerhub for image docker pull {docker_image}") - - build_config = get_build_config(build_number, repo_path) - build_config_str = build_config_to_string(build_config) - urls = get_build_urls(build_config_str, reports_path) - if not urls: - raise Exception("No build URLs found") + docker_image = get_image_with_version(reports_path, image_name) packages_path = os.path.join(temp_path, "packages") if not os.path.exists(packages_path): os.makedirs(packages_path) + download_all_deb_packages(check_name, reports_path, packages_path) + server_log_path = os.path.join(temp_path, "server_log") if not os.path.exists(server_log_path): os.makedirs(server_log_path) @@ -263,8 +145,7 @@ if __name__ == "__main__": run_log_path = os.path.join(result_path, "runlog.log") - download_builds(packages_path, urls) - run_command = get_run_command(packages_path, result_path, server_log_path, kill_timeout, [], docker_image) + run_command = get_run_command(packages_path, result_path, server_log_path, kill_timeout, [], docker_image, flaky_check, tests_to_run) logging.info("Going to run func tests: %s", run_command) with open(run_log_path, 'w', encoding='utf-8') as log: @@ -279,7 +160,14 @@ if __name__ == "__main__": s3_helper = S3Helper('https://s3.amazonaws.com') state, description, test_results, additional_logs = process_results(result_path, server_log_path) - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, run_log_path, additional_logs, check_name) + + ch_helper = ClickHouseHelper() + mark_flaky_tests(ch_helper, check_name, test_results) + + report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [run_log_path] + additional_logs, check_name) + print(f"::notice ::Report url: {report_url}") - commit = get_commit(gh, pr_info.sha) - commit.create_status(context=check_name, description=description, state=state, target_url=report_url) + post_commit_status(gh, pr_info.sha, check_name, description, state, report_url) + + prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, check_name) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git 
a/tests/ci/integration_test_check.py b/tests/ci/integration_test_check.py index 48700927995..f6a46e72e84 100644 --- a/tests/ci/integration_test_check.py +++ b/tests/ci/integration_test_check.py @@ -4,18 +4,21 @@ import os import logging import sys import json -import time import subprocess import csv -import ast from github import Github -import requests -from report import create_test_html_report from s3_helper import S3Helper from get_robot_token import get_best_robot_token from pr_info import PRInfo +from build_download_helper import download_all_deb_packages +from upload_result_helper import upload_results +from docker_pull_helper import get_images_with_versions +from commit_status_helper import post_commit_status +from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse +from stopwatch import Stopwatch + DOWNLOAD_RETRIES_COUNT = 5 @@ -39,6 +42,8 @@ def get_json_params_dict(check_name, commit_sha, pr_number, docker_images): 'pr_info': None, 'docker_images_with_versions': docker_images, 'shuffle_test_groups': False, + 'use_tmpfs': False, + 'disable_net_host': True, } def get_env_for_runner(build_path, repo_path, result_path, work_path): @@ -60,136 +65,6 @@ def get_env_for_runner(build_path, repo_path, result_path, work_path): return my_env -def dowload_build_with_progress(url, path): - logging.info("Downloading from %s to temp path %s", url, path) - for i in range(DOWNLOAD_RETRIES_COUNT): - try: - with open(path, 'wb') as f: - response = requests.get(url, stream=True) - response.raise_for_status() - total_length = response.headers.get('content-length') - if total_length is None or int(total_length) == 0: - logging.info("No content-length, will download file without progress") - f.write(response.content) - else: - dl = 0 - total_length = int(total_length) - logging.info("Content length is %ld bytes", total_length) - for data in response.iter_content(chunk_size=4096): - dl += len(data) - f.write(data) - if sys.stdout.isatty(): - done = int(50 * dl / total_length) - percent = int(100 * float(dl) / total_length) - eq_str = '=' * done - space_str = ' ' * (50 - done) - sys.stdout.write(f"\r[{eq_str}{space_str}] {percent}%") - sys.stdout.flush() - break - except Exception as ex: - sys.stdout.write("\n") - time.sleep(3) - logging.info("Exception while downloading %s, retry %s", ex, i + 1) - if os.path.exists(path): - os.remove(path) - else: - raise Exception(f"Cannot download dataset from {url}, all retries exceeded") - - sys.stdout.write("\n") - logging.info("Downloading finished") - -def get_build_urls(build_config_str, reports_path): - for root, _, files in os.walk(reports_path): - for f in files: - if build_config_str in f : - logging.info("Found build report json %s", f) - with open(os.path.join(root, f), 'r', encoding='utf-8') as file_handler: - build_report = json.load(file_handler) - return build_report['build_urls'] - return [] - -def get_build_config(build_number, repo_path): - ci_config_path = os.path.join(repo_path, "tests/ci/ci_config.json") - with open(ci_config_path, 'r', encoding='utf-8') as ci_config: - config_dict = json.load(ci_config) - return config_dict['build_config'][build_number] - -def build_config_to_string(build_config): - if build_config["package-type"] == "performance": - return "performance" - - return "_".join([ - build_config['compiler'], - build_config['build-type'] if build_config['build-type'] else "relwithdebuginfo", - build_config['sanitizer'] if build_config['sanitizer'] else "none", - build_config['bundled'], 
- build_config['splitted'], - "tidy" if build_config['tidy'] == "enable" else "notidy", - "with_coverage" if build_config['with_coverage'] else "without_coverage", - build_config['package-type'], - ]) - -def get_images_with_versions(images_path): - if os.path.exists(images_path): - result = {} - logging.info("Images file exists") - with open(images_path, 'r', encoding='utf-8') as images_fd: - images = json.load(images_fd) - logging.info("Got images %s", images) - for required_image in IMAGES: - if required_image in images: - result[required_image] = images[required_image] - else: - result[required_image] = 'latest' - return result - else: - return {image: 'latest' for image in IMAGES} - -def download_builds(result_path, build_urls): - for url in build_urls: - if url.endswith('.deb'): - fname = os.path.basename(url.replace('%2B', '+').replace('%20', ' ')) - logging.info("Will download %s to %s", fname, result_path) - dowload_build_with_progress(url, os.path.join(result_path, fname)) - - -def get_commit(gh, commit_sha): - repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")) - commit = repo.get_commit(commit_sha) - return commit - -def process_logs(s3_client, additional_logs, s3_path_prefix, test_results): - proccessed_logs = {} - # Firstly convert paths of logs from test_results to urls to s3. - for test_result in test_results: - if len(test_result) <= 3: - continue - - # Convert from string repr of list to list. - test_log_paths = ast.literal_eval(test_result[3]) - test_log_urls = [] - for log_path in test_log_paths: - if log_path in proccessed_logs: - test_log_urls.append(proccessed_logs[log_path]) - elif log_path: - url = s3_client.upload_test_report_to_s3( - log_path, - s3_path_prefix + "/" + os.path.basename(log_path)) - test_log_urls.append(url) - proccessed_logs[log_path] = url - - test_result[3] = test_log_urls - - additional_urls = [] - for log_path in additional_logs: - if log_path: - additional_urls.append( - s3_client.upload_test_report_to_s3( - log_path, - s3_path_prefix + "/" + os.path.basename(log_path))) - - return additional_urls - def process_results(result_folder): test_results = [] additional_files = [] @@ -219,40 +94,16 @@ def process_results(result_folder): return state, description, test_results, additional_files - -def upload_results(s3_client, pr_number, commit_sha, test_results, raw_log, additional_files, check_name): - additional_files = [raw_log] + additional_files - s3_path_prefix = f"{pr_number}/{commit_sha}/" + check_name.lower().replace(' ', '_').replace('(', '_').replace(')', '_').replace(',', '_') - additional_urls = process_logs(s3_client, additional_files, s3_path_prefix, test_results) - - branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master" - branch_name = "master" - if pr_number != 0: - branch_name = f"PR #{pr_number}" - branch_url = f"https://github.com/ClickHouse/ClickHouse/pull/{pr_number}" - commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{commit_sha}" - - task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}" - - raw_log_url = additional_urls[0] - additional_urls.pop(0) - - html_report = create_test_html_report(check_name, test_results, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls, False) - with open('report.html', 'w', encoding='utf-8') as f: - f.write(html_report) - - url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html") - logging.info("Search result in url %s", url) - return url - if __name__ 
== "__main__": logging.basicConfig(level=logging.INFO) + + stopwatch = Stopwatch() + temp_path = os.getenv("TEMP_PATH", os.path.abspath(".")) repo_path = os.getenv("REPO_COPY", os.path.abspath("../../")) reports_path = os.getenv("REPORTS_PATH", "./reports") check_name = sys.argv[1] - build_number = int(sys.argv[2]) if not os.path.exists(temp_path): os.makedirs(temp_path) @@ -264,21 +115,8 @@ if __name__ == "__main__": gh = Github(get_best_robot_token()) - images_path = os.path.join(temp_path, 'changed_images.json') - images_with_version = get_images_with_versions(images_path) - for image, version in images_with_version.items(): - docker_image = image + ':' + version - for i in range(10): - try: - logging.info("Pulling image %s", docker_image) - subprocess.check_output(f"docker pull {docker_image}", stderr=subprocess.STDOUT, shell=True) - break - except Exception as ex: - time.sleep(i * 3) - logging.info("Got execption pulling docker %s", ex) - else: - raise Exception(f"Cannot pull dockerhub for image docker pull {docker_image}") - + images = get_images_with_versions(temp_path, IMAGES) + images_with_versions = {i.name: i.version for i in images} result_path = os.path.join(temp_path, "output_dir") if not os.path.exists(result_path): os.makedirs(result_path) @@ -287,23 +125,17 @@ if __name__ == "__main__": if not os.path.exists(work_path): os.makedirs(work_path) - build_config = get_build_config(build_number, repo_path) - build_config_str = build_config_to_string(build_config) - urls = get_build_urls(build_config_str, reports_path) - if not urls: - raise Exception("No build URLs found") - build_path = os.path.join(temp_path, "build") if not os.path.exists(build_path): os.makedirs(build_path) - download_builds(build_path, urls) + download_all_deb_packages(check_name, reports_path, build_path) my_env = get_env_for_runner(build_path, repo_path, result_path, work_path) json_path = os.path.join(work_path, 'params.json') with open(json_path, 'w', encoding='utf-8') as json_params: - json_params.write(json.dumps(get_json_params_dict(check_name, pr_info.sha, pr_info.number, images_with_version))) + json_params.write(json.dumps(get_json_params_dict(check_name, pr_info.sha, pr_info.number, images_with_versions))) output_path_log = os.path.join(result_path, "main_script_log.txt") @@ -318,13 +150,17 @@ if __name__ == "__main__": else: logging.info("Some tests failed") - subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True) state, description, test_results, additional_logs = process_results(result_path) + ch_helper = ClickHouseHelper() + mark_flaky_tests(ch_helper, check_name, test_results) + s3_helper = S3Helper('https://s3.amazonaws.com') - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, output_path_log, additional_logs, check_name) + report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [output_path_log] + additional_logs, check_name, False) print(f"::notice ::Report url: {report_url}") - commit = get_commit(gh, pr_info.sha) - commit.create_status(context=check_name, description=description, state=state, target_url=report_url) + post_commit_status(gh, pr_info.sha, check_name, description, state, report_url) + + prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, check_name) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git a/tests/ci/pr_info.py b/tests/ci/pr_info.py index 
46499514027..1fdc34ccdf1 100644
--- a/tests/ci/pr_info.py
+++ b/tests/ci/pr_info.py
@@ -8,6 +8,26 @@ from unidiff import PatchSet
 
 DIFF_IN_DOCUMENTATION_EXT = [".html", ".md", ".yml", ".txt", ".css", ".js", ".xml", ".ico", ".conf", ".svg", ".png", ".jpg", ".py", ".sh"]
 
+def get_pr_for_commit(sha, ref):
+    try_get_pr_url = f"https://api.github.com/repos/{os.getenv('GITHUB_REPOSITORY', 'ClickHouse/ClickHouse')}/commits/{sha}/pulls"
+    try:
+        response = requests.get(try_get_pr_url)
+        response.raise_for_status()
+        data = response.json()
+        if len(data) > 1:
+            print("Got more than one PR for commit", sha)
+        for pr in data:
+            # refs for pushes look like refs/heads/XX
+            # refs for PRs look like XX
+            if pr['head']['ref'] in ref:
+                return pr
+        print("Cannot find PR with required ref", ref, "returning first one")
+        first_pr = data[0]
+        return first_pr
+    except Exception as ex:
+        print("Cannot fetch PR info from commit", ex)
+    return None
+
 class PRInfo:
     def __init__(self, github_event, need_orgs=False, need_changed_files=False):
         if 'pull_request' in github_event: # pull request and other similar events
@@ -17,6 +37,18 @@ class PRInfo:
         else:
             self.sha = github_event['pull_request']['head']['sha']
 
+        repo_prefix = f"{os.getenv('GITHUB_SERVER_URL', 'https://github.com')}/{os.getenv('GITHUB_REPOSITORY', 'ClickHouse/ClickHouse')}"
+        self.task_url = f"{repo_prefix}/actions/runs/{os.getenv('GITHUB_RUN_ID')}"
+
+        self.repo_full_name = os.getenv('GITHUB_REPOSITORY', 'ClickHouse/ClickHouse')
+        self.commit_html_url = f"{repo_prefix}/commits/{self.sha}"
+        self.pr_html_url = f"{repo_prefix}/pull/{self.number}"
+
+        self.base_ref = github_event['pull_request']['base']['ref']
+        self.base_name = github_event['pull_request']['base']['repo']['full_name']
+        self.head_ref = github_event['pull_request']['head']['ref']
+        self.head_name = github_event['pull_request']['head']['repo']['full_name']
+
         self.labels = { l['name'] for l in github_event['pull_request']['labels'] }
         self.user_login = github_event['pull_request']['user']['login']
         self.user_orgs = set([])
@@ -32,13 +64,34 @@ class PRInfo:
             diff = urllib.request.urlopen(diff_url)
             diff_object = PatchSet(diff, diff.headers.get_charsets()[0])
             self.changed_files = { f.path for f in diff_object }
+
         elif 'commits' in github_event:
-            self.number = 0
             self.sha = github_event['after']
-            self.labels = {}
+            pull_request = get_pr_for_commit(self.sha, github_event['ref'])
+            repo_prefix = f"{os.getenv('GITHUB_SERVER_URL', 'https://github.com')}/{os.getenv('GITHUB_REPOSITORY', 'ClickHouse/ClickHouse')}"
+            self.task_url = f"{repo_prefix}/actions/runs/{os.getenv('GITHUB_RUN_ID')}"
+            self.commit_html_url = f"{repo_prefix}/commits/{self.sha}"
+            self.repo_full_name = os.getenv('GITHUB_REPOSITORY', 'ClickHouse/ClickHouse')
+            if pull_request is None or pull_request['state'] == 'closed': # it's merged PR to master
+                self.number = 0
+                self.labels = {}
+                self.pr_html_url = f"{repo_prefix}/commits/master"
+                self.base_ref = "master"
+                self.base_name = self.repo_full_name
+                self.head_ref = "master"
+                self.head_name = self.repo_full_name
+            else:
+                self.number = pull_request['number']
+                self.labels = { l['name'] for l in pull_request['labels'] }
+                self.base_ref = pull_request['base']['ref']
+                self.base_name = pull_request['base']['repo']['full_name']
+                self.head_ref = pull_request['head']['ref']
+                self.head_name = pull_request['head']['repo']['full_name']
+                self.pr_html_url = pull_request['html_url']
+
             if need_changed_files:
                 commit_before = github_event['before']
-                response =
requests.get(f'https://api.github.com/repos/ClickHouse/ClickHouse/compare/{commit_before}...{self.sha}') + response = requests.get(f"https://api.github.com/repos/{os.getenv('GITHUB_REPOSITORY')}/compare/{commit_before}...{self.sha}") response.raise_for_status() diff = response.json() diff --git a/tests/ci/pvs_check.py b/tests/ci/pvs_check.py index 43575cb75d7..c55ef4dd569 100644 --- a/tests/ci/pvs_check.py +++ b/tests/ci/pvs_check.py @@ -8,27 +8,19 @@ import json import logging import sys from github import Github -from report import create_test_html_report from s3_helper import S3Helper from pr_info import PRInfo from get_robot_token import get_best_robot_token, get_parameter_from_ssm +from upload_result_helper import upload_results +from commit_status_helper import get_commit +from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse +from stopwatch import Stopwatch NAME = 'PVS Studio (actions)' LICENCE_NAME = 'Free license: ClickHouse, Yandex' HTML_REPORT_FOLDER = 'pvs-studio-html-report' TXT_REPORT_NAME = 'pvs-studio-task-report.txt' -def process_logs(s3_client, additional_logs, s3_path_prefix): - additional_urls = [] - for log_path in additional_logs: - if log_path: - additional_urls.append( - s3_client.upload_test_report_to_s3( - log_path, - s3_path_prefix + "/" + os.path.basename(log_path))) - - return additional_urls - def _process_txt_report(path): warnings = [] errors = [] @@ -44,38 +36,11 @@ def _process_txt_report(path): return warnings, errors -def get_commit(gh, commit_sha): - repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")) - commit = repo.get_commit(commit_sha) - return commit - -def upload_results(s3_client, pr_number, commit_sha, test_results, additional_files): - s3_path_prefix = str(pr_number) + "/" + commit_sha + "/" + NAME.lower().replace(' ', '_').replace('(', '_').replace(')', '_') - additional_urls = process_logs(s3_client, additional_files, s3_path_prefix) - - branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master" - branch_name = "master" - if pr_number != 0: - branch_name = "PR #{}".format(pr_number) - branch_url = "https://github.com/ClickHouse/ClickHouse/pull/" + str(pr_number) - commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{commit_sha}" - - task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}" - - raw_log_url = additional_urls[0] - additional_urls.pop(0) - - html_report = create_test_html_report(NAME, test_results, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls, False) - with open('report.html', 'w') as f: - f.write(html_report) - - url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html") - logging.info("Search result in url %s", url) - return url - - if __name__ == "__main__": logging.basicConfig(level=logging.INFO) + + stopwatch = Stopwatch() + repo_path = os.path.join(os.getenv("REPO_COPY", os.path.abspath("../../"))) temp_path = os.path.join(os.getenv("TEMP_PATH")) @@ -122,7 +87,8 @@ if __name__ == "__main__": break if not index_html: - commit.create_status(context=NAME, description='PVS report failed to build', state='failure', target_url=f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}") + commit.create_status(context=NAME, description='PVS report failed to build', state='failure', + target_url=f"{os.getenv('GITHUB_SERVER_URL')}/{os.getenv('GITHUB_REPOSITORY')}/actions/runs/{os.getenv('GITHUB_RUN_ID')}") sys.exit(1) txt_report = 
os.path.join(temp_path, TXT_REPORT_NAME) @@ -133,11 +99,15 @@ if __name__ == "__main__": test_results = [(index_html, "Look at the report"), ("Errors count not checked", "OK")] description = "Total errors {}".format(len(errors)) additional_logs = [txt_report, os.path.join(temp_path, 'pvs-studio.log')] - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, additional_logs) + report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, additional_logs, NAME) print("::notice ::Report url: {}".format(report_url)) commit = get_commit(gh, pr_info.sha) commit.create_status(context=NAME, description=description, state=status, target_url=report_url) + + ch_helper = ClickHouseHelper() + prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, status, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, NAME) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) except Exception as ex: print("Got an exception", ex) sys.exit(1) diff --git a/tests/ci/run_check.py b/tests/ci/run_check.py index 2af50c895da..99a99ad3063 100644 --- a/tests/ci/run_check.py +++ b/tests/ci/run_check.py @@ -6,6 +6,7 @@ import logging from github import Github from pr_info import PRInfo from get_robot_token import get_best_robot_token +from commit_status_helper import get_commit NAME = 'Run Check (actions)' @@ -33,6 +34,7 @@ TRUSTED_CONTRIBUTORS = { "bharatnc", # Newbie, but already with many contributions. "bobrik", # Seasoned contributor, CloundFlare "BohuTANG", + "codyrobert", # Flickerbox engineer "damozhaeva", # DOCSUP "den-crane", "gyuton", # DOCSUP @@ -101,11 +103,6 @@ def should_run_checks_for_pr(pr_info): return True, "No special conditions apply" -def get_commit(gh, commit_sha): - repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")) - commit = repo.get_commit(commit_sha) - return commit - if __name__ == "__main__": logging.basicConfig(level=logging.INFO) with open(os.getenv('GITHUB_EVENT_PATH'), 'r') as event_file: @@ -115,7 +112,7 @@ if __name__ == "__main__": can_run, description = should_run_checks_for_pr(pr_info) gh = Github(get_best_robot_token()) commit = get_commit(gh, pr_info.sha) - url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}" + url = f"{os.getenv('GITHUB_SERVER_URL')}/{os.getenv('GITHUB_REPOSITORY')}/actions/runs/{os.getenv('GITHUB_RUN_ID')}" if not can_run: print("::notice ::Cannot run") commit.create_status(context=NAME, description=description, state="failure", target_url=url) diff --git a/tests/ci/s3_helper.py b/tests/ci/s3_helper.py index 62029b01fb1..82791234f1a 100644 --- a/tests/ci/s3_helper.py +++ b/tests/ci/s3_helper.py @@ -103,3 +103,12 @@ class S3Helper(): def upload_test_folder_to_s3(self, folder_path, s3_folder_path): return self._upload_folder_to_s3(folder_path, s3_folder_path, 'clickhouse-test-reports', True, True) + + def list_prefix(self, s3_prefix_path, bucket='clickhouse-builds'): + objects = self.client.list_objects_v2(Bucket=bucket, Prefix=s3_prefix_path) + result = [] + if 'Contents' in objects: + for obj in objects['Contents']: + result.append(obj['Key']) + + return result diff --git a/tests/ci/split_build_smoke_check.py b/tests/ci/split_build_smoke_check.py new file mode 100644 index 00000000000..28eb554d90e --- /dev/null +++ b/tests/ci/split_build_smoke_check.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 + +import os +import logging +import json +import subprocess + +from github import Github + +from 
s3_helper import S3Helper +from get_robot_token import get_best_robot_token +from pr_info import PRInfo +from build_download_helper import download_shared_build +from upload_result_helper import upload_results +from docker_pull_helper import get_image_with_version +from commit_status_helper import post_commit_status +from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse +from stopwatch import Stopwatch + + +DOCKER_IMAGE = "clickhouse/split-build-smoke-test" +DOWNLOAD_RETRIES_COUNT = 5 +RESULT_LOG_NAME = "run.log" +CHECK_NAME = 'Split build smoke test (actions)' + +def process_result(result_folder, server_log_folder): + status = "success" + description = 'Server started and responded' + summary = [("Smoke test", "OK")] + with open(os.path.join(result_folder, RESULT_LOG_NAME), 'r') as run_log: + lines = run_log.read().split('\n') + if not lines or lines[0].strip() != 'OK': + status = "failure" + logging.info("Lines is not ok: %s", str('\n'.join(lines))) + summary = [("Smoke test", "FAIL")] + description = 'Server failed to respond, see result in logs' + + result_logs = [] + server_log_path = os.path.join(server_log_folder, "clickhouse-server.log") + stderr_log_path = os.path.join(result_folder, "stderr.log") + client_stderr_log_path = os.path.join(result_folder, "clientstderr.log") + run_log_path = os.path.join(result_folder, RESULT_LOG_NAME) + + for path in [server_log_path, stderr_log_path, client_stderr_log_path, run_log_path]: + if os.path.exists(path): + result_logs.append(path) + + return status, description, summary, result_logs + +def get_run_command(build_path, result_folder, server_log_folder, docker_image): + return f"docker run --network=host --volume={build_path}:/package_folder" \ + f" --volume={server_log_folder}:/var/log/clickhouse-server" \ + f" --volume={result_folder}:/test_output" \ + f" {docker_image} >{result_folder}/{RESULT_LOG_NAME}" + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + + stopwatch = Stopwatch() + + temp_path = os.getenv("TEMP_PATH", os.path.abspath(".")) + repo_path = os.getenv("REPO_COPY", os.path.abspath("../../")) + reports_path = os.getenv("REPORTS_PATH", "./reports") + + with open(os.getenv('GITHUB_EVENT_PATH'), 'r', encoding='utf-8') as event_file: + event = json.load(event_file) + + pr_info = PRInfo(event) + + gh = Github(get_best_robot_token()) + + for root, _, files in os.walk(reports_path): + for f in files: + if f == 'changed_images.json': + images_path = os.path.join(root, 'changed_images.json') + break + + docker_image = get_image_with_version(reports_path, DOCKER_IMAGE) + + packages_path = os.path.join(temp_path, "packages") + if not os.path.exists(packages_path): + os.makedirs(packages_path) + + download_shared_build(CHECK_NAME, reports_path, packages_path) + + server_log_path = os.path.join(temp_path, "server_log") + if not os.path.exists(server_log_path): + os.makedirs(server_log_path) + + result_path = os.path.join(temp_path, "result_path") + if not os.path.exists(result_path): + os.makedirs(result_path) + + run_command = get_run_command(packages_path, result_path, server_log_path, docker_image) + + logging.info("Going to run command %s", run_command) + with subprocess.Popen(run_command, shell=True) as process: + retcode = process.wait() + if retcode == 0: + logging.info("Run successfully") + else: + logging.info("Run failed") + + subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True) + print("Result path", os.listdir(result_path)) + print("Server log path", 
os.listdir(server_log_path)) + + state, description, test_results, additional_logs = process_result(result_path, server_log_path) + + ch_helper = ClickHouseHelper() + s3_helper = S3Helper('https://s3.amazonaws.com') + report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, additional_logs, CHECK_NAME) + print(f"::notice ::Report url: {report_url}") + post_commit_status(gh, pr_info.sha, CHECK_NAME, description, state, report_url) + + prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, CHECK_NAME) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git a/tests/ci/stopwatch.py b/tests/ci/stopwatch.py new file mode 100644 index 00000000000..b6ae8674df1 --- /dev/null +++ b/tests/ci/stopwatch.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 + +import datetime + +class Stopwatch(): + def __init__(self): + self.start_time = datetime.datetime.utcnow() + self.start_time_str_value = self.start_time.strftime("%Y-%m-%d %H:%M:%S") + + @property + def duration_seconds(self): + return (datetime.datetime.utcnow() - self.start_time).total_seconds() + + @property + def start_time_str(self): + return self.start_time_str_value diff --git a/tests/ci/stress_check.py b/tests/ci/stress_check.py index a7730285025..4b3adfad23f 100644 --- a/tests/ci/stress_check.py +++ b/tests/ci/stress_check.py @@ -1,102 +1,23 @@ #!/usr/bin/env python3 -#!/usr/bin/env python3 - import csv import logging import subprocess import os import json -import time import sys from github import Github -import requests -from report import create_test_html_report from s3_helper import S3Helper from get_robot_token import get_best_robot_token from pr_info import PRInfo - - -DOWNLOAD_RETRIES_COUNT = 5 - -def dowload_build_with_progress(url, path): - logging.info("Downloading from %s to temp path %s", url, path) - for i in range(DOWNLOAD_RETRIES_COUNT): - try: - with open(path, 'wb') as f: - response = requests.get(url, stream=True) - response.raise_for_status() - total_length = response.headers.get('content-length') - if total_length is None or int(total_length) == 0: - logging.info("No content-length, will download file without progress") - f.write(response.content) - else: - dl = 0 - total_length = int(total_length) - logging.info("Content length is %ld bytes", total_length) - for data in response.iter_content(chunk_size=4096): - dl += len(data) - f.write(data) - if sys.stdout.isatty(): - done = int(50 * dl / total_length) - percent = int(100 * float(dl) / total_length) - eq_str = '=' * done - space_str = ' ' * (50 - done) - sys.stdout.write(f"\r[{eq_str}{space_str}] {percent}%") - sys.stdout.flush() - break - except Exception as ex: - sys.stdout.write("\n") - time.sleep(3) - logging.info("Exception while downloading %s, retry %s", ex, i + 1) - if os.path.exists(path): - os.remove(path) - else: - raise Exception(f"Cannot download dataset from {url}, all retries exceeded") - - sys.stdout.write("\n") - logging.info("Downloading finished") - - -def download_builds(result_path, build_urls): - for url in build_urls: - if url.endswith('.deb'): - fname = os.path.basename(url) - logging.info("Will download %s to %s", fname, result_path) - dowload_build_with_progress(url, os.path.join(result_path, fname)) - -def get_build_config(build_number, repo_path): - ci_config_path = os.path.join(repo_path, "tests/ci/ci_config.json") - with open(ci_config_path, 'r', encoding='utf-8') as ci_config: - 
config_dict = json.load(ci_config) - return config_dict['build_config'][build_number] - -def get_build_urls(build_config_str, reports_path): - for root, _, files in os.walk(reports_path): - for f in files: - if build_config_str in f : - logging.info("Found build report json %s", f) - with open(os.path.join(root, f), 'r', encoding='utf-8') as file_handler: - build_report = json.load(file_handler) - return build_report['build_urls'] - return [] - -def build_config_to_string(build_config): - if build_config["package-type"] == "performance": - return "performance" - - return "_".join([ - build_config['compiler'], - build_config['build-type'] if build_config['build-type'] else "relwithdebuginfo", - build_config['sanitizer'] if build_config['sanitizer'] else "none", - build_config['bundled'], - build_config['splitted'], - "tidy" if build_config['tidy'] == "enable" else "notidy", - "with_coverage" if build_config['with_coverage'] else "without_coverage", - build_config['package-type'], - ]) +from build_download_helper import download_all_deb_packages +from upload_result_helper import upload_results +from docker_pull_helper import get_image_with_version +from commit_status_helper import post_commit_status +from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse +from stopwatch import Stopwatch def get_run_command(build_path, result_folder, server_log_folder, image): @@ -107,47 +28,6 @@ def get_run_command(build_path, result_folder, server_log_folder, image): return cmd -def process_logs(s3_client, additional_logs, s3_path_prefix): - additional_urls = [] - for log_path in additional_logs: - if log_path: - additional_urls.append( - s3_client.upload_test_report_to_s3( - log_path, - s3_path_prefix + "/" + os.path.basename(log_path))) - - return additional_urls - -def upload_results(s3_client, pr_number, commit_sha, test_results, raw_log, additional_files, check_name): - additional_files = [raw_log] + additional_files - s3_path_prefix = f"{pr_number}/{commit_sha}/" + check_name.lower().replace(' ', '_').replace('(', '_').replace(')', '_').replace(',', '_') - additional_urls = process_logs(s3_client, additional_files, s3_path_prefix) - - branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master" - branch_name = "master" - if pr_number != 0: - branch_name = f"PR #{pr_number}" - branch_url = f"https://github.com/ClickHouse/ClickHouse/pull/{pr_number}" - commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{commit_sha}" - - task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}" - - raw_log_url = additional_urls[0] - additional_urls.pop(0) - - html_report = create_test_html_report(check_name, test_results, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls, True) - with open('report.html', 'w', encoding='utf-8') as f: - f.write(html_report) - - url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html") - logging.info("Search result in url %s", url) - return url - -def get_commit(gh, commit_sha): - repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")) - commit = repo.get_commit(commit_sha) - return commit - def process_results(result_folder, server_log_path, run_log_path): test_results = [] additional_files = [] @@ -186,12 +66,13 @@ def process_results(result_folder, server_log_path, run_log_path): if __name__ == "__main__": logging.basicConfig(level=logging.INFO) + + stopwatch = Stopwatch() temp_path = os.getenv("TEMP_PATH", 
os.path.abspath(".")) repo_path = os.getenv("REPO_COPY", os.path.abspath("../../")) reports_path = os.getenv("REPORTS_PATH", "./reports") check_name = sys.argv[1] - build_number = int(sys.argv[2]) if not os.path.exists(temp_path): os.makedirs(temp_path) @@ -203,46 +84,14 @@ if __name__ == "__main__": gh = Github(get_best_robot_token()) - for root, _, files in os.walk(reports_path): - for f in files: - if f == 'changed_images.json': - images_path = os.path.join(root, 'changed_images.json') - break - - image_name = "clickhouse/stress-test" - - docker_image = image_name - if images_path and os.path.exists(images_path): - logging.info("Images file exists") - with open(images_path, 'r', encoding='utf-8') as images_fd: - images = json.load(images_fd) - logging.info("Got images %s", images) - if image_name in images: - docker_image += ':' + images[image_name] - else: - logging.info("Images file not found") - - for i in range(10): - try: - logging.info("Pulling image %s", docker_image) - subprocess.check_output(f"docker pull {docker_image}", stderr=subprocess.STDOUT, shell=True) - break - except Exception as ex: - time.sleep(i * 3) - logging.info("Got execption pulling docker %s", ex) - else: - raise Exception(f"Cannot pull dockerhub for image docker pull {docker_image}") - - build_config = get_build_config(build_number, repo_path) - build_config_str = build_config_to_string(build_config) - urls = get_build_urls(build_config_str, reports_path) - if not urls: - raise Exception("No build URLs found") + docker_image = get_image_with_version(reports_path, 'clickhouse/stress-test') packages_path = os.path.join(temp_path, "packages") if not os.path.exists(packages_path): os.makedirs(packages_path) + download_all_deb_packages(check_name, reports_path, packages_path) + server_log_path = os.path.join(temp_path, "server_log") if not os.path.exists(server_log_path): os.makedirs(server_log_path) @@ -253,7 +102,6 @@ if __name__ == "__main__": run_log_path = os.path.join(temp_path, "runlog.log") - download_builds(packages_path, urls) run_command = get_run_command(packages_path, result_path, server_log_path, docker_image) logging.info("Going to run func tests: %s", run_command) @@ -267,9 +115,16 @@ if __name__ == "__main__": subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True) + s3_helper = S3Helper('https://s3.amazonaws.com') state, description, test_results, additional_logs = process_results(result_path, server_log_path, run_log_path) - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, run_log_path, additional_logs, check_name) + ch_helper = ClickHouseHelper() + mark_flaky_tests(ch_helper, check_name, test_results) + + report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [run_log_path] + additional_logs, check_name) print(f"::notice ::Report url: {report_url}") - commit = get_commit(gh, pr_info.sha) - commit.create_status(context=check_name, description=description, state=state, target_url=report_url) + + post_commit_status(gh, pr_info.sha, check_name, description, state, report_url) + + prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, check_name) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git a/tests/ci/style_check.py b/tests/ci/style_check.py index a0516623c41..8e11b295827 100644 --- a/tests/ci/style_check.py +++ b/tests/ci/style_check.py @@ -3,29 +3,21 @@ import logging 
import subprocess import os import csv -import time import json from github import Github -from report import create_test_html_report from s3_helper import S3Helper from pr_info import PRInfo from get_robot_token import get_best_robot_token +from upload_result_helper import upload_results +from docker_pull_helper import get_image_with_version +from commit_status_helper import post_commit_status +from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse +from stopwatch import Stopwatch + NAME = "Style Check (actions)" -def process_logs(s3_client, additional_logs, s3_path_prefix): - additional_urls = [] - for log_path in additional_logs: - if log_path: - additional_urls.append( - s3_client.upload_test_report_to_s3( - log_path, - s3_path_prefix + "/" + os.path.basename(log_path))) - - return additional_urls - - def process_result(result_folder): test_results = [] additional_files = [] @@ -54,38 +46,11 @@ def process_result(result_folder): state, description = "error", "Failed to read test_results.tsv" return state, description, test_results, additional_files -def upload_results(s3_client, pr_number, commit_sha, test_results, additional_files): - s3_path_prefix = f"{pr_number}/{commit_sha}/style_check" - additional_urls = process_logs(s3_client, additional_files, s3_path_prefix) - - branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master" - branch_name = "master" - if pr_number != 0: - branch_name = "PR #{}".format(pr_number) - branch_url = "https://github.com/ClickHouse/ClickHouse/pull/" + str(pr_number) - commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{commit_sha}" - - task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}" - - raw_log_url = additional_urls[0] - additional_urls.pop(0) - - html_report = create_test_html_report(NAME, test_results, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls) - with open('report.html', 'w') as f: - f.write(html_report) - - url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html") - logging.info("Search result in url %s", url) - return url - - -def get_commit(gh, commit_sha): - repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")) - commit = repo.get_commit(commit_sha) - return commit - if __name__ == "__main__": logging.basicConfig(level=logging.INFO) + + stopwatch = Stopwatch() + repo_path = os.path.join(os.getenv("GITHUB_WORKSPACE", os.path.abspath("../../"))) temp_path = os.path.join(os.getenv("RUNNER_TEMP", os.path.abspath("./temp")), 'style_check') @@ -98,32 +63,17 @@ if __name__ == "__main__": gh = Github(get_best_robot_token()) - images_path = os.path.join(temp_path, 'changed_images.json') - docker_image = 'clickhouse/style-test' - if os.path.exists(images_path): - logging.info("Images file exists") - with open(images_path, 'r') as images_fd: - images = json.load(images_fd) - logging.info("Got images %s", images) - if 'clickhouse/style-test' in images: - docker_image += ':' + images['clickhouse/style-test'] - - logging.info("Got docker image %s", docker_image) - for i in range(10): - try: - subprocess.check_output(f"docker pull {docker_image}", shell=True) - break - except Exception as ex: - time.sleep(i * 3) - logging.info("Got execption pulling docker %s", ex) - else: - raise Exception(f"Cannot pull dockerhub for image {docker_image}") - + docker_image = get_image_with_version(temp_path, 'clickhouse/style-test') s3_helper = S3Helper('https://s3.amazonaws.com') 
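The docker_pull_helper module referenced here is new in this patch, but its source is not part of this excerpt. Below is a minimal sketch of what it plausibly contains, inferred from the inline pull-with-retry code deleted elsewhere in this diff and from call sites such as get_image_with_version(temp_path, 'clickhouse/style-test') and get_images_with_versions(temp_path, IMAGES); the DockerImage class and the exact signatures are assumptions, not the actual implementation.

import json
import logging
import os
import subprocess
import time


class DockerImage:
    def __init__(self, name, version='latest'):
        self.name = name
        self.version = version

    def __str__(self):
        # formats as 'name:tag' so it can be dropped into docker commands
        return f"{self.name}:{self.version}"


def get_images_with_versions(images_path, required_images):
    # changed_images.json is produced by the DockerHubPush job and maps
    # image name -> freshly built tag; fall back to 'latest' when a
    # required image was not rebuilt for this PR.
    images = {}
    for root, _, files in os.walk(images_path):
        if 'changed_images.json' in files:
            with open(os.path.join(root, 'changed_images.json'), 'r', encoding='utf-8') as fd:
                images = json.load(fd)
            break

    result = [DockerImage(name, images.get(name, 'latest')) for name in required_images]
    for image in result:
        for i in range(10):
            try:
                logging.info("Pulling image %s", image)
                subprocess.check_output(f"docker pull {image}", stderr=subprocess.STDOUT, shell=True)
                break
            except subprocess.CalledProcessError as ex:
                time.sleep(i * 3)
                logging.info("Exception pulling docker %s", ex)
        else:
            raise Exception(f"Cannot pull image {image}")
    return result


def get_image_with_version(images_path, required_image):
    return get_images_with_versions(images_path, [required_image])[0]

The ten-attempt backoff mirrors the loops removed from stress_check.py, unit_tests_check.py and the other scripts; only the changed_images.json lookup and the pull itself are centralized.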
subprocess.check_output(f"docker run -u $(id -u ${{USER}}):$(id -g ${{USER}}) --cap-add=SYS_PTRACE --volume={repo_path}:/ClickHouse --volume={temp_path}:/test_output {docker_image}", shell=True) state, description, test_results, additional_files = process_result(temp_path) - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, additional_files) + ch_helper = ClickHouseHelper() + mark_flaky_tests(ch_helper, NAME, test_results) + + report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, additional_files, NAME) print("::notice ::Report url: {}".format(report_url)) - commit = get_commit(gh, pr_info.sha) - commit.create_status(context=NAME, description=description, state=state, target_url=report_url) + post_commit_status(gh, pr_info.sha, NAME, description, state, report_url) + + prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, NAME) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git a/tests/ci/unit_tests_check.py b/tests/ci/unit_tests_check.py index 2a277d3168f..21aa63e3b19 100644 --- a/tests/ci/unit_tests_check.py +++ b/tests/ci/unit_tests_check.py @@ -3,133 +3,24 @@ import logging import os import sys -import time import subprocess import json from github import Github -import requests -from report import create_test_html_report from s3_helper import S3Helper from get_robot_token import get_best_robot_token from pr_info import PRInfo +from build_download_helper import download_unit_tests +from upload_result_helper import upload_results +from docker_pull_helper import get_image_with_version +from commit_status_helper import post_commit_status +from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse +from stopwatch import Stopwatch + IMAGE_NAME = 'clickhouse/unit-test' -DOWNLOAD_RETRIES_COUNT = 5 - -def process_logs(s3_client, additional_logs, s3_path_prefix): - additional_urls = [] - for log_path in additional_logs: - if log_path: - additional_urls.append( - s3_client.upload_test_report_to_s3( - log_path, - s3_path_prefix + "/" + os.path.basename(log_path))) - - return additional_urls - -def dowload_build_with_progress(url, path): - logging.info("Downloading from %s to temp path %s", url, path) - for i in range(DOWNLOAD_RETRIES_COUNT): - try: - with open(path, 'wb') as f: - response = requests.get(url, stream=True) - response.raise_for_status() - total_length = response.headers.get('content-length') - if total_length is None or int(total_length) == 0: - logging.info("No content-length, will download file without progress") - f.write(response.content) - else: - dl = 0 - total_length = int(total_length) - logging.info("Content length is %ld bytes", total_length) - for data in response.iter_content(chunk_size=4096): - dl += len(data) - f.write(data) - if sys.stdout.isatty(): - done = int(50 * dl / total_length) - percent = int(100 * float(dl) / total_length) - eq_str = '=' * done - space_str = ' ' * (50 - done) - sys.stdout.write(f"\r[{eq_str}{space_str}] {percent}%") - sys.stdout.flush() - break - except Exception as ex: - sys.stdout.write("\n") - time.sleep(3) - logging.info("Exception while downloading %s, retry %s", ex, i + 1) - if os.path.exists(path): - os.remove(path) - else: - raise Exception(f"Cannot download dataset from {url}, all retries exceeded") - - sys.stdout.write("\n") - logging.info("Downloading finished") - - -def 
upload_results(s3_client, pr_number, commit_sha, test_results, raw_log, additional_files, check_name): - additional_files = [raw_log] + additional_files - s3_path_prefix = f"{pr_number}/{commit_sha}/" + check_name.lower().replace(' ', '_').replace('(', '_').replace(')', '_').replace(',', '_') - additional_urls = process_logs(s3_client, additional_files, s3_path_prefix) - - branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master" - branch_name = "master" - if pr_number != 0: - branch_name = f"PR #{pr_number}" - branch_url = f"https://github.com/ClickHouse/ClickHouse/pull/{pr_number}" - commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{commit_sha}" - - task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}" - - raw_log_url = additional_urls[0] - additional_urls.pop(0) - - html_report = create_test_html_report(check_name, test_results, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls, True) - with open('report.html', 'w', encoding='utf-8') as f: - f.write(html_report) - - url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html") - logging.info("Search result in url %s", url) - return url - -def get_commit(gh, commit_sha): - repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")) - commit = repo.get_commit(commit_sha) - return commit - -def get_build_config(build_number, repo_path): - ci_config_path = os.path.join(repo_path, "tests/ci/ci_config.json") - with open(ci_config_path, 'r', encoding='utf-8') as ci_config: - config_dict = json.load(ci_config) - return config_dict['build_config'][build_number] - -def get_build_urls(build_config_str, reports_path): - for root, _, files in os.walk(reports_path): - for f in files: - if build_config_str in f : - logging.info("Found build report json %s", f) - with open(os.path.join(root, f), 'r', encoding='utf-8') as file_handler: - build_report = json.load(file_handler) - return build_report['build_urls'] - return [] - -def build_config_to_string(build_config): - if build_config["package-type"] == "performance": - return "performance" - - return "_".join([ - build_config['compiler'], - build_config['build-type'] if build_config['build-type'] else "relwithdebuginfo", - build_config['sanitizer'] if build_config['sanitizer'] else "none", - build_config['bundled'], - build_config['splitted'], - "tidy" if build_config['tidy'] == "enable" else "notidy", - "with_coverage" if build_config['with_coverage'] else "without_coverage", - build_config['package-type'], - ]) - def get_test_name(line): elements = reversed(line.split(' ')) for element in elements: @@ -199,12 +90,14 @@ def process_result(result_folder): if __name__ == "__main__": logging.basicConfig(level=logging.INFO) + + stopwatch = Stopwatch() + temp_path = os.getenv("TEMP_PATH", os.path.abspath(".")) repo_path = os.getenv("REPO_COPY", os.path.abspath("../../")) reports_path = os.getenv("REPORTS_PATH", "./reports") check_name = sys.argv[1] - build_number = int(sys.argv[2]) if not os.path.exists(temp_path): os.makedirs(temp_path) @@ -216,47 +109,11 @@ if __name__ == "__main__": gh = Github(get_best_robot_token()) - for root, _, files in os.walk(reports_path): - for f in files: - if f == 'changed_images.json': - images_path = os.path.join(root, 'changed_images.json') - break + docker_image = get_image_with_version(reports_path, IMAGE_NAME) - docker_image = IMAGE_NAME - if images_path and os.path.exists(images_path): - logging.info("Images file exists") - with 
open(images_path, 'r', encoding='utf-8') as images_fd: - images = json.load(images_fd) - logging.info("Got images %s", images) - if IMAGE_NAME in images: - docker_image += ':' + images[IMAGE_NAME] - else: - logging.info("Images file not found") - - for i in range(10): - try: - logging.info("Pulling image %s", docker_image) - subprocess.check_output(f"docker pull {docker_image}", stderr=subprocess.STDOUT, shell=True) - break - except Exception as ex: - time.sleep(i * 3) - logging.info("Got execption pulling docker %s", ex) - else: - raise Exception(f"Cannot pull dockerhub for image docker pull {docker_image}") - - build_config = get_build_config(build_number, repo_path) - build_config_str = build_config_to_string(build_config) - urls = get_build_urls(build_config_str, reports_path) - - if not urls: - raise Exception("No build URLs found") + download_unit_tests(check_name, reports_path, temp_path) tests_binary_path = os.path.join(temp_path, "unit_tests_dbms") - for url in urls: - if url.endswith('unit_tests_dbms'): - dowload_build_with_progress(url, tests_binary_path) - break - os.chmod(tests_binary_path, 0o777) test_output = os.path.join(temp_path, "test_output") @@ -281,7 +138,13 @@ if __name__ == "__main__": s3_helper = S3Helper('https://s3.amazonaws.com') state, description, test_results, additional_logs = process_result(test_output) - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, run_log_path, additional_logs, check_name) + + ch_helper = ClickHouseHelper() + mark_flaky_tests(ch_helper, check_name, test_results) + + report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [run_log_path] + additional_logs, check_name) print(f"::notice ::Report url: {report_url}") - commit = get_commit(gh, pr_info.sha) - commit.create_status(context=check_name, description=description, state=state, target_url=report_url) + post_commit_status(gh, pr_info.sha, check_name, description, state, report_url) + + prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, check_name) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git a/tests/ci/upload_result_helper.py b/tests/ci/upload_result_helper.py new file mode 100644 index 00000000000..d0705372c44 --- /dev/null +++ b/tests/ci/upload_result_helper.py @@ -0,0 +1,64 @@ +import os +import logging +import ast + +from report import create_test_html_report + +def process_logs(s3_client, additional_logs, s3_path_prefix, test_results, with_raw_logs): + proccessed_logs = {} + # Firstly convert paths of logs from test_results to urls to s3. + for test_result in test_results: + if len(test_result) <= 3 or with_raw_logs: + continue + + # Convert from string repr of list to list. 
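+        # test_result[3] is the string repr of a list of log paths, e.g.
+        # "['/test_output/a.log', '/test_output/b.log']"; ast.literal_eval
+        # parses it safely, accepting only Python literal syntax (unlike eval).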
+ test_log_paths = ast.literal_eval(test_result[3]) + test_log_urls = [] + for log_path in test_log_paths: + if log_path in proccessed_logs: + test_log_urls.append(proccessed_logs[log_path]) + elif log_path: + url = s3_client.upload_test_report_to_s3( + log_path, + s3_path_prefix + "/" + os.path.basename(log_path)) + test_log_urls.append(url) + proccessed_logs[log_path] = url + + test_result[3] = test_log_urls + + additional_urls = [] + for log_path in additional_logs: + if log_path: + additional_urls.append( + s3_client.upload_test_report_to_s3( + log_path, + s3_path_prefix + "/" + os.path.basename(log_path))) + + return additional_urls + +def upload_results(s3_client, pr_number, commit_sha, test_results, additional_files, check_name, with_raw_logs=True): + s3_path_prefix = f"{pr_number}/{commit_sha}/" + check_name.lower().replace(' ', '_').replace('(', '_').replace(')', '_').replace(',', '_') + additional_urls = process_logs(s3_client, additional_files, s3_path_prefix, test_results, with_raw_logs) + + branch_url = f"{os.getenv('GITHUB_SERVER_URL')}/{os.getenv('GITHUB_REPOSITORY')}/commits/master" + branch_name = "master" + if pr_number != 0: + branch_name = f"PR #{pr_number}" + branch_url = f"{os.getenv('GITHUB_SERVER_URL')}/{os.getenv('GITHUB_REPOSITORY')}/pull/{pr_number}" + commit_url = f"{os.getenv('GITHUB_SERVER_URL')}/{os.getenv('GITHUB_REPOSITORY')}/commit/{commit_sha}" + + task_url = f"{os.getenv('GITHUB_SERVER_URL')}/{os.getenv('GITHUB_REPOSITORY')}/actions/runs/{os.getenv('GITHUB_RUN_ID')}" + + if additional_urls: + raw_log_url = additional_urls[0] + additional_urls.pop(0) + else: + raw_log_url = task_url + + html_report = create_test_html_report(check_name, test_results, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls, with_raw_logs) + with open('report.html', 'w', encoding='utf-8') as f: + f.write(html_report) + + url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html") + logging.info("Search result in url %s", url) + return url diff --git a/tests/config/config.d/keeper_port.xml b/tests/config/config.d/keeper_port.xml index fc6f8240f1e..85d4bc0f04f 100644 --- a/tests/config/config.d/keeper_port.xml +++ b/tests/config/config.d/keeper_port.xml @@ -10,7 +10,7 @@ 240000 1000000000000000 - 10000 + 100000 0 diff --git a/tests/integration/ci-runner.py b/tests/integration/ci-runner.py index f54f172d8f4..25d09a8c4c5 100755 --- a/tests/integration/ci-runner.py +++ b/tests/integration/ci-runner.py @@ -177,6 +177,9 @@ class ClickhouseIntegrationTestsRunner: self.image_versions = self.params['docker_images_with_versions'] self.shuffle_groups = self.params['shuffle_test_groups'] self.flaky_check = 'flaky check' in self.params['context_name'] + # if use_tmpfs is not set we assume it to be true, otherwise check + self.use_tmpfs = 'use_tmpfs' not in self.params or self.params['use_tmpfs'] + self.disable_net_host = 'disable_net_host' in self.params and self.params['disable_net_host'] self.start_time = time.time() self.soft_deadline_time = self.start_time + (TASK_TIMEOUT - MAX_TIME_IN_SANDBOX) @@ -257,15 +260,23 @@ class ClickhouseIntegrationTestsRunner: def _compress_logs(self, dir, relpaths, result_path): subprocess.check_call("tar czf {} -C {} {}".format(result_path, dir, ' '.join(relpaths)), shell=True) # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL + def _get_runner_opts(self): + result = [] + if self.use_tmpfs: + result.append("--tmpfs") + if self.disable_net_host: + result.append("--disable-net-host") + return " ".join(result) + def 
_get_all_tests(self, repo_path):
         image_cmd = self._get_runner_image_cmd(repo_path)
         out_file = "all_tests.txt"
         out_file_full = "all_tests_full.txt"
         cmd = "cd {repo_path}/tests/integration && " \
-            "timeout -s 9 1h ./runner --tmpfs {image_cmd} ' --setup-plan' " \
+            "timeout -s 9 1h ./runner {runner_opts} {image_cmd} ' --setup-plan' " \
             "| tee {out_file_full} | grep '::' | sed 's/ (fixtures used:.*//g' | sed 's/^ *//g' | sed 's/ *$//g' " \
             "| grep -v 'SKIPPED' | sort -u > {out_file}".format(
-            repo_path=repo_path, image_cmd=image_cmd, out_file=out_file, out_file_full=out_file_full)
+            repo_path=repo_path, runner_opts=self._get_runner_opts(), image_cmd=image_cmd, out_file=out_file, out_file_full=out_file_full)
 
         logging.info("Getting all tests with cmd '%s'", cmd)
         subprocess.check_call(cmd, shell=True)  # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL
@@ -435,8 +446,8 @@ class ClickhouseIntegrationTestsRunner:
         test_cmd = ' '.join([test for test in sorted(test_names)])
         parallel_cmd = " --parallel {} ".format(num_workers) if num_workers > 0 else ""
-        cmd = "cd {}/tests/integration && timeout -s 9 1h ./runner --tmpfs {} -t {} {} '-rfEp --run-id={} --color=no --durations=0 {}' | tee {}".format(
-            repo_path, image_cmd, test_cmd, parallel_cmd, i, _get_deselect_option(self.should_skip_tests()), info_path)
+        cmd = "cd {}/tests/integration && timeout -s 9 1h ./runner {} {} -t {} {} '-rfEp --run-id={} --color=no --durations=0 {}' | tee {}".format(
+            repo_path, self._get_runner_opts(), image_cmd, test_cmd, parallel_cmd, i, _get_deselect_option(self.should_skip_tests()), info_path)
 
         log_basename = test_group_str + "_" + str(i) + ".log"
         log_path = os.path.join(repo_path, "tests/integration", log_basename)
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index bcd47899ca0..9461f4a81c5 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -7,8 +7,10 @@ from helpers.network import _NetworkManager
 
 @pytest.fixture(autouse=True, scope="session")
 def cleanup_environment():
-    _NetworkManager.clean_all_user_iptables_rules()
     try:
+        if int(os.environ.get("PYTEST_CLEANUP_CONTAINERS", "0")) == 1:
+            logging.debug("Cleaning all iptables rules")
+            _NetworkManager.clean_all_user_iptables_rules()
         result = run_and_check(['docker ps | wc -l'], shell=True)
         if int(result) > 1:
             if int(os.environ.get("PYTEST_CLEANUP_CONTAINERS")) != 1:
diff --git a/tests/integration/helpers/0_common_instance_config.xml b/tests/integration/helpers/0_common_instance_config.xml
index f4a9a0b6296..71a2f8f4b13 100644
--- a/tests/integration/helpers/0_common_instance_config.xml
+++ b/tests/integration/helpers/0_common_instance_config.xml
@@ -16,5 +16,6 @@
     10
     /var/log/clickhouse-server/stderr.log
     /var/log/clickhouse-server/stdout.log
+    true
diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py
index 1d5d2d004f9..0817cc882b4 100644
--- a/tests/integration/helpers/cluster.py
+++ b/tests/integration/helpers/cluster.py
@@ -31,8 +31,9 @@ from kazoo.exceptions import KazooException
 from minio import Minio
 from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
 
-from helpers.test_tools import assert_eq_with_retry
+from helpers.test_tools import assert_eq_with_retry, exec_query_with_retry
 from helpers import pytest_xdist_logging_to_separate_files
+from helpers.client import QueryRuntimeException
 
 import docker
 
@@ -225,6 +226,8 @@ class ClickHouseCluster:
         self.docker_logs_path = p.join(self.instances_dir, 'docker.log')
         self.env_file = p.join(self.instances_dir, DEFAULT_ENV_NAME)
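+        # second_deadlock_stack=1 makes ThreadSanitizer print both stacks
+        # involved when it reports a deadlock or lock-order inversion;
+        # CLICKHOUSE_WATCHDOG_ENABLE=0 keeps the server from forking a
+        # watchdog process (presumably to simplify process handling in tests).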
         self.env_variables = {}
+        self.env_variables["TSAN_OPTIONS"] = "second_deadlock_stack=1"
+        self.env_variables["CLICKHOUSE_WATCHDOG_ENABLE"] = "0"
         self.up_called = False
 
         custom_dockerd_host = custom_dockerd_host or os.environ.get('CLICKHOUSE_TESTS_DOCKERD_HOST')
@@ -413,22 +416,23 @@ class ClickHouseCluster:
         logging.debug(f"CLUSTER INIT base_config_dir:{self.base_config_dir}")
 
     def cleanup(self):
+        if os.environ.get('DISABLE_CLEANUP') == "1":
+            logging.warning("Cleanup is disabled")
+            return
+
         # Just in case kill unstopped containers from previous launch
         try:
-            # docker-compose names containers using the following formula:
-            # container_name = project_name + '_' + instance_name + '_1'
-            # We need to have "^/" and "$" in the "--filter name" option below to filter by exact name of the container, see
-            # https://stackoverflow.com/questions/48767760/how-to-make-docker-container-ls-f-name-filter-by-exact-name
-            filter_name = f'^/{self.project_name}_.*_1$'
-            if int(run_and_check(f'docker container list --all --filter name={filter_name} | wc -l', shell=True)) > 1:
-                logging.debug(f"Trying to kill unstopped containers for project {self.project_name}:")
-                unstopped_containers = run_and_check(f'docker container list --all --filter name={filter_name}', shell=True)
-                unstopped_containers_ids = [line.split()[0] for line in unstopped_containers.splitlines()[1:]]
-                for id in unstopped_containers_ids:
+            unstopped_containers = self.get_running_containers()
+            if unstopped_containers:
+                logging.debug(f"Trying to kill unstopped containers: {unstopped_containers}")
+                for id in unstopped_containers:
                     run_and_check(f'docker kill {id}', shell=True, nothrow=True)
                     run_and_check(f'docker rm {id}', shell=True, nothrow=True)
-                logging.debug("Unstopped containers killed")
-                run_and_check(f'docker container list --all --filter name={filter_name}', shell=True)
+                unstopped_containers = self.get_running_containers()
+                if unstopped_containers:
+                    logging.debug(f"Left unstopped containers: {unstopped_containers}")
+                else:
+                    logging.debug("Unstopped containers killed.")
             else:
                 logging.debug(f"No running containers for project: {self.project_name}")
         except:
@@ -480,6 +484,19 @@ class ClickHouseCluster:
             cmd += " client"
         return cmd
 
+    # Returns a dict {container_id: container_name} of the docker containers
+    # currently running for this ClickHouseCluster.
+    def get_running_containers(self):
+        # docker-compose names containers using the following formula:
+        # container_name = project_name + '_' + instance_name + '_1'
+        # We need to have "^/" and "$" in the "--filter name" option below to filter by exact name of the container, see
+        # https://stackoverflow.com/questions/48767760/how-to-make-docker-container-ls-f-name-filter-by-exact-name
+        filter_name = f'^/{self.project_name}_.*_1$'
+        # We want the command "docker container list" to show only containers' ID and their names, separated by colon.
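+        # '{{.ID}}' and '{{.Names}}' are Docker Go-template placeholders (the
+        # braces are literal in this plain string, not an f-string), so each
+        # output line looks like 'a1b2c3d4e5f6:projectname_node_1'; the
+        # split(':', 1) below turns those lines into an id -> name dict.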
+ format = '{{.ID}}:{{.Names}}' + containers = run_and_check(f"docker container list --all --filter name='{filter_name}' --format '{format}'", shell=True) + containers = dict(line.split(':', 1) for line in containers.decode('utf8').splitlines()) + return containers + def copy_file_from_container_to_container(self, src_node, src_path, dst_node, dst_path): fname = os.path.basename(src_path) run_and_check([f"docker cp {src_node.docker_id}:{src_path} {self.instances_dir}"], shell=True) @@ -760,7 +777,7 @@ class ClickHouseCluster: hostname=None, env_variables=None, image="clickhouse/integration-test", tag=None, stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, external_dirs=None, tmpfs=None, zookeeper_docker_compose_path=None, minio_certs_dir=None, use_keeper=True, - main_config_name="config.xml", users_config_name="users.xml", copy_common_configs=True, config_root_name="clickhouse"): + main_config_name="config.xml", users_config_name="users.xml", copy_common_configs=True, config_root_name="clickhouse") -> 'ClickHouseInstance': """Add an instance to the cluster. @@ -962,6 +979,9 @@ class ClickHouseCluster: logging.info("Restart node with ip change") # In builds with sanitizer the server can take a long time to start node.wait_for_start(start_timeout=180.0, connection_timeout=600.0) # seconds + res = node.client.query("SELECT 30") + logging.debug(f"Read '{res}'") + assert "30\n" == res logging.info("Restarted") return node @@ -1414,7 +1434,7 @@ class ClickHouseCluster: # retry_exception(10, 5, subprocess_check_call, Exception, clickhouse_pull_cmd) if destroy_dirs and p.exists(self.instances_dir): - logging.debug(("Removing instances dir %s", self.instances_dir)) + logging.debug(f"Removing instances dir {self.instances_dir}") shutil.rmtree(self.instances_dir) for instance in list(self.instances.values()): @@ -1424,7 +1444,7 @@ class ClickHouseCluster: _create_env_file(os.path.join(self.env_file), self.env_variables) self.docker_client = docker.DockerClient(base_url='unix:///var/run/docker.sock', version=self.docker_api_version, timeout=600) - common_opts = ['up', '-d'] + common_opts = ['--verbose', 'up', '-d'] if self.with_zookeeper_secure and self.base_zookeeper_cmd: logging.debug('Setup ZooKeeper Secure') @@ -1644,7 +1664,7 @@ class ClickHouseCluster: self.shutdown() raise - def shutdown(self, kill=True): + def shutdown(self, kill=True, ignore_fatal=True): sanitizer_assert_instance = None fatal_log = None @@ -1672,10 +1692,10 @@ class ClickHouseCluster: # NOTE: we cannot do this via docker since in case of Fatal message container may already die. for name, instance in self.instances.items(): if instance.contains_in_log(SANITIZER_SIGN, from_host=True): - sanitizer_assert_instance = instance.grep_in_log(SANITIZER_SIGN, from_host=True) + sanitizer_assert_instance = instance.grep_in_log(SANITIZER_SIGN, from_host=True, filename='stderr.log') logging.error("Sanitizer in instance %s log %s", name, sanitizer_assert_instance) - if instance.contains_in_log("Fatal", from_host=True): + if not ignore_fatal and instance.contains_in_log("Fatal", from_host=True): fatal_log = instance.grep_in_log("Fatal", from_host=True) if 'Child process was terminated by signal 9 (KILL)' in fatal_log: fatal_log = None @@ -1685,7 +1705,7 @@ class ClickHouseCluster: try: subprocess_check_call(self.base_cmd + ['down', '--volumes']) except Exception as e: - logging.debug("Down + remove orphans failed durung shutdown. 
{}".format(repr(e))) + logging.debug("Down + remove orphans failed during shutdown. {}".format(repr(e))) else: logging.warning("docker-compose up was not called. Trying to export docker.log for running containers") @@ -1768,7 +1788,7 @@ CLICKHOUSE_START_COMMAND = "clickhouse server --config-file=/etc/clickhouse-serv " --log-file=/var/log/clickhouse-server/clickhouse-server.log " \ " --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log" -CLICKHOUSE_STAY_ALIVE_COMMAND = 'bash -c "trap \'killall tail\' INT TERM; {} --daemon; coproc tail -f /dev/null; wait $$!"'.format(CLICKHOUSE_START_COMMAND) +CLICKHOUSE_STAY_ALIVE_COMMAND = 'bash -c "trap \'pkill tail\' INT TERM; {} --daemon; coproc tail -f /dev/null; wait $$!"'.format(CLICKHOUSE_START_COMMAND) # /run/xtables.lock passed inside for correct iptables --wait DOCKER_COMPOSE_TEMPLATE = ''' @@ -2034,84 +2054,122 @@ class ClickHouseInstance: if not self.stay_alive: raise Exception("clickhouse can be stopped only with stay_alive=True instance") try: - ps_clickhouse = self.exec_in_container(["bash", "-c", "ps -C clickhouse"], user='root') + ps_clickhouse = self.exec_in_container(["bash", "-c", "ps -C clickhouse"], nothrow=True, user='root') if ps_clickhouse == " PID TTY STAT TIME COMMAND" : logging.warning("ClickHouse process already stopped") return self.exec_in_container(["bash", "-c", "pkill {} clickhouse".format("-9" if kill else "")], user='root') - sleep_time = 0.1 - num_steps = int(stop_wait_sec / sleep_time) + start_time = time.time() stopped = False - for step in range(num_steps): - time.sleep(sleep_time) - ps_clickhouse = self.exec_in_container(["bash", "-c", "ps -C clickhouse"], user='root') - if ps_clickhouse == " PID TTY STAT TIME COMMAND": + while time.time() <= start_time + stop_wait_sec: + pid = self.get_process_pid("clickhouse") + if pid is None: stopped = True break + else: + time.sleep(1) if not stopped: - logging.warning(f"Force kill clickhouse in stop_clickhouse. ps:{ps_clickhouse}") - self.stop_clickhouse(kill=True) + pid = self.get_process_pid("clickhouse") + if pid is not None: + logging.warning(f"Force kill clickhouse in stop_clickhouse. pid: {pid}") + self.exec_in_container(["bash", "-c", f"gdb -batch -ex 'thread apply all bt full' -p {pid} > {os.path.join(self.path, 'logs/stdout.log')}"], user='root') + self.stop_clickhouse(kill=True) + else: + ps_all = self.exec_in_container(["bash", "-c", "ps aux"], nothrow=True, user='root') + logging.warning(f"Force stop of clickhouse was requested, but no clickhouse process is running\n{ps_all}") + return except Exception as e: logging.warning(f"Stop ClickHouse raised an error {e}") - def start_clickhouse(self, start_wait_sec=30): + def start_clickhouse(self, start_wait_sec=60): if not self.stay_alive: raise Exception("ClickHouse can be started again only with stay_alive=True instance") - + start_time = time.time() time_to_sleep = 0.5 - start_tries = 5 - total_tries = int(start_wait_sec / time_to_sleep) - query_tries = int(total_tries / start_tries) - - for i in range(start_tries): + while start_time + start_wait_sec >= time.time(): # sometimes after SIGKILL (hard reset) server may refuse to start for some time # for different reasons. - self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid())) - started = False - for _ in range(query_tries): + pid = self.get_process_pid("clickhouse") + if pid is None: + logging.debug("No clickhouse process running. Starting a new one.") + self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid())) + time.sleep(1) + continue + else: + logging.debug("ClickHouse process is running.") try: - self.query("select 1") - started = True - break - except: - time.sleep(time_to_sleep) - if started: - break - else: - raise Exception("Cannot start ClickHouse, see additional info in logs") + self.wait_start(start_wait_sec + start_time - time.time()) + return + except Exception as e: + logging.warning(f"Current start attempt failed. Will kill {pid} just in case.") + self.exec_in_container(["bash", "-c", f"kill -9 {pid}"], user='root', nothrow=True) + time.sleep(time_to_sleep) + + raise Exception("Cannot start ClickHouse, see additional info in logs") - def restart_clickhouse(self, stop_start_wait_sec=30, kill=False): + def wait_start(self, start_wait_sec): + start_time = time.time() + last_err = None + while time.time() <= start_time + start_wait_sec: + try: + pid = self.get_process_pid("clickhouse") + if pid is None: + raise Exception("ClickHouse server is not running. Check logs.") + exec_query_with_retry(self, 'select 20', retry_count=10, silent=True) + return + except QueryRuntimeException as err: + last_err = err + pid = self.get_process_pid("clickhouse") + if pid is not None: + logging.warning(f"ERROR {err}") + else: + raise Exception("ClickHouse server is not running. Check logs.") + logging.error("No time left to start. But the process is still running. Will dump threads.") + ps_clickhouse = self.exec_in_container(["bash", "-c", "ps -C clickhouse"], nothrow=True, user='root') + logging.info(f"PS RESULT:\n{ps_clickhouse}") + pid = self.get_process_pid("clickhouse") + if pid is not None: + self.exec_in_container(["bash", "-c", f"gdb -batch -ex 'thread apply all bt full' -p {pid}"], user='root') + if last_err is not None: + raise last_err + + def restart_clickhouse(self, stop_start_wait_sec=60, kill=False): self.stop_clickhouse(stop_start_wait_sec, kill) self.start_clickhouse(stop_start_wait_sec) def exec_in_container(self, cmd, detach=False, nothrow=False, **kwargs): return self.cluster.exec_in_container(self.docker_id, cmd, detach, nothrow, **kwargs) - def contains_in_log(self, substring, from_host=False): + def rotate_logs(self): + self.exec_in_container(["bash", "-c", f"kill -HUP {self.get_process_pid('clickhouse server')}"], user='root') + + def contains_in_log(self, substring, from_host=False, filename='clickhouse-server.log'): if from_host: + # We check that the file exists first, but want to look through all rotated logs as well result = subprocess_check_call(["bash", "-c", - f'[ -f {self.logs_dir}/clickhouse-server.log ] && grep -a "{substring}" {self.logs_dir}/clickhouse-server.log || true' + f'[ -f {self.logs_dir}/{filename} ] && zgrep -aH "{substring}" {self.logs_dir}/{filename}* || true' ]) else: result = self.exec_in_container(["bash", "-c", - f'[ -f /var/log/clickhouse-server/clickhouse-server.log ] && grep -a "{substring}" /var/log/clickhouse-server/clickhouse-server.log || true' + f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -aH "{substring}" /var/log/clickhouse-server/{filename} || true' ]) return len(result) > 0 - def grep_in_log(self, substring, from_host=False): + def grep_in_log(self, substring, from_host=False, filename='clickhouse-server.log'): logging.debug(f"grep in log called %s", substring) if from_host: + # We check that the file exists first, but want to look through all rotated logs as well result = subprocess_check_call(["bash", "-c", - f'grep -a "{substring}" {self.logs_dir}/clickhouse-server.log || true' + f'[ -f {self.logs_dir}/{filename} ] && zgrep -a "{substring}" {self.logs_dir}/{filename}* || true' ]) else: result = self.exec_in_container(["bash", "-c", - f'grep -a "{substring}" /var/log/clickhouse-server/clickhouse-server.log || true' + f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -a "{substring}" /var/log/clickhouse-server/{filename}* || true' ]) logging.debug("grep result %s", result) return result @@ -2146,7 +2204,7 @@ class ClickHouseInstance: def get_process_pid(self, process_name): output = self.exec_in_container(["bash", "-c", - "ps ax | grep '{}' | grep -v 'grep' | grep -v 'bash -c' | awk '{{print $1}}'".format( + "ps ax | grep '{}' | grep -v 'grep' | grep -v 'coproc' | grep -v 'bash -c' | awk '{{print $1}}'".format( process_name)]) if output: try: @@ -2157,6 +2215,7 @@ class ClickHouseInstance: return None def restart_with_original_version(self, stop_start_wait_sec=300, callback_onstop=None, signal=15): + begin_time = time.time() if not self.stay_alive: raise Exception("Cannot restart not stay alive container") self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(signal)], user='root') @@ -2176,6 +2235,7 @@ if callback_onstop: callback_onstop(self) + self.exec_in_container(["bash", "-c", "echo 'restart_with_original_version: From version' && /usr/bin/clickhouse server --version && echo 'To version' && /usr/share/clickhouse_original server --version"]) self.exec_in_container( ["bash", "-c", "cp /usr/share/clickhouse_original /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"], user='root') @@ -2185,9 +2245,14 @@ self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid())) # wait start - assert_eq_with_retry(self, "select 1", "1", retry_count=retries) + time_left = begin_time + stop_start_wait_sec - time.time() + if time_left <= 0: + raise Exception("No time left during restart") + else: + self.wait_start(time_left) def restart_with_latest_version(self, stop_start_wait_sec=300, callback_onstop=None, signal=15): + begin_time = time.time() if not self.stay_alive: raise Exception("Cannot restart not stay alive container") self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(signal)], user='root') @@ -2213,13 +2278,18 @@ self.exec_in_container( ["bash", "-c", "cp /usr/share/clickhouse_fresh /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"], user='root') + self.exec_in_container(["bash", "-c", "echo 'restart_with_latest_version: From version' && /usr/share/clickhouse_original server --version && echo 'To version' && /usr/share/clickhouse_fresh server --version"]) self.exec_in_container(["bash", "-c", "cp /usr/share/clickhouse-odbc-bridge_fresh /usr/bin/clickhouse-odbc-bridge && chmod 777 /usr/bin/clickhouse"], user='root') self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid())) # wait start - assert_eq_with_retry(self, "select 1", "1", retry_count=retries) + time_left = begin_time + stop_start_wait_sec - time.time() + if time_left <= 0: + raise Exception("No time left during restart") + else: + self.wait_start(time_left) def get_docker_handle(self): return self.cluster.get_docker_handle(self.docker_id) diff --git a/tests/integration/helpers/network.py b/tests/integration/helpers/network.py index decb406879e..2bf0867c847 100644 --- a/tests/integration/helpers/network.py +++ 
b/tests/integration/helpers/network.py @@ -151,7 +151,7 @@ class _NetworkManager: def _iptables_cmd_suffix( source=None, destination=None, source_port=None, destination_port=None, - action=None, probability=None): + action=None, probability=None, custom_args=None): ret = [] if probability is not None: ret.extend(['-m', 'statistic', '--mode', 'random', '--probability', str(probability)]) @@ -166,6 +166,8 @@ class _NetworkManager: ret.extend(['--dport', str(destination_port)]) if action is not None: ret.extend(['-j'] + action.split()) + if custom_args is not None: + ret.extend(custom_args) return ret def __init__( diff --git a/tests/integration/helpers/test_tools.py b/tests/integration/helpers/test_tools.py index b5d40659629..3577553be34 100644 --- a/tests/integration/helpers/test_tools.py +++ b/tests/integration/helpers/test_tools.py @@ -85,15 +85,18 @@ def assert_logs_contain_with_retry(instance, substring, retry_count=20, sleep_ti else: raise AssertionError("'{}' not found in logs".format(substring)) -def exec_query_with_retry(instance, query, retry_count=40, sleep_time=0.5, settings={}): +def exec_query_with_retry(instance, query, retry_count=40, sleep_time=0.5, silent=False, settings={}): exception = None - for _ in range(retry_count): + for cnt in range(retry_count): try: - instance.query(query, timeout=30, settings=settings) + res = instance.query(query, timeout=30, settings=settings) + if not silent: + logging.debug(f"Result of {query} on try {cnt} is {res}") break except Exception as ex: exception = ex - logging.exception(f"Failed to execute query '{query}' on instance '{instance.name}' will retry") + if not silent: + logging.exception(f"Failed to execute query '{query}' on try {cnt} on instance '{instance.name}', will retry") time.sleep(sleep_time) else: raise exception diff --git a/tests/integration/runner b/tests/integration/runner index 03ea091d750..abff7cefce4 100755 --- a/tests/integration/runner +++ b/tests/integration/runner @@ -251,7 +251,11 @@ if __name__ == "__main__": elif args.dockerd_volume: dockerd_internal_volume = "--mount type=bind,source={},target=/var/lib/docker".format(args.dockerd_volume) else: - subprocess.check_call('docker volume create {name}_volume'.format(name=CONTAINER_NAME), shell=True) + try: + subprocess.check_call('docker volume create {name}_volume'.format(name=CONTAINER_NAME), shell=True) + except Exception as ex: + print("Volume creation failed, probably it already exists, exception:", ex) + dockerd_internal_volume = "--volume={}_volume:/var/lib/docker".format(CONTAINER_NAME) # If enabled we kill and remove containers before pytest session run. 
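exec_query_with_retry above relies on Python's for/else: the else clause runs only when the loop completes without break, i.e. when every attempt failed. A condensed, generic sketch of the same idiom (names are illustrative, not the helper itself):

    import time

    def retry(fn, retry_count=3, sleep_time=0.1):
        exception = None
        for cnt in range(retry_count):
            try:
                res = fn()
                break  # success skips the else clause below
            except Exception as ex:
                exception = ex
                time.sleep(sleep_time)
        else:
            raise exception  # every attempt raised; re-raise the last error
        return res

    assert retry(lambda: 42) == 42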
diff --git a/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py b/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py index 98658ec81d0..fc8d27cfa16 100644 --- a/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py +++ b/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py @@ -4,8 +4,8 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__, name="aggregate_fixed_key") node1 = cluster.add_instance('node1', with_zookeeper=True, image='yandex/clickhouse-server', tag='21.3', with_installed_binary=True) -node2 = cluster.add_instance('node2', with_zookeeper=True, image='yandex/clickhouse-server') -node3 = cluster.add_instance('node3', with_zookeeper=True, image='yandex/clickhouse-server') +node2 = cluster.add_instance('node2', with_zookeeper=True) +node3 = cluster.add_instance('node3', with_zookeeper=True) @pytest.fixture(scope="module") diff --git a/tests/integration/test_broken_part_during_merge/test.py b/tests/integration/test_broken_part_during_merge/test.py index 910dbc1d1a9..1c03add49db 100644 --- a/tests/integration/test_broken_part_during_merge/test.py +++ b/tests/integration/test_broken_part_during_merge/test.py @@ -15,12 +15,6 @@ node1 = cluster.add_instance('node1', with_zookeeper=True) def started_cluster(): try: cluster.start() - - node1.query(''' - CREATE TABLE replicated_mt(date Date, id UInt32, value Int32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt', '{replica}') ORDER BY id; - '''.format(replica=node1.name)) - yield cluster finally: @@ -28,6 +22,12 @@ def started_cluster(): def test_merge_and_part_corruption(started_cluster): + node1.query(''' + CREATE TABLE replicated_mt(date Date, id UInt32, value Int32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt', '{replica}') ORDER BY id; + '''.format(replica=node1.name)) + + node1.query("SYSTEM STOP REPLICATION QUEUES replicated_mt") for i in range(4): node1.query("INSERT INTO replicated_mt SELECT toDate('2019-10-01'), number, number * number FROM numbers ({f}, 100000)".format(f=i*100000)) @@ -53,3 +53,5 @@ def test_merge_and_part_corruption(started_cluster): # will hung if checked bug not fixed node1.query("ALTER TABLE replicated_mt UPDATE value = 7 WHERE 1", settings={"mutations_sync": 2}, timeout=30) assert node1.query("SELECT sum(value) FROM replicated_mt") == "2100000\n" + + node1.query('DROP TABLE replicated_mt SYNC') diff --git a/tests/integration/test_cleanup_dir_after_bad_zk_conn/test.py b/tests/integration/test_cleanup_dir_after_bad_zk_conn/test.py index 4db020d0226..58a8a609b10 100644 --- a/tests/integration/test_cleanup_dir_after_bad_zk_conn/test.py +++ b/tests/integration/test_cleanup_dir_after_bad_zk_conn/test.py @@ -48,7 +48,8 @@ def test_cleanup_dir_after_bad_zk_conn(start_cluster): node1.query_with_retry(query_create) node1.query_with_retry('''INSERT INTO replica.test VALUES (1, now())''') assert "1\n" in node1.query('''SELECT count() from replica.test FORMAT TSV''') - + node1.query("DROP TABLE replica.test SYNC") + node1.query("DROP DATABASE replica") def test_cleanup_dir_after_wrong_replica_name(start_cluster): node1.query_with_retry( @@ -68,7 +69,8 @@ def test_cleanup_dir_after_wrong_zk_path(start_cluster): assert "Cannot create" in error node1.query( "CREATE TABLE test3_r2 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test3/', 'r2') ORDER BY n") - + node1.query("DROP TABLE test3_r1 SYNC") + node1.query("DROP TABLE test3_r2 SYNC") def 
test_attach_without_zk(start_cluster): node1.query_with_retry( @@ -82,3 +84,4 @@ def test_attach_without_zk(start_cluster): pass node1.query("ATTACH TABLE IF NOT EXISTS test4_r1") node1.query("SELECT * FROM test4_r1") + node1.query("DROP TABLE test4_r1 SYNC") diff --git a/tests/integration/test_concurrent_ttl_merges/test.py b/tests/integration/test_concurrent_ttl_merges/test.py index 3e25398d633..8c3c490d055 100644 --- a/tests/integration/test_concurrent_ttl_merges/test.py +++ b/tests/integration/test_concurrent_ttl_merges/test.py @@ -1,4 +1,5 @@ import time +import logging import pytest from helpers.cluster import ClickHouseCluster @@ -13,7 +14,6 @@ node2 = cluster.add_instance('node2', main_configs=['configs/fast_background_poo def started_cluster(): try: cluster.start() - yield cluster finally: @@ -22,7 +22,7 @@ def started_cluster(): def count_ttl_merges_in_queue(node, table): result = node.query( - "SELECT count() FROM system.replication_queue WHERE merge_type = 'TTL_DELETE' and table = '{}'".format(table)) + f"SELECT count() FROM system.replication_queue WHERE merge_type = 'TTL_DELETE' and table = '{table}'") if not result: return 0 return int(result.strip()) @@ -30,22 +30,22 @@ def count_ttl_merges_in_queue(node, table): def count_ttl_merges_in_background_pool(node, table, level): result = TSV(node.query( - "SELECT * FROM system.merges WHERE merge_type = 'TTL_DELETE' and table = '{}'".format(table))) + f"SELECT * FROM system.merges WHERE merge_type = 'TTL_DELETE' and table = '{table}'")) count = len(result) if count >= level: - print("count_ttl_merges_in_background_pool: merges more than warn level:\n{}".format(result)) + logging.debug(f"count_ttl_merges_in_background_pool: merges more than warn level:\n{result}") return count def count_regular_merges_in_background_pool(node, table): - result = node.query("SELECT count() FROM system.merges WHERE merge_type = 'REGULAR' and table = '{}'".format(table)) + result = node.query(f"SELECT count() FROM system.merges WHERE merge_type = 'REGULAR' and table = '{table}'") if not result: return 0 return int(result.strip()) def count_running_mutations(node, table): - result = node.query("SELECT count() FROM system.merges WHERE table = '{}' and is_mutation=1".format(table)) + result = node.query(f"SELECT count() FROM system.merges WHERE table = '{table}' and is_mutation=1") if not result: return 0 return int(result.strip()) @@ -55,7 +55,6 @@ def count_running_mutations(node, table): # but it revealed a bug when we assign different merges to the same part # on the borders of partitions. 
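The DROP TABLE ... SYNC statements being appended throughout these tests matter because SYNC waits until the table's data (and, for Replicated tables, its ZooKeeper metadata) is actually removed, so the next test can reuse the same table name and replica path. One way to express the same cleanup as a fixture rather than trailing statements (a sketch; node1 and started_cluster are assumed from the surrounding test module):

    import pytest

    @pytest.fixture
    def replicated_mt(started_cluster):
        node1.query(
            "CREATE TABLE replicated_mt(date Date, id UInt32, value Int32) "
            "ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt', '{replica}') "
            "ORDER BY id".format(replica=node1.name))
        yield "replicated_mt"
        # SYNC blocks until removal completes instead of merely scheduling it.
        node1.query("DROP TABLE replicated_mt SYNC")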
def test_no_ttl_merges_in_busy_pool(started_cluster): - node1.query("DROP TABLE IF EXISTS test_ttl") node1.query( "CREATE TABLE test_ttl (d DateTime, key UInt64, data UInt64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0, number_of_free_entries_in_pool_to_execute_mutation = 0") @@ -63,12 +62,12 @@ def test_no_ttl_merges_in_busy_pool(started_cluster): for i in range(1, 7): node1.query( - "INSERT INTO test_ttl SELECT now() - INTERVAL 1 MONTH + number - 1, {}, number FROM numbers(5)".format(i)) + f"INSERT INTO test_ttl SELECT now() - INTERVAL 1 MONTH + number - 1, {i}, number FROM numbers(5)") node1.query("ALTER TABLE test_ttl UPDATE data = data + 1 WHERE sleepEachRow(1) = 0") while count_running_mutations(node1, "test_ttl") < 6: - print("Mutations count", count_running_mutations(node1, "test_ttl")) + logging.debug(f"Mutations count {count_running_mutations(node1, 'test_ttl')}") assert count_ttl_merges_in_background_pool(node1, "test_ttl", 1) == 0 time.sleep(0.5) @@ -76,7 +75,7 @@ rows_count = [] while count_running_mutations(node1, "test_ttl") == 6: - print("Mutations count after start TTL", count_running_mutations(node1, "test_ttl")) + logging.debug(f"Mutations count after start TTL {count_running_mutations(node1, 'test_ttl')}") rows_count.append(int(node1.query("SELECT count() FROM test_ttl").strip())) time.sleep(0.5) @@ -85,17 +84,17 @@ assert sum([1 for count in rows_count if count == 30]) > 4 assert_eq_with_retry(node1, "SELECT COUNT() FROM test_ttl", "0") + node1.query("DROP TABLE test_ttl SYNC") def test_limited_ttl_merges_in_empty_pool(started_cluster): - node1.query("DROP TABLE IF EXISTS test_ttl_v2") node1.query( "CREATE TABLE test_ttl_v2 (d DateTime, key UInt64, data UInt64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0") node1.query("SYSTEM STOP TTL MERGES") for i in range(100): - node1.query("INSERT INTO test_ttl_v2 SELECT now() - INTERVAL 1 MONTH, {}, number FROM numbers(1)".format(i)) + node1.query(f"INSERT INTO test_ttl_v2 SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(1)") assert node1.query("SELECT COUNT() FROM test_ttl_v2") == "100\n" @@ -109,17 +108,17 @@ break assert max(merges_with_ttl_count) <= 2 + node1.query("DROP TABLE test_ttl_v2 SYNC") def test_limited_ttl_merges_in_empty_pool_replicated(started_cluster): - node1.query("DROP TABLE IF EXISTS replicated_ttl") node1.query( "CREATE TABLE replicated_ttl (d DateTime, key UInt64, data UInt64) ENGINE = ReplicatedMergeTree('/test/t', '1') ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0") node1.query("SYSTEM STOP TTL MERGES") for i in range(100): - node1.query_with_retry("INSERT INTO replicated_ttl SELECT now() - INTERVAL 1 MONTH, {}, number FROM numbers(1)".format(i)) + node1.query_with_retry(f"INSERT INTO replicated_ttl SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(1)") assert node1.query("SELECT COUNT() FROM replicated_ttl") == "100\n" @@ -137,12 +136,11 @@ assert max(merges_with_ttl_count) <= 2 assert max(entries_with_ttl_count) <= 1 + node1.query("DROP TABLE replicated_ttl SYNC") + def test_limited_ttl_merges_two_replicas(started_cluster): # Actually this test quite fast and 
often we cannot catch any merges. - node1.query("DROP TABLE IF EXISTS replicated_ttl_2") - node2.query("DROP TABLE IF EXISTS replicated_ttl_2") - node1.query( "CREATE TABLE replicated_ttl_2 (d DateTime, key UInt64, data UInt64) ENGINE = ReplicatedMergeTree('/test/t2', '1') ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0") node2.query( @@ -153,7 +151,7 @@ def test_limited_ttl_merges_two_replicas(started_cluster): for i in range(100): node1.query_with_retry( - "INSERT INTO replicated_ttl_2 SELECT now() - INTERVAL 1 MONTH, {}, number FROM numbers(10000)".format(i)) + f"INSERT INTO replicated_ttl_2 SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(10000)") node2.query("SYSTEM SYNC REPLICA replicated_ttl_2", timeout=10) assert node1.query("SELECT COUNT() FROM replicated_ttl_2") == "1000000\n" @@ -176,3 +174,6 @@ def test_limited_ttl_merges_two_replicas(started_cluster): # check them assert max(merges_with_ttl_count_node1) <= 2 assert max(merges_with_ttl_count_node2) <= 2 + + node1.query("DROP TABLE replicated_ttl_2 SYNC") + node2.query("DROP TABLE replicated_ttl_2 SYNC") diff --git a/tests/integration/test_default_compression_codec/test.py b/tests/integration/test_default_compression_codec/test.py index 4d111db59ab..d114954d739 100644 --- a/tests/integration/test_default_compression_codec/test.py +++ b/tests/integration/test_default_compression_codec/test.py @@ -1,7 +1,8 @@ import random import string - +import logging import pytest +import time from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) @@ -130,6 +131,9 @@ def test_default_codec_single(start_cluster): assert node1.query("SELECT COUNT() FROM compression_table") == "3\n" assert node2.query("SELECT COUNT() FROM compression_table") == "3\n" + node1.query("DROP TABLE compression_table SYNC") + node2.query("DROP TABLE compression_table SYNC") + def test_default_codec_multiple(start_cluster): for i, node in enumerate([node1, node2]): @@ -199,6 +203,9 @@ def test_default_codec_multiple(start_cluster): assert node1.query("SELECT COUNT() FROM compression_table_multiple") == "3\n" assert node2.query("SELECT COUNT() FROM compression_table_multiple") == "3\n" + node1.query("DROP TABLE compression_table_multiple SYNC") + node2.query("DROP TABLE compression_table_multiple SYNC") + def test_default_codec_version_update(start_cluster): node3.query(""" @@ -212,8 +219,10 @@ def test_default_codec_version_update(start_cluster): node3.query("INSERT INTO compression_table VALUES (2, '{}')".format(get_random_string(2048))) node3.query("INSERT INTO compression_table VALUES (3, '{}')".format(get_random_string(22048))) + old_version = node3.query("SELECT version()") node3.restart_with_latest_version() - + new_version = node3.query("SELECT version()") + logging.debug(f"Updated from {old_version} to {new_version}") assert node3.query( "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_1_1_0'") == "ZSTD(1)\n" assert node3.query( @@ -230,6 +239,16 @@ def test_default_codec_version_update(start_cluster): assert node3.query( "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_3_3_1'") == "LZ4\n" + node3.query("DROP TABLE compression_table SYNC") + + def callback(n): + n.exec_in_container(['bash', '-c', 'rm -rf /var/lib/clickhouse/metadata/system /var/lib/clickhouse/data/system '], user='root') + node3.restart_with_original_version(callback_onstop=callback) + + cur_version = 
node3.query("SELECT version()") + logging.debug(f"End with {cur_version}") + + def test_default_codec_for_compact_parts(start_cluster): node4.query(""" CREATE TABLE compact_parts_table ( @@ -254,3 +273,4 @@ def test_default_codec_for_compact_parts(start_cluster): node4.query("ATTACH TABLE compact_parts_table") assert node4.query("SELECT COUNT() FROM compact_parts_table") == "1\n" + node4.query("DROP TABLE compact_parts_table SYNC") diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/.gitignore b/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/.gitignore new file mode 100644 index 00000000000..12657916b22 --- /dev/null +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/.gitignore @@ -0,0 +1,2 @@ +dictionaries/* +!.gitignore diff --git a/tests/integration/test_dictionaries_ddl/test.py b/tests/integration/test_dictionaries_ddl/test.py index 72652880c58..17a0ec11fcb 100644 --- a/tests/integration/test_dictionaries_ddl/test.py +++ b/tests/integration/test_dictionaries_ddl/test.py @@ -190,7 +190,7 @@ def test_restricted_database(started_cluster): LIFETIME(MIN 1 MAX 10) """) for node in [node1, node2]: - node.query("DROP TABLE restricted_db.table_in_restricted_db", user="admin") + node.query("DROP DICTIONARY IF EXISTS default.some_dict", user="admin") node.query("DROP DATABASE restricted_db", user="admin") diff --git a/tests/integration/test_dictionaries_dependency/test.py b/tests/integration/test_dictionaries_dependency/test.py index 9b1019822e3..7dc7f84d50b 100644 --- a/tests/integration/test_dictionaries_dependency/test.py +++ b/tests/integration/test_dictionaries_dependency/test.py @@ -90,7 +90,10 @@ def test_dependency_via_explicit_table(node): # Restart must not break anything. 
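The callback_onstop used with restart_with_original_version in test_default_codec_version_update above runs while the server is stopped and before the binary is swapped back, which is exactly when wiping /var/lib/clickhouse/metadata/system is safe. A condensed sketch of that sequencing (not the real helper body):

    def restart_with_binary(instance, shared_binary, callback_onstop=None):
        instance.stop_clickhouse()
        if callback_onstop:
            # Runs with the server down, e.g. to remove on-disk system tables.
            callback_onstop(instance)
        instance.exec_in_container(
            ["bash", "-c", f"cp {shared_binary} /usr/bin/clickhouse"], user="root")
        instance.start_clickhouse()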
node.restart_clickhouse() check() - + for dct in d_names: + node.query(f"DROP DICTIONARY {dct}") + for tbl in tbl_names: + node.query(f"DROP TABLE {tbl}") @pytest.mark.parametrize("node", nodes) def test_dependency_via_dictionary_database(node): @@ -114,3 +117,9 @@ node.restart_clickhouse() for d_name in d_names: assert node.query_with_retry("SELECT dictGet({}, 'y', toUInt64(5))".format(d_name)) == "6\n" + + # cleanup + for d_name in d_names: + node.query(f"DROP DICTIONARY IF EXISTS {d_name} SYNC") + node.query("DROP DATABASE dict_db SYNC") + node.restart_clickhouse() diff --git a/tests/integration/test_dictionaries_dependency_xml/test.py b/tests/integration/test_dictionaries_dependency_xml/test.py index 6b8a5dff133..1b3ea32d09c 100644 --- a/tests/integration/test_dictionaries_dependency_xml/test.py +++ b/tests/integration/test_dictionaries_dependency_xml/test.py @@ -109,8 +109,11 @@ def test_dependent_tables(started_cluster): dependent_tables_assert() instance.restart_clickhouse() dependent_tables_assert() + query("drop table a.t") + query("drop table lazy.log") + query("drop table join") + query("drop dictionary test.d") + query("drop table src") + query("drop table system.join") query("drop database a") query("drop database lazy") - query("drop table src") - query("drop table join") - query("drop table system.join") diff --git a/tests/integration/test_dictionaries_update_field/test.py b/tests/integration/test_dictionaries_update_field/test.py index c52c836b4f7..2e46403c63b 100644 --- a/tests/integration/test_dictionaries_update_field/test.py +++ b/tests/integration/test_dictionaries_update_field/test.py @@ -4,7 +4,6 @@ import time import pytest from helpers.cluster import ClickHouseCluster from helpers.cluster import ClickHouseKiller -from helpers.network import PartitionManager cluster = ClickHouseCluster(__file__) diff --git a/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py index 9ab790c0fd6..cfd5f4d5607 100644 --- a/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py +++ b/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py @@ -51,7 +51,7 @@ def test_default_reading(started_cluster): test_helper() with PartitionManager() as pm, ClickHouseKiller(dictionary_node): - assert None == dictionary_node.get_process_pid("clickhouse"), "CLickHouse must be alive" + assert None == dictionary_node.get_process_pid("clickhouse"), "ClickHouse must be stopped" # Remove connection between main_node and dictionary for sure pm.heal_all() diff --git a/tests/integration/test_disk_over_web_server/configs/async_read.xml b/tests/integration/test_disk_over_web_server/configs/async_read.xml deleted file mode 100644 index 4449d83779a..00000000000 --- a/tests/integration/test_disk_over_web_server/configs/async_read.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - read_threadpool - - - diff --git a/tests/integration/test_disk_over_web_server/test.py b/tests/integration/test_disk_over_web_server/test.py index 55f760f514f..f80cccac1be 100644 --- a/tests/integration/test_disk_over_web_server/test.py +++ b/tests/integration/test_disk_over_web_server/test.py @@ -11,7 +11,6 @@ def cluster(): cluster.add_instance("node1", main_configs=["configs/storage_conf.xml"], with_nginx=True) cluster.add_instance("node2", main_configs=["configs/storage_conf_web.xml"], with_nginx=True) cluster.add_instance("node3", 
main_configs=["configs/storage_conf_web.xml"], with_nginx=True) - cluster.add_instance("node_async_read", main_configs=["configs/storage_conf_web.xml"], user_configs=["configs/async_read.xml"], with_nginx=True) cluster.start() node1 = cluster.instances["node1"] @@ -38,7 +37,7 @@ def cluster(): cluster.shutdown() -@pytest.mark.parametrize("node_name", ["node2", "node_async_read"]) +@pytest.mark.parametrize("node_name", ["node2"]) def test_usage(cluster, node_name): node1 = cluster.instances["node1"] node2 = cluster.instances[node_name] diff --git a/tests/integration/test_grpc_protocol/test.py b/tests/integration/test_grpc_protocol/test.py index 7b2cdee8d76..c892fc94712 100644 --- a/tests/integration/test_grpc_protocol/test.py +++ b/tests/integration/test_grpc_protocol/test.py @@ -213,6 +213,7 @@ def test_errors_handling(): def test_authentication(): query("CREATE USER OR REPLACE john IDENTIFIED BY 'qwe123'") assert query("SELECT currentUser()", user_name="john", password="qwe123") == "john\n" + query("DROP USER john") def test_logs(): logs = query_and_get_logs("SELECT 1", settings={'send_logs_level':'debug'}) diff --git a/tests/integration/test_insert_into_distributed_sync_async/test.py b/tests/integration/test_insert_into_distributed_sync_async/test.py index 372ed04cd2c..1f479003b99 100755 --- a/tests/integration/test_insert_into_distributed_sync_async/test.py +++ b/tests/integration/test_insert_into_distributed_sync_async/test.py @@ -68,6 +68,8 @@ def test_insertion_sync(started_cluster): 2000-01-01 100500''') assert TSV(node2.query('SELECT date, val FROM local_table WHERE val = 100500 ORDER BY date')) == expected + node1.query("TRUNCATE TABLE local_table SYNC") + node2.query("TRUNCATE TABLE local_table SYNC") """ def test_insertion_sync_fails_on_error(started_cluster): diff --git a/tests/integration/test_keeper_three_nodes_two_alive/test.py b/tests/integration/test_keeper_three_nodes_two_alive/test.py index eb63d28b3e2..f7cfb3c354e 100644 --- a/tests/integration/test_keeper_three_nodes_two_alive/test.py +++ b/tests/integration/test_keeper_three_nodes_two_alive/test.py @@ -53,9 +53,11 @@ def test_start_offline(started_cluster): assert node3.contains_in_log("Cannot connect to ZooKeeper (or Keeper) before internal Keeper start") node2_zk = get_fake_zk("node2") - node2_zk.create("/test_dead", b"data") + node2_zk.create("/c", b"data") + finally: p.map(start, [node1, node2, node3]) + get_fake_zk("node1").delete("/test_alive") def test_start_non_existing(started_cluster): @@ -80,6 +82,8 @@ def test_start_non_existing(started_cluster): node1.replace_in_config('/etc/clickhouse-server/config.d/enable_keeper1.xml', 'non_existing_node', 'node3') node2.replace_in_config('/etc/clickhouse-server/config.d/enable_keeper2.xml', 'non_existing_node', 'node3') p.map(start, [node1, node2, node3]) + node2_zk.delete("/test_non_exising") + def test_restart_third_node(started_cluster): node1_zk = get_fake_zk("node1") @@ -88,3 +92,4 @@ def test_restart_third_node(started_cluster): node3.restart_clickhouse() assert node3.contains_in_log("Connected to ZooKeeper (or Keeper) before internal Keeper start") + node1_zk.delete("/test_restart") diff --git a/tests/integration/test_log_lz4_streaming/__init__.py b/tests/integration/test_log_lz4_streaming/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_log_lz4_streaming/configs/logs.xml b/tests/integration/test_log_lz4_streaming/configs/logs.xml new file mode 100644 index 00000000000..43a43cd231b --- /dev/null +++ 
b/tests/integration/test_log_lz4_streaming/configs/logs.xml @@ -0,0 +1,5 @@ + + + true + + diff --git a/tests/integration/test_log_lz4_streaming/test.py b/tests/integration/test_log_lz4_streaming/test.py new file mode 100644 index 00000000000..7f2f22f28c9 --- /dev/null +++ b/tests/integration/test_log_lz4_streaming/test.py @@ -0,0 +1,44 @@ +import pytest +import time + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) + +node = cluster.add_instance('node', main_configs=['configs/logs.xml'], stay_alive=True) + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def check_log_file(): + assert node.file_exists("/var/log/clickhouse-server/clickhouse-server.log.lz4") + + lz4_output = node.exec_in_container(["bash", "-c", "lz4 -t /var/log/clickhouse-server/clickhouse-server.log.lz4 2>&1"], user='root') + assert lz4_output.count('Error') == 0, lz4_output + + compressed_size = int(node.exec_in_container(["bash", "-c", "du -b /var/log/clickhouse-server/clickhouse-server.log.lz4 | awk {' print $1 '}"], user='root')) + uncompressed_size = int(lz4_output.split()[3]) + assert 0 < compressed_size < uncompressed_size, lz4_output + + +def test_concatenation(started_cluster): + node.stop_clickhouse() + node.start_clickhouse() + node.stop_clickhouse() + + check_log_file() + + +def test_incomplete_rotation(started_cluster): + node.stop_clickhouse(kill=True) + node.start_clickhouse() + node.stop_clickhouse() + + check_log_file() diff --git a/tests/integration/test_match_process_uid_against_data_owner/test.py b/tests/integration/test_match_process_uid_against_data_owner/test.py index 754ebc731c1..cf8a4bc711b 100644 --- a/tests/integration/test_match_process_uid_against_data_owner/test.py +++ b/tests/integration/test_match_process_uid_against_data_owner/test.py @@ -1,40 +1,38 @@ import os import pwd import re - -import docker import pytest -from helpers.cluster import ClickHouseCluster, CLICKHOUSE_START_COMMAND +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance('node', stay_alive=True) +other_user_id = pwd.getpwnam('nobody').pw_uid +current_user_id = os.getuid() + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + if current_user_id != 0: + return + + cluster.start() + yield cluster + + finally: + cluster.shutdown(ignore_fatal=True) -def test_different_user(): - current_user_id = os.getuid() +def test_different_user(started_cluster): + with pytest.raises(Exception): + node.stop_clickhouse() + node.exec_in_container(["bash", "-c", f"chown {other_user_id} /var/lib/clickhouse"], privileged=True) + node.start_clickhouse(start_wait_sec=3) - if current_user_id != 0: - return - - other_user_id = pwd.getpwnam('nobody').pw_uid - - cluster = ClickHouseCluster(__file__) - node = cluster.add_instance('node') - - cluster.start() - - docker_api = cluster.docker_client.api - container = node.get_docker_handle() - container.stop() - container.start() - container.exec_run('chown {} /var/lib/clickhouse'.format(other_user_id), privileged=True) - container.exec_run(CLICKHOUSE_START_COMMAND) - - cluster.shutdown() # cleanup - - with open(os.path.join(node.path, 'logs/clickhouse-server.err.log')) as log: - expected_message = "Effective user of the process \(.*\) does not match the owner of the data \(.*\)\. Run under 'sudo -u .*'\." 
- - last_message = [row for row in log.readlines() if "Effective" in row][-1] - - if re.search(expected_message, last_message) is None: - pytest.fail( - 'Expected the server to fail with a message "{}", but the last message is "{}"'.format(expected_message, - last_message)) + log = node.grep_in_log("Effective") + expected_message = "Effective user of the process \(.*\) does not match the owner of the data \(.*\)\. Run under 'sudo -u .*'\." + if re.search(expected_message, log) is None: + pytest.fail( + 'Expected the server to fail with a message "{}", but the last message is "{}"'.format(expected_message, log)) + node.exec_in_container(["bash", "-c", f"chown {current_user_id} /var/lib/clickhouse"], privileged=True) + node.start_clickhouse() + node.rotate_logs() diff --git a/tests/integration/test_materialized_mysql_database/configs/users.xml b/tests/integration/test_materialized_mysql_database/configs/users.xml index b5de2b300d3..4b7f5a1b109 100644 --- a/tests/integration/test_materialized_mysql_database/configs/users.xml +++ b/tests/integration/test_materialized_mysql_database/configs/users.xml @@ -3,9 +3,9 @@ 1 + Atomic 1 0 - Ordinary diff --git a/tests/integration/test_materialized_mysql_database/configs/users_db_atomic.xml b/tests/integration/test_materialized_mysql_database/configs/users_db_atomic.xml deleted file mode 100644 index 3cf7285de59..00000000000 --- a/tests/integration/test_materialized_mysql_database/configs/users_db_atomic.xml +++ /dev/null @@ -1,19 +0,0 @@ - - - - - 1 - Atomic - - - - - - - - ::/0 - - default - - - diff --git a/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py b/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py index 5f6daea24ac..5524f842c36 100644 --- a/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py +++ b/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py @@ -21,7 +21,7 @@ def check_query(clickhouse_node, query, result_set, retry_count=10, interval_sec if result_set == lastest_result: return - logging.debug(f"latest_result{lastest_result}") + logging.debug(f"latest_result {lastest_result}") time.sleep(interval_seconds) except Exception as e: logging.debug(f"check_query retry {i+1} exception {e}") @@ -225,6 +225,31 @@ def drop_table_with_materialized_mysql_database(clickhouse_node, mysql_node, ser mysql_node.query("DROP DATABASE test_database_drop") +def create_table_like_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name): + mysql_node.query("DROP DATABASE IF EXISTS create_like") + mysql_node.query("DROP DATABASE IF EXISTS create_like2") + clickhouse_node.query("DROP DATABASE IF EXISTS create_like") + + mysql_node.query("CREATE DATABASE create_like") + mysql_node.query("CREATE DATABASE create_like2") + mysql_node.query("CREATE TABLE create_like.t1 (id INT NOT NULL PRIMARY KEY)") + mysql_node.query("CREATE TABLE create_like2.t1 LIKE create_like.t1") + + clickhouse_node.query( + f"CREATE DATABASE create_like ENGINE = MaterializeMySQL('{service_name}:3306', 'create_like', 'root', 'clickhouse')") + mysql_node.query("CREATE TABLE create_like.t2 LIKE create_like.t1") + mysql_node.query("USE create_like") + mysql_node.query("CREATE TABLE t3 LIKE create_like2.t1") + mysql_node.query("CREATE TABLE t4 LIKE t1") + + check_query(clickhouse_node, "SHOW TABLES FROM create_like", "t1\nt2\nt4\n") + check_query(clickhouse_node, "SHOW DATABASES LIKE 'create_like%'", "create_like\n") + + clickhouse_node.query("DROP DATABASE create_like") + 
mysql_node.query("DROP DATABASE create_like") + mysql_node.query("DROP DATABASE create_like2") + + def create_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS test_database_create") clickhouse_node.query("DROP DATABASE IF EXISTS test_database_create") diff --git a/tests/integration/test_materialized_mysql_database/test.py b/tests/integration/test_materialized_mysql_database/test.py index feade1b60a0..29194f4ab25 100644 --- a/tests/integration/test_materialized_mysql_database/test.py +++ b/tests/integration/test_materialized_mysql_database/test.py @@ -5,7 +5,7 @@ import pwd import re import pymysql.cursors import pytest -from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check +from helpers.cluster import ClickHouseCluster, ClickHouseInstance, get_docker_compose_path, run_and_check import docker import logging @@ -17,10 +17,10 @@ cluster = ClickHouseCluster(__file__) mysql_node = None mysql8_node = None -node_db_ordinary = cluster.add_instance('node1', user_configs=["configs/users.xml"], with_mysql=True, stay_alive=True) -node_db_atomic = cluster.add_instance('node2', user_configs=["configs/users_db_atomic.xml"], with_mysql8=True, stay_alive=True) -node_disable_bytes_settings = cluster.add_instance('node3', user_configs=["configs/users_disable_bytes_settings.xml"], with_mysql=False, stay_alive=True) -node_disable_rows_settings = cluster.add_instance('node4', user_configs=["configs/users_disable_rows_settings.xml"], with_mysql=False, stay_alive=True) +node_db = cluster.add_instance('node1', user_configs=["configs/users.xml"], with_mysql=True, with_mysql8=True, stay_alive=True) +node_disable_bytes_settings = cluster.add_instance('node2', user_configs=["configs/users_disable_bytes_settings.xml"], with_mysql=False, stay_alive=True) +node_disable_rows_settings = cluster.add_instance('node3', user_configs=["configs/users_disable_rows_settings.xml"], with_mysql=False, stay_alive=True) + @pytest.fixture(scope="module") def started_cluster(): @@ -82,32 +82,39 @@ class MySQLConnection: if self.mysql_connection is not None: self.mysql_connection.close() + @pytest.fixture(scope="module") def started_mysql_5_7(): mysql_node = MySQLConnection(cluster.mysql_port, 'root', 'clickhouse', cluster.mysql_ip) yield mysql_node + @pytest.fixture(scope="module") def started_mysql_8_0(): mysql8_node = MySQLConnection(cluster.mysql8_port, 'root', 'clickhouse', cluster.mysql8_ip) yield mysql8_node -@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")]) -def test_materialize_database_dml_with_mysql_5_7(started_cluster, started_mysql_5_7, clickhouse_node): + +@pytest.fixture(scope='module') +def clickhouse_node(): + yield node_db + + +def test_materialized_database_dml_with_mysql_5_7(started_cluster, started_mysql_5_7, clickhouse_node: ClickHouseInstance): materialize_with_ddl.dml_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") materialize_with_ddl.materialized_mysql_database_with_views(clickhouse_node, started_mysql_5_7, "mysql57") materialize_with_ddl.materialized_mysql_database_with_datetime_and_decimal(clickhouse_node, started_mysql_5_7, "mysql57") materialize_with_ddl.move_to_prewhere_and_column_filtering(clickhouse_node, started_mysql_5_7, "mysql57") -@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")]) -def 
test_materialize_database_dml_with_mysql_8_0(started_cluster, started_mysql_8_0, clickhouse_node): + +def test_materialized_database_dml_with_mysql_8_0(started_cluster, started_mysql_8_0, clickhouse_node): materialize_with_ddl.dml_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") materialize_with_ddl.materialized_mysql_database_with_views(clickhouse_node, started_mysql_8_0, "mysql80") materialize_with_ddl.materialized_mysql_database_with_datetime_and_decimal(clickhouse_node, started_mysql_8_0, "mysql80") materialize_with_ddl.move_to_prewhere_and_column_filtering(clickhouse_node, started_mysql_8_0, "mysql80") -@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")]) -def test_materialize_database_ddl_with_mysql_5_7(started_cluster, started_mysql_5_7, clickhouse_node): + +def test_materialized_database_ddl_with_mysql_5_7(started_cluster, started_mysql_5_7, clickhouse_node): materialize_with_ddl.drop_table_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") materialize_with_ddl.create_table_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") materialize_with_ddl.rename_table_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") @@ -117,9 +124,10 @@ def test_materialize_database_ddl_with_mysql_5_7(started_cluster, started_mysql_ # materialize_with_ddl.alter_rename_column_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") materialize_with_ddl.alter_rename_table_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") materialize_with_ddl.alter_modify_column_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") + materialize_with_ddl.create_table_like_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") -@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")]) -def test_materialize_database_ddl_with_mysql_8_0(started_cluster, started_mysql_8_0, clickhouse_node): + +def test_materialized_database_ddl_with_mysql_8_0(started_cluster, started_mysql_8_0, clickhouse_node): materialize_with_ddl.drop_table_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") materialize_with_ddl.create_table_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") materialize_with_ddl.rename_table_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") @@ -128,103 +136,98 @@ def test_materialize_database_ddl_with_mysql_8_0(started_cluster, started_mysql_ materialize_with_ddl.alter_rename_table_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") materialize_with_ddl.alter_rename_column_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") materialize_with_ddl.alter_modify_column_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") + materialize_with_ddl.create_table_like_with_materialize_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") -@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")]) -def test_materialize_database_ddl_with_empty_transaction_5_7(started_cluster, started_mysql_5_7, clickhouse_node): + +def test_materialized_database_ddl_with_empty_transaction_5_7(started_cluster, started_mysql_5_7, 
clickhouse_node): materialize_with_ddl.query_event_with_empty_transaction(clickhouse_node, started_mysql_5_7, "mysql57") -@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")]) -def test_materialize_database_ddl_with_empty_transaction_8_0(started_cluster, started_mysql_8_0, clickhouse_node): + +def test_materialized_database_ddl_with_empty_transaction_8_0(started_cluster, started_mysql_8_0, clickhouse_node): materialize_with_ddl.query_event_with_empty_transaction(clickhouse_node, started_mysql_8_0, "mysql80") -@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")]) def test_select_without_columns_5_7(started_cluster, started_mysql_5_7, clickhouse_node): materialize_with_ddl.select_without_columns(clickhouse_node, started_mysql_5_7, "mysql57") -@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")]) def test_select_without_columns_8_0(started_cluster, started_mysql_8_0, clickhouse_node): materialize_with_ddl.select_without_columns(clickhouse_node, started_mysql_8_0, "mysql80") -@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")]) def test_insert_with_modify_binlog_checksum_5_7(started_cluster, started_mysql_5_7, clickhouse_node): materialize_with_ddl.insert_with_modify_binlog_checksum(clickhouse_node, started_mysql_5_7, "mysql57") -@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")]) def test_insert_with_modify_binlog_checksum_8_0(started_cluster, started_mysql_8_0, clickhouse_node): materialize_with_ddl.insert_with_modify_binlog_checksum(clickhouse_node, started_mysql_8_0, "mysql80") -@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")]) -def test_materialize_database_err_sync_user_privs_5_7(started_cluster, started_mysql_5_7, clickhouse_node): +def test_materialized_database_err_sync_user_privs_5_7(started_cluster, started_mysql_5_7, clickhouse_node): materialize_with_ddl.err_sync_user_privs_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") -@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")]) -def test_materialize_database_err_sync_user_privs_8_0(started_cluster, started_mysql_8_0, clickhouse_node): +def test_materialized_database_err_sync_user_privs_8_0(started_cluster, started_mysql_8_0, clickhouse_node): materialize_with_ddl.err_sync_user_privs_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") -@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")]) + def test_network_partition_5_7(started_cluster, started_mysql_5_7, clickhouse_node): materialize_with_ddl.network_partition_test(clickhouse_node, started_mysql_5_7, "mysql57") -@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")]) + def test_network_partition_8_0(started_cluster, started_mysql_8_0, clickhouse_node): materialize_with_ddl.network_partition_test(clickhouse_node, started_mysql_8_0, "mysql80") 
-@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")])
+
 def test_mysql_kill_sync_thread_restore_5_7(started_cluster, started_mysql_5_7, clickhouse_node):
     materialize_with_ddl.mysql_kill_sync_thread_restore_test(clickhouse_node, started_mysql_5_7, "mysql57")

-@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")])
+
 def test_mysql_kill_sync_thread_restore_8_0(started_cluster, started_mysql_8_0, clickhouse_node):
     materialize_with_ddl.mysql_kill_sync_thread_restore_test(clickhouse_node, started_mysql_8_0, "mysql80")

-@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")])
+
 def test_mysql_killed_while_insert_5_7(started_cluster, started_mysql_5_7, clickhouse_node):
     materialize_with_ddl.mysql_killed_while_insert(clickhouse_node, started_mysql_5_7, "mysql57")

-@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")])
+
 def test_mysql_killed_while_insert_8_0(started_cluster, started_mysql_8_0, clickhouse_node):
     materialize_with_ddl.mysql_killed_while_insert(clickhouse_node, started_mysql_8_0, "mysql80")

-@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")])
+
 def test_clickhouse_killed_while_insert_5_7(started_cluster, started_mysql_5_7, clickhouse_node):
     materialize_with_ddl.clickhouse_killed_while_insert(clickhouse_node, started_mysql_5_7, "mysql57")

-@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")])
+
 def test_clickhouse_killed_while_insert_8_0(started_cluster, started_mysql_8_0, clickhouse_node):
     materialize_with_ddl.clickhouse_killed_while_insert(clickhouse_node, started_mysql_8_0, "mysql80")

-@pytest.mark.parametrize(('clickhouse_node'), [node_db_ordinary, node_db_ordinary])
+
 def test_utf8mb4(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node):
     materialize_with_ddl.utf8mb4_test(clickhouse_node, started_mysql_5_7, "mysql57")
     materialize_with_ddl.utf8mb4_test(clickhouse_node, started_mysql_8_0, "mysql80")

-@pytest.mark.parametrize(('clickhouse_node'), [node_db_ordinary, node_db_ordinary])
+
 def test_system_parts_table(started_cluster, started_mysql_8_0, clickhouse_node):
     materialize_with_ddl.system_parts_test(clickhouse_node, started_mysql_8_0, "mysql80")

-@pytest.mark.parametrize(('clickhouse_node'), [node_db_ordinary, node_db_ordinary])
+
 def test_multi_table_update(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node):
     materialize_with_ddl.multi_table_update_test(clickhouse_node, started_mysql_5_7, "mysql57")
     materialize_with_ddl.multi_table_update_test(clickhouse_node, started_mysql_8_0, "mysql80")

-@pytest.mark.parametrize(('clickhouse_node'), [node_db_ordinary, node_db_ordinary])
+
 def test_system_tables_table(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node):
     materialize_with_ddl.system_tables_test(clickhouse_node, started_mysql_5_7, "mysql57")
     materialize_with_ddl.system_tables_test(clickhouse_node, started_mysql_8_0, "mysql80")

-@pytest.mark.parametrize(('clickhouse_node'), [node_db_ordinary, node_db_ordinary])
-def test_materialize_with_column_comments(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node):
+
+def test_materialized_with_column_comments(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node):
     materialize_with_ddl.materialize_with_column_comments_test(clickhouse_node, started_mysql_5_7, "mysql57")
     materialize_with_ddl.materialize_with_column_comments_test(clickhouse_node, started_mysql_8_0, "mysql80")

-@pytest.mark.parametrize(('clickhouse_node'), [node_db_ordinary, node_db_ordinary])
-def test_materialize_with_enum(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node):
+
+def test_materialized_with_enum(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node):
     materialize_with_ddl.materialize_with_enum8_test(clickhouse_node, started_mysql_5_7, "mysql57")
     materialize_with_ddl.materialize_with_enum16_test(clickhouse_node, started_mysql_5_7, "mysql57")
     materialize_with_ddl.alter_enum8_to_enum16_test(clickhouse_node, started_mysql_5_7, "mysql57")
@@ -238,7 +241,7 @@ def test_mysql_settings(started_cluster, started_mysql_8_0, started_mysql_5_7, c
     materialize_with_ddl.mysql_settings_test(clickhouse_node, started_mysql_5_7, "mysql57")
     materialize_with_ddl.mysql_settings_test(clickhouse_node, started_mysql_8_0, "mysql80")

-@pytest.mark.parametrize(('clickhouse_node'), [pytest.param(node_db_ordinary, id="ordinary"), pytest.param(node_db_atomic, id="atomic")])
+
 def test_large_transaction(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node):
     materialize_with_ddl.materialized_mysql_large_transaction(clickhouse_node, started_mysql_8_0, "mysql80")
     materialize_with_ddl.materialized_mysql_large_transaction(clickhouse_node, started_mysql_5_7, "mysql57")
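The hunks above remove per-test `@pytest.mark.parametrize(('clickhouse_node'), ...)` decorators while every test keeps its `clickhouse_node` argument, which only works if that argument is now supplied by a parametrized fixture defined once in the module. A minimal sketch of that pattern; the fixture name matches the tests, but the node objects here are illustrative stand-ins, not taken from the patch:

    import pytest

    # Hypothetical stand-ins for the two ClickHouse instances the tests use.
    node_db_ordinary = object()
    node_db_atomic = object()

    @pytest.fixture(params=[node_db_ordinary, node_db_atomic], ids=["ordinary", "atomic"])
    def clickhouse_node(request):
        # Each test taking a `clickhouse_node` argument runs once per param,
        # without repeating the decorator above every test function.
        return request.param

    def test_example(clickhouse_node):
        assert clickhouse_node is not None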
diff --git a/tests/integration/test_merge_tree_s3/configs/config.d/async_read.xml b/tests/integration/test_merge_tree_s3/configs/config.d/async_read.xml
deleted file mode 100644
index 4449d83779a..00000000000
--- a/tests/integration/test_merge_tree_s3/configs/config.d/async_read.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
-
-    read_threadpool
-
-
-
diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py
index e99adf01ec5..b577d4a1405 100644
--- a/tests/integration/test_merge_tree_s3/test.py
+++ b/tests/integration/test_merge_tree_s3/test.py
@@ -50,11 +50,6 @@ def cluster():
                              main_configs=["configs/config.d/storage_conf.xml",
                                            "configs/config.d/bg_processing_pool_conf.xml"],
                              with_minio=True)
-        cluster.add_instance("node_async_read",
-                             main_configs=["configs/config.d/storage_conf.xml",
-                                           "configs/config.d/bg_processing_pool_conf.xml"],
-                             user_configs=["configs/config.d/async_read.xml"],
-                             with_minio=True)
         logging.info("Starting cluster...")
         cluster.start()
         logging.info("Cluster started")
@@ -145,7 +140,7 @@ def wait_for_delete_s3_objects(cluster, expected, timeout=30):

 @pytest.fixture(autouse=True)
-@pytest.mark.parametrize("node_name", ["node", "node_async_read"])
+@pytest.mark.parametrize("node_name", ["node"])
 def drop_table(cluster, node_name):
     yield
     node = cluster.instances[node_name]
@@ -165,9 +160,7 @@ def drop_table(cluster, node_name):
     "min_rows_for_wide_part,files_per_part,node_name",
     [
         (0, FILES_OVERHEAD_PER_PART_WIDE, "node"),
-        (8192, FILES_OVERHEAD_PER_PART_COMPACT, "node"),
-        (0, FILES_OVERHEAD_PER_PART_WIDE, "node_async_read"),
-        (8192, FILES_OVERHEAD_PER_PART_COMPACT, "node_async_read")
+        (8192, FILES_OVERHEAD_PER_PART_COMPACT, "node")
     ]
 )
 def test_simple_insert_select(cluster, min_rows_for_wide_part, files_per_part, node_name):
@@ -191,9 +184,7 @@ def test_simple_insert_select(cluster, min_rows_for_wide_part, files_per_part, n
 @pytest.mark.parametrize(
     "merge_vertical,node_name", [
         (True, "node"),
-        (False, "node"),
-        (True, "node_async_read"),
-        (False, "node_async_read")
+        (False, "node")
     ])
 def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name):
     settings = {}
@@ -235,7 +226,7 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name):
     wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD)

-@pytest.mark.parametrize("node_name", ["node", "node_async_read"])
+@pytest.mark.parametrize("node_name", ["node"])
 def test_alter_table_columns(cluster, node_name):
     node = cluster.instances[node_name]
     create_table(node, "s3_test")
@@ -264,7 +255,7 @@ def test_alter_table_columns(cluster, node_name):
     wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2)

-@pytest.mark.parametrize("node_name", ["node", "node_async_read"])
+@pytest.mark.parametrize("node_name", ["node"])
 def test_attach_detach_partition(cluster, node_name):
     node = cluster.instances[node_name]
     create_table(node, "s3_test")
@@ -296,7 +287,7 @@ def test_attach_detach_partition(cluster, node_name):
     assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD

-@pytest.mark.parametrize("node_name", ["node", "node_async_read"])
+@pytest.mark.parametrize("node_name", ["node"])
 def test_move_partition_to_another_disk(cluster, node_name):
     node = cluster.instances[node_name]
     create_table(node, "s3_test")
@@ -346,7 +337,7 @@ def test_table_manipulations(cluster, node_name):
     assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD

-@pytest.mark.parametrize("node_name", ["node", "node_async_read"])
+@pytest.mark.parametrize("node_name", ["node"])
 def test_move_replace_partition_to_another_table(cluster, node_name):
     node = cluster.instances[node_name]
     create_table(node, "s3_test")
@@ -498,7 +489,7 @@ def test_s3_disk_restart_during_load(cluster, node_name):
     thread.join()

-@pytest.mark.parametrize("node_name", ["node", "node_async_read"])
+@pytest.mark.parametrize("node_name", ["node"])
 def test_s3_disk_reads_on_unstable_connection(cluster, node_name):
     node = cluster.instances[node_name]
     create_table(node, "s3_test", storage_policy='unstable_s3')
diff --git a/tests/integration/test_merge_tree_s3_with_cache/test.py b/tests/integration/test_merge_tree_s3_with_cache/test.py
index da4543ccb87..e15eaf61812 100644
--- a/tests/integration/test_merge_tree_s3_with_cache/test.py
+++ b/tests/integration/test_merge_tree_s3_with_cache/test.py
@@ -60,8 +60,9 @@ def test_write_is_cached(cluster, min_rows_for_wide_part, read_requests):
     select_query = "SELECT * FROM s3_test order by id FORMAT Values"
     assert node.query(select_query) == "(0,'data'),(1,'data')"

-    stat = get_query_stat(node, select_query)
-    assert stat["S3ReadRequestsCount"] == read_requests  # Only .bin files should be accessed from S3.
+    # With async reads profile events are not updated because reads are done in a separate thread.
+    # stat = get_query_stat(node, select_query)
+    # assert stat["S3ReadRequestsCount"] == read_requests  # Only .bin files should be accessed from S3.

     node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
@@ -90,13 +91,16 @@ def test_read_after_cache_is_wiped(cluster, min_rows_for_wide_part, all_files, b
     select_query = "SELECT * FROM s3_test"
     node.query(select_query)
-    stat = get_query_stat(node, select_query)
-    assert stat["S3ReadRequestsCount"] == all_files  # .mrk and .bin files should be accessed from S3.
+    # With async reads profile events are not updated because reads are done in a separate thread.
+    # stat = get_query_stat(node, select_query)
+    # assert stat["S3ReadRequestsCount"] == all_files  # .mrk and .bin files should be accessed from S3.

     # After cache is populated again, only .bin files should be accessed from S3.
     select_query = "SELECT * FROM s3_test order by id FORMAT Values"
     assert node.query(select_query) == "(0,'data'),(1,'data')"
-    stat = get_query_stat(node, select_query)
-    assert stat["S3ReadRequestsCount"] == bin_files
+
+    # With async reads profile events are not updated because reads are done in a separate thread.
+    # stat = get_query_stat(node, select_query)
+    # assert stat["S3ReadRequestsCount"] == bin_files

     node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
diff --git a/tests/integration/test_profile_events_s3/test.py b/tests/integration/test_profile_events_s3/test.py
index 98ad55b3adc..15e2ff97d10 100644
--- a/tests/integration/test_profile_events_s3/test.py
+++ b/tests/integration/test_profile_events_s3/test.py
@@ -159,5 +159,7 @@ def test_profile_events(cluster):
     assert metrics3["S3WriteRequestsCount"] - metrics2["S3WriteRequestsCount"] == minio3["set_requests"] - minio2[
         "set_requests"]
     stat3 = get_query_stat(instance, query3)
-    for metric in stat3:
-        assert stat3[metric] == metrics3[metric] - metrics2[metric]
+    # With async reads profile events are not updated fully because reads are done in a separate thread.
+    # for metric in stat3:
+    #     print(metric)
+    #     assert stat3[metric] == metrics3[metric] - metrics2[metric]
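The assertions disabled above all go through a `get_query_stat` helper; the new comments explain why they no longer hold: with asynchronous reads the S3 requests are issued from a background thread, so they are not attributed to the query's own ProfileEvents. A sketch of how such a helper can read per-query counters from `system.query_log` — an assumption about how the suite's real helper works, and it relies on the `ProfileEvents.Names`/`Values` array columns of that ClickHouse release:

    def get_query_stat(instance, query_text):
        # Hypothetical sketch; the real helper lives in the test utilities.
        instance.query("SYSTEM FLUSH LOGS")  # make the finished query visible in the log
        result = instance.query(
            "SELECT name, sum(value) FROM system.query_log "
            "ARRAY JOIN ProfileEvents.Names AS name, ProfileEvents.Values AS value "
            "WHERE type = 'QueryFinish' AND query = '{}' "
            "GROUP BY name FORMAT TSV".format(query_text.replace("'", "\\'")))
        return {name: int(value)
                for name, value in (line.split("\t")
                                    for line in result.strip().split("\n") if line)}

Counters incremented by a background reader thread would simply never show up in this per-query view, which is exactly what the disabled assertions tripped over.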
diff --git a/tests/integration/test_redirect_url_storage/configs/named_collections.xml b/tests/integration/test_redirect_url_storage/configs/named_collections.xml
index fde247989f8..270971c6698 100644
--- a/tests/integration/test_redirect_url_storage/configs/named_collections.xml
+++ b/tests/integration/test_redirect_url_storage/configs/named_collections.xml
@@ -2,10 +2,6 @@
-            <header>
-                <name>Range</name>
-                <value>bytes=0-1</value>
-            </header>
             <header>
                 <name>Access-Control-Request-Method</name>
                 <value>PUT</value>
             </header>
diff --git a/tests/integration/test_redirect_url_storage/test.py b/tests/integration/test_redirect_url_storage/test.py
index d3808cd890d..061920954b6 100644
--- a/tests/integration/test_redirect_url_storage/test.py
+++ b/tests/integration/test_redirect_url_storage/test.py
@@ -1,6 +1,10 @@
 import pytest
 from helpers.cluster import ClickHouseCluster
+from helpers.network import PartitionManager
+import threading
+import time
+

 cluster = ClickHouseCluster(__file__)
 node1 = cluster.add_instance('node1', main_configs=['configs/named_collections.xml'], with_zookeeper=False, with_hdfs=True)
@@ -95,3 +99,32 @@ def test_predefined_connection_configuration(started_cluster):
     result = node1.query("SET max_http_get_redirects=1; select * from url(url1, url='http://hdfs1:50070/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', format='TSV', structure='id UInt32, name String, weight Float64')")
     assert(result == "1\tMark\t72.53\n")
     node1.query("drop table WebHDFSStorageWithRedirect")
+
+
+result = ''
+def test_url_reconnect(started_cluster):
+    hdfs_api = started_cluster.hdfs_api
+
+    with PartitionManager() as pm:
+        node1.query(
+            "insert into table function hdfs('hdfs://hdfs1:9000/storage_big', 'TSV', 'id Int32') select number from numbers(500000)")
+
+        pm_rule = {'destination': node1.ip_address, 'source_port': 50075, 'action': 'REJECT'}
+        pm._add_rule(pm_rule)
+
+        def select():
+            global result
+            result = node1.query(
+                "select sum(cityHash64(id)) from url('http://hdfs1:50075/webhdfs/v1/storage_big?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV', 'id Int32') settings http_max_tries = 10, http_retry_max_backoff_ms=1000")
+            assert int(result) == 6581218782194912115
+
+        thread = threading.Thread(target=select)
+        thread.start()
+
+        time.sleep(4)
+        pm._delete_rule(pm_rule)
+
+        thread.join()
+
+        assert int(result) == 6581218782194912115
+        assert node1.contains_in_log("Error: Timeout: connect timed out")
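A pitfall worth flagging for value checks like the ones in this test: writing `assert(int(result), 6581218782194912115)` applies `assert` to a two-element tuple, which is always truthy, so the check can never fail. The comparison must be spelled explicitly, as a minimal self-contained illustration shows:

    result = "6581218782194912115"

    # Always passes: the operand is a non-empty tuple, not a comparison.
    assert (int(result), 6581218782194912115)

    # Actually checks the value and fails on a mismatch.
    assert int(result) == 6581218782194912115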
"ENGINE=ReplicatedMergeTree('/test/tmp', 'r') ORDER BY k PARTITION BY toYYYYMM(d);") @@ -57,9 +59,13 @@ def test_create_replicated_table(started_cluster): assert_create_query([main_node, dummy_node], "testdb.replicated_table", expected) # assert without replacing uuid assert main_node.query("show create testdb.replicated_table") == dummy_node.query("show create testdb.replicated_table") + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") @pytest.mark.parametrize("engine", ['MergeTree', 'ReplicatedMergeTree']) def test_simple_alter_table(started_cluster, engine): + main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") # test_simple_alter_table name = "testdb.alter_test_{}".format(engine) main_node.query("CREATE TABLE {} " @@ -100,12 +106,13 @@ def test_simple_alter_table(started_cluster, engine): "SETTINGS index_granularity = 8192".format(name, full_engine) assert_create_query([main_node, dummy_node, competing_node], name, expected) - + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") + competing_node.query("DROP DATABASE testdb SYNC") def get_table_uuid(database, name): return main_node.query(f"SELECT uuid FROM system.tables WHERE database = '{database}' and name = '{name}'").strip() - @pytest.fixture(scope="module", name="attachable_part") def fixture_attachable_part(started_cluster): main_node.query(f"CREATE DATABASE testdb_attach_atomic ENGINE = Atomic") @@ -115,10 +122,11 @@ def fixture_attachable_part(started_cluster): table_uuid = get_table_uuid("testdb_attach_atomic", "test") return os.path.join(main_node.path, f"database/shadow/test_attach/store/{table_uuid[:3]}/{table_uuid}/all_1_1_0") - - @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"]) def test_alter_attach(started_cluster, attachable_part, engine): + main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + name = "alter_attach_test_{}".format(engine) main_node.query(f"CREATE TABLE testdb.{name} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)") table_uuid = get_table_uuid("testdb", name) @@ -134,10 +142,14 @@ def test_alter_attach(started_cluster, attachable_part, engine): assert dummy_node.query(f"SELECT CounterID FROM testdb.{name}") == "123\n" else: assert dummy_node.query(f"SELECT CounterID FROM testdb.{name}") == "" - + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"]) def test_alter_drop_part(started_cluster, engine): + main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + table = f"alter_drop_{engine}" part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0" main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)") @@ -151,10 +163,14 @@ def test_alter_drop_part(started_cluster, engine): assert dummy_node.query(f"SELECT CounterID FROM testdb.{table}") == "" else: assert 
dummy_node.query(f"SELECT CounterID FROM testdb.{table}") == "456\n" - + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"]) def test_alter_detach_part(started_cluster, engine): + main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + table = f"alter_detach_{engine}" part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0" main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)") @@ -169,10 +185,14 @@ def test_alter_detach_part(started_cluster, engine): assert dummy_node.query(detached_parts_query) == f"{part_name}\n" else: assert dummy_node.query(detached_parts_query) == "" - + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"]) def test_alter_drop_detached_part(started_cluster, engine): + main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + table = f"alter_drop_detached_{engine}" part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0" main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)") @@ -186,8 +206,14 @@ def test_alter_drop_detached_part(started_cluster, engine): assert main_node.query(detached_parts_query) == "" assert dummy_node.query(detached_parts_query) == f"{part_name}\n" + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") + def test_alter_fetch(started_cluster): + main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + main_node.query("CREATE TABLE testdb.fetch_source (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)") main_node.query("CREATE TABLE testdb.fetch_target (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)") main_node.query("INSERT INTO testdb.fetch_source VALUES (123)") @@ -197,10 +223,15 @@ def test_alter_fetch(started_cluster): assert main_node.query(detached_parts_query) == "all_0_0_0\n" assert dummy_node.query(detached_parts_query) == "" + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") def test_alters_from_different_replicas(started_cluster): + main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + # test_alters_from_different_replicas - competing_node.query("CREATE DATABASE IF NOT EXISTS testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');") + competing_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');") main_node.query("CREATE TABLE testdb.concurrent_test " "(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop 
UInt32) " @@ -234,7 +265,7 @@ def test_alters_from_different_replicas(started_cluster): assert_create_query([main_node, competing_node], "testdb.concurrent_test", expected) # test_create_replica_after_delay - main_node.query("DROP TABLE testdb.concurrent_test") + main_node.query("DROP TABLE testdb.concurrent_test SYNC") main_node.query("CREATE TABLE testdb.concurrent_test " "(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) " "ENGINE = ReplicatedMergeTree ORDER BY CounterID;") @@ -294,6 +325,11 @@ def test_alters_from_different_replicas(started_cluster): "9\t2021-02-11\t1241149650\n" assert_eq_with_retry(dummy_node, "SELECT CounterID, StartDate, UserID FROM testdb.dist ORDER BY CounterID", expected) + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") + competing_node.query("DROP DATABASE testdb SYNC") + snapshotting_node.query("DROP DATABASE testdb SYNC") + snapshot_recovering_node.query("DROP DATABASE testdb SYNC") def test_recover_staled_replica(started_cluster): main_node.query("CREATE DATABASE recover ENGINE = Replicated('/clickhouse/databases/recover', 'shard1', 'replica1');") @@ -365,11 +401,12 @@ def test_recover_staled_replica(started_cluster): assert dummy_node.query("SELECT (*,).1 FROM recover.{}".format(table)) == "42\n" for table in ['m1', 'mt1']: assert dummy_node.query("SELECT count() FROM recover.{}".format(table)) == "0\n" - - assert dummy_node.query("SELECT count() FROM system.tables WHERE database='recover_broken_tables'") == "2\n" - table = dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'mt1_29_%'").strip() + global test_recover_staled_replica_run + assert dummy_node.query("SELECT count() FROM system.tables WHERE database='recover_broken_tables'") == f"{2*test_recover_staled_replica_run}\n" + test_recover_staled_replica_run += 1 + table = dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'mt1_29_%' LIMIT 1").strip() assert dummy_node.query("SELECT (*,).1 FROM recover_broken_tables.{}".format(table)) == "42\n" - table = dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'rmt5_29_%'").strip() + table = dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'rmt5_29_%' LIMIT 1").strip() assert dummy_node.query("SELECT (*,).1 FROM recover_broken_tables.{}".format(table)) == "42\n" expected = "Cleaned 6 outdated objects: dropped 1 dictionaries and 3 tables, moved 2 tables" @@ -377,10 +414,10 @@ def test_recover_staled_replica(started_cluster): dummy_node.query("DROP TABLE recover.tmp") assert_eq_with_retry(main_node, "SELECT count() FROM system.tables WHERE database='recover' AND name='tmp'", "0\n") + main_node.query("DROP DATABASE recover SYNC") + dummy_node.query("DROP DATABASE recover SYNC") def test_startup_without_zk(started_cluster): - main_node.query("DROP DATABASE IF EXISTS testdb SYNC") - main_node.query("DROP DATABASE IF EXISTS recover SYNC") with PartitionManager() as pm: pm.drop_instance_zk_connections(main_node) err = main_node.query_and_get_error("CREATE DATABASE startup ENGINE = Replicated('/clickhouse/databases/startup', 'shard1', 'replica1');") @@ -403,7 +440,7 @@ def test_startup_without_zk(started_cluster): main_node.query("EXCHANGE TABLES startup.rmt AND startup.m") assert main_node.query("SELECT (*,).1 FROM startup.m") == "42\n" - + main_node.query("DROP DATABASE startup SYNC") def test_server_uuid(started_cluster): uuid1 = main_node.query("select serverUUID()") diff --git 
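The change running through this file moves database creation from a module-scoped fixture into each test and adds a `DROP DATABASE ... SYNC` at the end, so each test gets fresh Replicated databases and blocks until their metadata is gone before the next test recreates the same ZooKeeper path. The same boilerplate could equally be factored into a per-test fixture; a sketch, not part of the patch, reusing the module's `main_node`/`dummy_node` instances:

    import pytest

    @pytest.fixture
    def replicated_testdb(started_cluster):
        # Fresh Replicated database for each test; SYNC makes the drop block
        # until cleanup finishes, so the ZooKeeper path can be reused.
        main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")
        dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")
        yield
        main_node.query("DROP DATABASE IF EXISTS testdb SYNC")
        dummy_node.query("DROP DATABASE IF EXISTS testdb SYNC")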
diff --git a/tests/integration/test_replicated_fetches_bandwidth/test.py b/tests/integration/test_replicated_fetches_bandwidth/test.py
index 5b8332cda16..f39baea064c 100644
--- a/tests/integration/test_replicated_fetches_bandwidth/test.py
+++ b/tests/integration/test_replicated_fetches_bandwidth/test.py
@@ -34,7 +34,7 @@ def test_limited_fetch_single_table(start_cluster):
         node2.query("SYSTEM STOP FETCHES limited_fetch_table")

         for i in range(5):
-            node1.query("INSERT INTO limited_fetch_table SELECT {}, '{}' FROM numbers(300)".format(i, get_random_string(104857)))
+            node1.query("INSERT INTO limited_fetch_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(300)".format(i))

         n1_net = NetThroughput(node1)
         n2_net = NetThroughput(node2)
@@ -66,7 +66,7 @@ def test_limited_send_single_table(start_cluster):
         node2.query("SYSTEM STOP FETCHES limited_send_table")

         for i in range(5):
-            node1.query("INSERT INTO limited_send_table SELECT {}, '{}' FROM numbers(150)".format(i, get_random_string(104857)))
+            node1.query("INSERT INTO limited_send_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(150)".format(i))

         n1_net = NetThroughput(node1)
         n2_net = NetThroughput(node2)
@@ -100,7 +100,7 @@ def test_limited_fetches_for_server(start_cluster):
         for j in range(5):
             node3.query(f"SYSTEM STOP FETCHES limited_fetches{j}")
             for i in range(5):
-                node1.query("INSERT INTO limited_fetches{} SELECT {}, '{}' FROM numbers(50)".format(j, i, get_random_string(104857)))
+                node1.query("INSERT INTO limited_fetches{} SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(50)".format(j, i))

         n1_net = NetThroughput(node1)
         n3_net = NetThroughput(node3)
@@ -137,7 +137,7 @@ def test_limited_sends_for_server(start_cluster):
         for j in range(5):
             node1.query(f"SYSTEM STOP FETCHES limited_sends{j}")
             for i in range(5):
-                node3.query("INSERT INTO limited_sends{} SELECT {}, '{}' FROM numbers(50)".format(j, i, get_random_string(104857)))
+                node3.query("INSERT INTO limited_sends{} SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(50)".format(j, i))

         n1_net = NetThroughput(node1)
         n3_net = NetThroughput(node3)
@@ -173,7 +173,7 @@ def test_should_execute_fetch(start_cluster):
         node2.query("SYSTEM STOP FETCHES should_execute_table")

         for i in range(3):
-            node1.query("INSERT INTO should_execute_table SELECT {}, '{}' FROM numbers(200)".format(i, get_random_string(104857)))
+            node1.query("INSERT INTO should_execute_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(200)".format(i))

         n1_net = NetThroughput(node1)
         n2_net = NetThroughput(node2)
@@ -181,7 +181,7 @@ def test_should_execute_fetch(start_cluster):
         node2.query("SYSTEM START FETCHES should_execute_table")

         for i in range(10):
-            node1.query("INSERT INTO should_execute_table SELECT {}, '{}' FROM numbers(3)".format(i, get_random_string(104857)))
+            node1.query("INSERT INTO should_execute_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(3)".format(i))

         n2_fetch_speed = []
         replication_queue_data = []
diff --git a/tests/integration/test_restore_replica/test.py b/tests/integration/test_restore_replica/test.py
index 4197c064243..4013b5b474c 100644
--- a/tests/integration/test_restore_replica/test.py
+++ b/tests/integration/test_restore_replica/test.py
@@ -4,7 +4,6 @@ import pytest
 from helpers.cluster import ClickHouseCluster
 from helpers.cluster import ClickHouseKiller
 from helpers.test_tools import assert_eq_with_retry
-from helpers.network import PartitionManager

 def fill_nodes(nodes):
     for node in nodes:
diff --git a/tests/integration/test_row_policy/test.py b/tests/integration/test_row_policy/test.py
index 76aef325bfa..66a35bea06b 100644
--- a/tests/integration/test_row_policy/test.py
+++ b/tests/integration/test_row_policy/test.py
@@ -437,3 +437,23 @@ def test_miscellaneous_engines():
     node.query("CREATE TABLE mydb.other_table (a UInt8, b UInt8) ENGINE Distributed('test_local_cluster', mydb, local)")
     assert node.query("SELECT * FROM mydb.other_table", user="another") == TSV([[1, 0], [1, 1], [1, 0], [1, 1]])
     assert node.query("SELECT sum(a), b FROM mydb.other_table GROUP BY b ORDER BY b", user="another") == TSV([[2, 0], [2, 1]])
+
+
+def test_policy_on_distributed_table_via_role():
+    node.query("DROP TABLE IF EXISTS local_tbl")
+    node.query("DROP TABLE IF EXISTS dist_tbl")
+
+    node.query("CREATE TABLE local_tbl engine=MergeTree ORDER BY tuple() as select * FROM numbers(10)")
+    node.query("CREATE TABLE dist_tbl ENGINE=Distributed( 'test_cluster_two_shards_localhost', default, local_tbl) AS local_tbl")
+
+    node.query("CREATE ROLE OR REPLACE 'role1'")
+    node.query("CREATE USER OR REPLACE 'user1' DEFAULT ROLE 'role1'")
+
+    node.query("GRANT SELECT ON dist_tbl TO 'role1'")
+    node.query("GRANT SELECT ON local_tbl TO 'role1'")
+
+    node.query("CREATE ROW POLICY OR REPLACE 'all_data' ON dist_tbl, local_tbl USING 1 TO ALL EXCEPT 'role1'")
+    node.query("CREATE ROW POLICY OR REPLACE 'role1_data' ON dist_tbl, local_tbl USING number % 2 = 0 TO 'role1'")
+
+    assert node.query("SELECT * FROM local_tbl SETTINGS prefer_localhost_replica=0", user="user1") == TSV([[0], [2], [4], [6], [8]])
+    assert node.query("SELECT * FROM dist_tbl SETTINGS prefer_localhost_replica=0", user="user1") == TSV([[0], [2], [4], [6], [8], [0], [2], [4], [6], [8]])
diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py
index 64f94a52a70..c6598292ba4 100644
--- a/tests/integration/test_storage_hdfs/test.py
+++ b/tests/integration/test_storage_hdfs/test.py
@@ -2,6 +2,7 @@ import os

 import pytest
 from helpers.cluster import ClickHouseCluster
+from pyhdfs import HdfsClient

 cluster = ClickHouseCluster(__file__)
 node1 = cluster.add_instance('node1', with_hdfs=True)
@@ -238,11 +239,21 @@ def test_virtual_columns(started_cluster):
 def test_read_files_with_spaces(started_cluster):
     hdfs_api = started_cluster.hdfs_api

-    hdfs_api.write_data("/test test test 1.txt", "1\n")
-    hdfs_api.write_data("/test test test 2.txt", "2\n")
-    hdfs_api.write_data("/test test test 3.txt", "3\n")
-    node1.query("create table test (id UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/test*', 'TSV')")
+    fs = HdfsClient(hosts=started_cluster.hdfs_ip)
+    dir = '/test_spaces'
+    exists = fs.exists(dir)
+    if exists:
+        fs.delete(dir, recursive=True)
+    fs.mkdirs(dir)
+
+    hdfs_api.write_data(f"{dir}/test test test 1.txt", "1\n")
+    hdfs_api.write_data(f"{dir}/test test test 2.txt", "2\n")
+    hdfs_api.write_data(f"{dir}/test test test 3.txt", "3\n")
+
+    node1.query(f"create table test (id UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{dir}/test*', 'TSV')")
     assert node1.query("select * from test order by id") == "1\n2\n3\n"
+    fs.delete(dir, recursive=True)
+

 def test_truncate_table(started_cluster):
diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py
index bf9ab4f9e27..c9e53b67de3 100644
--- a/tests/integration/test_storage_kafka/test.py
+++ b/tests/integration/test_storage_kafka/test.py
@@ -1779,9 +1779,13 @@ def test_kafka_virtual_columns2(kafka_cluster):

     assert TSV(result) == TSV(expected)

+    instance.query('''
+        DROP TABLE test.kafka;
+        DROP TABLE test.view;
+    ''')
     kafka_delete_topic(admin_client, "virt2_0")
     kafka_delete_topic(admin_client, "virt2_1")
-
+    instance.rotate_logs()

 def test_kafka_produce_key_timestamp(kafka_cluster):
diff --git a/tests/integration/test_storage_rabbitmq/test.py b/tests/integration/test_storage_rabbitmq/test.py
index 90793bea428..90734613325 100644
--- a/tests/integration/test_storage_rabbitmq/test.py
+++ b/tests/integration/test_storage_rabbitmq/test.py
@@ -2217,10 +2217,10 @@ def test_rabbitmq_random_detach(rabbitmq_cluster):
             time.sleep(random.uniform(0, 1))
         thread.start()

-    time.sleep(5)
-    kill_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id)
-    instance.query("detach table test.rabbitmq")
-    revive_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id)
+    # time.sleep(5)
+    # kill_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id)
+    # instance.query("detach table test.rabbitmq")
+    # revive_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id)

     for thread in threads:
         thread.join()
diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py
index bd918144935..74ce3ed0f6c 100644
--- a/tests/integration/test_storage_s3/test.py
+++ b/tests/integration/test_storage_s3/test.py
@@ -10,6 +10,7 @@ import time

 import helpers.client
 import pytest
 from helpers.cluster import ClickHouseCluster, ClickHouseInstance, get_instances_dir
+from helpers.network import PartitionManager

 MINIO_INTERNAL_PORT = 9001
@@ -757,3 +758,40 @@ def test_predefined_connection_configuration(started_cluster):

     result = instance.query("SELECT * FROM s3(s3_conf1, format='CSV', structure='id UInt32')")
     assert result == instance.query("SELECT number FROM numbers(10)")
+
+
+result = ""
+def test_url_reconnect_in_the_middle(started_cluster):
+    bucket = started_cluster.minio_bucket
+    instance = started_cluster.instances["dummy"]
+    table_format = "id String, data String"
+    filename = "test_url_reconnect_{}.tsv".format(random.randint(0, 1000))
+
+    instance.query(f"""insert into table function
+                   s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'TSV', '{table_format}')
+                   select number, randomPrintableASCII(number % 1000) from numbers(1000000)""")
+
+    with PartitionManager() as pm:
+        pm_rule_reject = {'probability': 0.02, 'destination': instance.ip_address, 'source_port': started_cluster.minio_port, 'action': 'REJECT --reject-with tcp-reset'}
+        pm_rule_drop_all = {'destination': instance.ip_address, 'source_port': started_cluster.minio_port, 'action': 'DROP'}
+        pm._add_rule(pm_rule_reject)
+
+        def select():
+            global result
+            result = instance.query(
+                f"""select sum(cityHash64(x)) from (select toUInt64(id) + sleep(0.1) as x from
+                url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'TSV', '{table_format}')
+                settings http_max_tries = 10, http_retry_max_backoff_ms=2000, http_send_timeout=1, http_receive_timeout=1)""")
+            assert int(result) == 3914219105369203805
+
+        thread = threading.Thread(target=select)
+        thread.start()
+        time.sleep(4)
+        pm._add_rule(pm_rule_drop_all)
+
+        time.sleep(2)
+        pm._delete_rule(pm_rule_drop_all)
+        pm._delete_rule(pm_rule_reject)
+
+        thread.join()
+
+        assert int(result) == 3914219105369203805
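Both new reconnect tests share the same shape: start a slow remote read in a thread, inject packet loss with `PartitionManager` while it runs, lift the rule, and check that the retry settings (`http_max_tries`, `http_retry_max_backoff_ms`) let the query finish with the expected checksum. A sketch of that timing skeleton, with the helper name and default delays made up for illustration:

    import threading
    import time

    def run_with_fault_injection(pm, rule, select, fault_after=4.0, fault_for=2.0):
        # `pm` is a PartitionManager, `rule` an iptables-style dict as in the
        # tests above, and `select` the callable running the long query.
        thread = threading.Thread(target=select)
        thread.start()
        time.sleep(fault_after)   # let the query make some progress first
        pm._add_rule(rule)        # break connectivity mid-transfer
        time.sleep(fault_for)
        pm._delete_rule(rule)     # heal the network; retries should resume the read
        thread.join()
        # Note: an assertion raised inside the thread does not fail the test by
        # itself, which is why the tests re-check the global `result` in the
        # main thread after join().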
diff --git a/tests/performance/ip_trie.xml b/tests/performance/ip_trie.xml
index 9be0c4337e4..f93695931d9 100644
--- a/tests/performance/ip_trie.xml
+++ b/tests/performance/ip_trie.xml
@@ -82,8 +82,8 @@ LIMIT 500000
        FORMAT Null
    </query>

+    <drop_query>DROP TABLE IF EXISTS table_ip_from_dict</drop_query>
+    <drop_query>DROP TABLE IF EXISTS dict_ip_trie_table</drop_query>
     <drop_query>DROP DICTIONARY IF EXISTS default.dict_ip_trie</drop_query>
     <drop_query>DROP TABLE IF EXISTS table_ip_trie</drop_query>
-    <drop_query>DROP TABLE IF EXISTS dict_ip_trie_table</drop_query>
-    <drop_query>DROP TABLE IF EXISTS table_ip_from_dict</drop_query>
diff --git a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh
index e09a8493bb8..e4d091ea0bb 100755
--- a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh
+++ b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh
@@ -10,13 +10,13 @@ R2=table_1017_2
 T1=table_1017_merge

 ${CLICKHOUSE_CLIENT} -n -q "
+    DROP DICTIONARY IF EXISTS dict1;
     DROP TABLE IF EXISTS $R1;
     DROP TABLE IF EXISTS $R2;
     DROP TABLE IF EXISTS $T1;
     DROP TABLE IF EXISTS lookup_table;
     DROP TABLE IF EXISTS table_for_dict;
-    DROP DICTIONARY IF EXISTS dict1;

     CREATE TABLE table_for_dict (y UInt64, y_new UInt32) ENGINE = Log;
     INSERT INTO table_for_dict VALUES (3, 3003),(4,4004);
@@ -69,10 +69,10 @@ ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE dictHas('${CLICKHOUSE
     && echo 'OK' || echo 'FAIL'

 ${CLICKHOUSE_CLIENT} -n -q "
+    DROP DICTIONARY IF EXISTS dict1;
     DROP TABLE IF EXISTS $R2;
     DROP TABLE IF EXISTS $R1;
     DROP TABLE IF EXISTS $T1;
     DROP TABLE IF EXISTS lookup_table;
     DROP TABLE IF EXISTS table_for_dict;
-    DROP DICTIONARY IF EXISTS dict1;
 "
diff --git a/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh b/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh
index 100e79a4546..872b0a7c1a1 100755
--- a/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh
+++ b/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh
@@ -118,7 +118,7 @@ $CLICKHOUSE_CLIENT -q "ATTACH DICTIONARY IF NOT EXISTS database_for_dict.dict1"
 $CLICKHOUSE_CLIENT -q "ATTACH DICTIONARY IF NOT EXISTS database_for_dict.dict2"

 $CLICKHOUSE_CLIENT -n -q "
+    DROP DATABASE database_for_dict;
     DROP TABLE table_for_dict1;
     DROP TABLE table_for_dict2;
-    DROP DATABASE database_for_dict;
 "
diff --git a/tests/queries/0_stateless/01018_ddl_dictionaries_create.sql b/tests/queries/0_stateless/01018_ddl_dictionaries_create.sql
index 0c5e0c69ffe..28b68504766 100644
--- a/tests/queries/0_stateless/01018_ddl_dictionaries_create.sql
+++ b/tests/queries/0_stateless/01018_ddl_dictionaries_create.sql
@@ -2,11 +2,12 @@

 SET send_logs_level = 'fatal';

+DROP DATABASE IF EXISTS memory_db;
+DROP DATABASE IF EXISTS db_01018;
 DROP DATABASE IF EXISTS database_for_dict_01018;

 CREATE DATABASE database_for_dict_01018;

-DROP TABLE IF EXISTS database_for_dict_01018.table_for_dict;

 CREATE TABLE database_for_dict_01018.table_for_dict
 (
@@ -25,8 +26,6 @@ CREATE DATABASE db_01018;

 SELECT '=DICTIONARY in Ordinary DB';

-DROP DICTIONARY IF EXISTS db_01018.dict1;
-
 CREATE DICTIONARY db_01018.dict1
 (
     key_column UInt64 DEFAULT 0,
@@ -44,7 +43,7 @@ SHOW DICTIONARIES FROM db_01018 LIKE 'dict1';

 EXISTS DICTIONARY db_01018.dict1;

-SELECT database, name FROM system.dictionaries WHERE name LIKE 'dict1';
+SELECT database, name FROM system.dictionaries WHERE database='db_01018' AND name LIKE 'dict1';

 SELECT '==DETACH DICTIONARY';
 DETACH DICTIONARY db_01018.dict1;
@@ -53,7 +52,7 @@ SHOW DICTIONARIES FROM db_01018 LIKE 'dict1';

 EXISTS DICTIONARY db_01018.dict1;

-SELECT database, name FROM system.dictionaries WHERE name LIKE 'dict1';
+SELECT database, name FROM system.dictionaries WHERE database='db_01018' AND name LIKE 'dict1';

 SELECT '==ATTACH DICTIONARY';
 ATTACH DICTIONARY db_01018.dict1;
@@ -62,7 +61,7 @@ SHOW DICTIONARIES FROM db_01018 LIKE 'dict1';

 EXISTS DICTIONARY db_01018.dict1;

-SELECT database, name FROM system.dictionaries WHERE name LIKE 'dict1';
+SELECT database, name FROM system.dictionaries WHERE database='db_01018' AND name LIKE 'dict1';

 SELECT '==DROP DICTIONARY';

@@ -72,7 +71,7 @@ SHOW DICTIONARIES FROM db_01018 LIKE 'dict1';

 EXISTS DICTIONARY db_01018.dict1;

-SELECT database, name FROM system.dictionaries WHERE name LIKE 'dict1';
+SELECT database, name FROM system.dictionaries WHERE database='db_01018' AND name LIKE 'dict1';

 DROP DATABASE IF EXISTS db_01018;

@@ -99,7 +98,7 @@ SHOW DICTIONARIES FROM memory_db LIKE 'dict2';

 EXISTS DICTIONARY memory_db.dict2;

-SELECT database, name FROM system.dictionaries WHERE name LIKE 'dict2';
+SELECT database, name FROM system.dictionaries WHERE database='memory_db' AND name LIKE 'dict2';

 SELECT '=DICTIONARY in Lazy DB';

@@ -160,6 +159,7 @@ SHOW DICTIONARIES FROM db_01018;

 DROP DATABASE IF EXISTS db_01018;

+DROP DICTIONARY memory_db.dict2;
 DROP TABLE IF EXISTS database_for_dict_01018.table_for_dict;
 DROP DATABASE IF EXISTS database_for_dict_01018;
diff --git a/tests/queries/0_stateless/01018_ddl_dictionaries_select.sql b/tests/queries/0_stateless/01018_ddl_dictionaries_select.sql
index 69c8ad2faee..748c733bf9a 100644
--- a/tests/queries/0_stateless/01018_ddl_dictionaries_select.sql
+++ b/tests/queries/0_stateless/01018_ddl_dictionaries_select.sql
@@ -1,6 +1,7 @@
 -- Tags: no-parallel, no-fasttest

 SET send_logs_level = 'fatal';
+SET check_table_dependencies=0;

 DROP DATABASE IF EXISTS database_for_dict;

@@ -18,8 +19,6 @@ ORDER BY key_column;

 INSERT INTO database_for_dict.table_for_dict SELECT number, number % 17, toString(number * number), number / 2.0 from numbers(100);

-DROP DICTIONARY IF EXISTS database_for_dict.dict1;
-
 CREATE DICTIONARY database_for_dict.dict1
 (
     key_column UInt64 DEFAULT 0,
diff --git a/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.sql b/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.sql
index a4a06d44e01..e72e113f859 100644
--- a/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.sql
+++ b/tests/queries/0_stateless/01018_dictionaries_from_dictionaries.sql
@@ -18,8 +18,6 @@ ORDER BY key_column;

 INSERT INTO database_for_dict.table_for_dict SELECT number, number % 17, toString(number * number), number / 2.0 from numbers(100);

-DROP DICTIONARY IF EXISTS database_for_dict.dict1;
-
 CREATE DICTIONARY database_for_dict.dict1
 (
     key_column UInt64 DEFAULT 0,
diff --git a/tests/queries/0_stateless/01018_ip_dictionary_long.sql b/tests/queries/0_stateless/01018_ip_dictionary_long.sql
index 6e0c9c8d416..7d9dfeb1bae 100644
--- a/tests/queries/0_stateless/01018_ip_dictionary_long.sql
+++ b/tests/queries/0_stateless/01018_ip_dictionary_long.sql
@@ -117,8 +117,8 @@ WHERE prefix == '127.0.0.2/32';
 SELECT 37 == COUNT(*) FROM database_for_dict.table_from_ipv4_trie_dict;
 SELECT 37 == COUNT(DISTINCT prefix) FROM database_for_dict.table_from_ipv4_trie_dict;

-DROP DICTIONARY IF EXISTS database_for_dict.dict_ipv4_trie;
 DROP TABLE IF EXISTS database_for_dict.table_from_ipv4_trie_dict;
+DROP DICTIONARY IF EXISTS database_for_dict.dict_ipv4_trie;
 DROP TABLE IF EXISTS database_for_dict.table_ipv4_trie;

 SELECT '***ipv4 trie dict mask***';
@@ -462,8 +462,8 @@ WHERE prefix == '2620:0:870::/48';

 SELECT 134 == COUNT(*) FROM database_for_dict.table_from_ip_trie_dict;

-DROP DICTIONARY IF EXISTS database_for_dict.dict_ip_trie;
 DROP TABLE IF EXISTS database_for_dict.table_from_ip_trie_dict;
+DROP DICTIONARY IF EXISTS database_for_dict.dict_ip_trie;
 DROP TABLE IF EXISTS database_for_dict.table_ip_trie;

 SELECT '***ipv6 trie dict mask***';
diff --git a/tests/queries/0_stateless/01033_dictionaries_lifetime.sql b/tests/queries/0_stateless/01033_dictionaries_lifetime.sql
index 299931bdeb3..67e1adf5574 100644
--- a/tests/queries/0_stateless/01033_dictionaries_lifetime.sql
+++ b/tests/queries/0_stateless/01033_dictionaries_lifetime.sql
@@ -6,8 +6,6 @@ DROP DATABASE IF EXISTS database_for_dict;

 CREATE DATABASE database_for_dict;

-DROP TABLE IF EXISTS database_for_dict.table_for_dict;
-
 CREATE TABLE database_for_dict.table_for_dict
 (
     key_column UInt64,
@@ -23,8 +21,6 @@ DROP DATABASE IF EXISTS ordinary_db;

 CREATE DATABASE ordinary_db;

-DROP DICTIONARY IF EXISTS ordinary_db.dict1;
-
 CREATE DICTIONARY ordinary_db.dict1
 (
     key_column UInt64 DEFAULT 0,
@@ -40,7 +36,7 @@ SELECT 'INITIALIZING DICTIONARY';

 SELECT dictGetUInt8('ordinary_db.dict1', 'second_column', toUInt64(100500));

-SELECT lifetime_min, lifetime_max FROM system.dictionaries WHERE name = 'dict1';
+SELECT lifetime_min, lifetime_max FROM system.dictionaries WHERE database='ordinary_db' AND name = 'dict1';

 DROP DICTIONARY IF EXISTS ordinary_db.dict1;
diff --git a/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.sh b/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.sh
index 7d6cc5f1bff..7249d5e1a82 100755
--- a/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.sh
+++ b/tests/queries/0_stateless/01040_dictionary_invalidate_query_switchover_long.sh
@@ -35,7 +35,7 @@ $CLICKHOUSE_CLIENT --query "SELECT dictGetUInt8('dictdb_01041_01040.invalidate',
 # No exception happened
 $CLICKHOUSE_CLIENT --query "SELECT last_exception FROM system.dictionaries WHERE database = 'dictdb_01041_01040' AND name = 'invalidate'"

-$CLICKHOUSE_CLIENT --query "DROP TABLE dictdb_01041_01040.dict_invalidate"
+$CLICKHOUSE_CLIENT --check_table_dependencies=0 --query "DROP TABLE dictdb_01041_01040.dict_invalidate"

 function check_exception_detected()
 {
diff --git a/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql b/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql
index 6b4335d4464..ad364237544 100644
--- a/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql
+++ b/tests/queries/0_stateless/01041_create_dictionary_if_not_exists.sql
@@ -1,7 +1,5 @@
 -- Tags: no-parallel

-DROP TABLE IF EXISTS dictdb.table_for_dict;
-DROP DICTIONARY IF EXISTS dictdb.dict_exists;
 DROP DATABASE IF EXISTS dictdb;

 CREATE DATABASE dictdb;
@@ -41,6 +39,6 @@ LAYOUT(FLAT());

 SELECT dictGetFloat64('dictdb.dict_exists', 'value', toUInt64(1));

-DROP TABLE dictdb.table_for_dict;
 DROP DICTIONARY dictdb.dict_exists;
+DROP TABLE dictdb.table_for_dict;
 DROP DATABASE dictdb;
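The recurring reordering in these stateless tests — DROP DICTIONARY before its source table, or `check_table_dependencies=0` where the old order must be kept — follows from the dependency checks this patch set exercises: a dictionary loads from its source table, so the table cannot be dropped while the dictionary still references it. A condensed illustration of the safe order, with hypothetical names and a `node` test instance as used elsewhere in this patch:

    # Assuming `node` is a ClickHouse test instance.
    node.query("CREATE TABLE db.src (n UInt64, v String) ENGINE = MergeTree ORDER BY n")
    node.query(
        "CREATE DICTIONARY db.d (n UInt64, v String) PRIMARY KEY n "
        "SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' "
        "PASSWORD '' TABLE 'src' DB 'db')) "
        "LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT())")

    # Wrong order: db.d still depends on db.src, so this is rejected once
    # dependency checks are enforced.
    # node.query("DROP TABLE db.src")

    # Safe order: remove the dependent object first, then its source.
    node.query("DROP DICTIONARY db.d")
    node.query("DROP TABLE db.src")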
diff --git a/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.sql b/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.sql
index e3d360065c6..ec440baf8d6 100644
--- a/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.sql
+++ b/tests/queries/0_stateless/01050_clickhouse_dict_source_with_subquery.sql
@@ -1,6 +1,8 @@
+-- Tags: no-parallel
+
+drop dictionary if exists default.test_dict_01051_d;
 drop table if exists default.test_01051_d;
 drop table if exists default.test_view_01051_d;
-drop dictionary if exists default.test_dict_01051_d;

 create table default.test_01051_d (key UInt64, value String) engine = MergeTree order by key;
 create view default.test_view_01051_d (key UInt64, value String) as select k2 + 1 as key, v2 || '_x' as value from (select key + 2 as k2, value || '_y' as v2 from default.test_01051_d);
@@ -11,6 +13,6 @@ create dictionary default.test_dict_01051_d (key UInt64, value String) primary k

 select dictGet('default.test_dict_01051_d', 'value', toUInt64(4));

+drop dictionary if exists default.test_dict_01051_d;
 drop table if exists default.test_01051_d;
 drop table if exists default.test_view_01051_d;
-drop dictionary if exists default.test_dict_01051_d;
diff --git a/tests/queries/0_stateless/01059_storage_file_compression.sh b/tests/queries/0_stateless/01059_storage_file_compression.sh
index fbee5070d8d..8942113ab12 100755
--- a/tests/queries/0_stateless/01059_storage_file_compression.sh
+++ b/tests/queries/0_stateless/01059_storage_file_compression.sh
@@ -12,6 +12,7 @@ do
     ${CLICKHOUSE_CLIENT} --query "CREATE TABLE file (x UInt64) ENGINE = File(TSV, '${CLICKHOUSE_DATABASE}/${m}.tsv.${m}')"
     ${CLICKHOUSE_CLIENT} --query "TRUNCATE TABLE file"
     ${CLICKHOUSE_CLIENT} --query "INSERT INTO file SELECT * FROM numbers(1000000)"
+    sleep 1
     ${CLICKHOUSE_CLIENT} --query "SELECT count(), max(x) FROM file"
     ${CLICKHOUSE_CLIENT} --query "DROP TABLE file"
 done
diff --git a/tests/queries/0_stateless/01073_grant_and_revoke.reference b/tests/queries/0_stateless/01073_grant_and_revoke.reference
index a19caf19533..449f21e896a 100644
--- a/tests/queries/0_stateless/01073_grant_and_revoke.reference
+++ b/tests/queries/0_stateless/01073_grant_and_revoke.reference
@@ -1,11 +1,16 @@
-CREATE USER test_user_01073
 A
+CREATE USER test_user_01073
+B
+C
 GRANT INSERT, ALTER DELETE ON *.* TO test_user_01073
 GRANT SELECT ON db1.* TO test_user_01073
 GRANT SELECT ON db2.table TO test_user_01073
 GRANT SELECT(col1) ON db3.table TO test_user_01073
 GRANT SELECT(col1, col2) ON db4.table TO test_user_01073
-C
+D
 GRANT ALTER DELETE ON *.* TO test_user_01073
 GRANT SELECT(col1) ON db4.table TO test_user_01073
+E
+GRANT SELECT ON db1.* TO test_role_01073
+REVOKE SELECT(c1, c2, c3, c4, c5) ON db1.table1 FROM test_role_01073
+REVOKE SELECT(c1) ON db1.table2 FROM test_role_01073
diff --git a/tests/queries/0_stateless/01073_grant_and_revoke.sql b/tests/queries/0_stateless/01073_grant_and_revoke.sql
index ef40b26dde5..4cffd916e9f 100644
--- a/tests/queries/0_stateless/01073_grant_and_revoke.sql
+++ b/tests/queries/0_stateless/01073_grant_and_revoke.sql
@@ -1,28 +1,36 @@
 DROP USER IF EXISTS test_user_01073;
+DROP ROLE IF EXISTS test_role_01073;

+SELECT 'A';
 CREATE USER test_user_01073;
 SHOW CREATE USER test_user_01073;

-SELECT 'A';
+SELECT 'B';
 SHOW GRANTS FOR test_user_01073;

+SELECT 'C';
 GRANT SELECT ON db1.* TO test_user_01073;
 GRANT SELECT ON db2.table TO test_user_01073;
 GRANT SELECT(col1) ON db3.table TO test_user_01073;
 GRANT SELECT(col1, col2) ON db4.table TO test_user_01073;
 GRANT INSERT ON *.* TO test_user_01073;
 GRANT DELETE ON *.* TO test_user_01073;
-
-SELECT 'B';
 SHOW GRANTS FOR test_user_01073;

+SELECT 'D';
 REVOKE SELECT ON db1.* FROM test_user_01073;
 REVOKE SELECT ON db2.table FROM test_user_01073;
 REVOKE SELECT ON db3.table FROM test_user_01073;
 REVOKE SELECT(col2) ON db4.table FROM test_user_01073;
 REVOKE INSERT ON *.* FROM test_user_01073;
-
-SELECT 'C';
 SHOW GRANTS FOR test_user_01073;

+SELECT 'E';
+CREATE ROLE test_role_01073;
+GRANT SELECT ON db1.* TO test_role_01073;
+REVOKE SELECT(c1, c2, c3, c4, c5) ON db1.table1 FROM test_role_01073;
+REVOKE SELECT(c1) ON db1.table2 FROM test_role_01073;
+SHOW GRANTS FOR test_role_01073;
+
 DROP USER test_user_01073;
+DROP ROLE test_role_01073;
diff --git a/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh b/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh
index 80821193d38..17068dcbdf9 100755
--- a/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh
+++ b/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh
@@ -7,7 +7,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT --query="CREATE DATABASE IF NOT EXISTS dictdb_01076; "
+$CLICKHOUSE_CLIENT --query="DROP DATABASE IF EXISTS dictdb_01076;"
+$CLICKHOUSE_CLIENT --query="CREATE DATABASE dictdb_01076;"

 $CLICKHOUSE_CLIENT --query="
 CREATE TABLE dictdb_01076.table_datarace
@@ -66,6 +67,6 @@ wait

 echo OK

-$CLICKHOUSE_CLIENT --query="DROP TABLE dictdb_01076.table_datarace;"
 $CLICKHOUSE_CLIENT --query="DROP DICTIONARY dictdb_01076.dict_datarace;"
+$CLICKHOUSE_CLIENT --query="DROP TABLE dictdb_01076.table_datarace;"
 $CLICKHOUSE_CLIENT --query="DROP DATABASE dictdb_01076;"
diff --git a/tests/queries/0_stateless/01109_exchange_tables.reference b/tests/queries/0_stateless/01109_exchange_tables.reference
index 9c2f4af4d3d..385bbbb94ae 100644
--- a/tests/queries/0_stateless/01109_exchange_tables.reference
+++ b/tests/queries/0_stateless/01109_exchange_tables.reference
@@ -16,3 +16,5 @@
 3 world
 0 exchange
 1 tables
+0
+1
diff --git a/tests/queries/0_stateless/01109_exchange_tables.sql b/tests/queries/0_stateless/01109_exchange_tables.sql
index 604f1b9f57d..09edf4a1de4 100644
--- a/tests/queries/0_stateless/01109_exchange_tables.sql
+++ b/tests/queries/0_stateless/01109_exchange_tables.sql
@@ -51,6 +51,19 @@ SELECT * FROM t2;
 SELECT * FROM test_01109_other_atomic.t3;
 SELECT * FROM test_01109_ordinary.t4;

+DROP DATABASE IF EXISTS test_01109_rename_exists;
+CREATE DATABASE test_01109_rename_exists ENGINE=Atomic;
+USE test_01109_rename_exists;
+CREATE TABLE t0 ENGINE=Log() AS SELECT * FROM system.numbers limit 2;
+RENAME TABLE t0_tmp TO t1; -- { serverError 60 }
+RENAME TABLE if exists t0_tmp TO t1;
+RENAME TABLE if exists t0 TO t1;
+SELECT * FROM t1;
+
 DROP DATABASE test_01109;
 DROP DATABASE test_01109_other_atomic;
 DROP DATABASE test_01109_ordinary;
+DROP DATABASE test_01109_rename_exists;
+
+
+
diff --git a/tests/queries/0_stateless/01114_database_atomic.sh b/tests/queries/0_stateless/01114_database_atomic.sh
index 4d77a53cf6a..ae592740551 100755
--- a/tests/queries/0_stateless/01114_database_atomic.sh
+++ b/tests/queries/0_stateless/01114_database_atomic.sh
@@ -6,10 +6,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh

-$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS test_01114_1"
-$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS test_01114_2"
-$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS test_01114_3"
-
+$CLICKHOUSE_CLIENT -nm -q "
+DROP DATABASE IF EXISTS test_01114_1;
+DROP DATABASE IF EXISTS test_01114_2;
+DROP DATABASE IF EXISTS test_01114_3;
+"

 $CLICKHOUSE_CLIENT -q "CREATE DATABASE test_01114_1 ENGINE=Atomic"
 $CLICKHOUSE_CLIENT --default_database_engine=Atomic -q "CREATE DATABASE test_01114_2"
@@ -27,17 +28,19 @@ $CLICKHOUSE_CLIENT -q "SELECT name,
 splitByChar('/', metadata_path)[-2] as uuid_path, ((splitByChar('/', metadata_path)[-3] as metadata) = substr(uuid_path, 1, 3)) OR metadata='metadata' FROM system.databases WHERE name LIKE 'test_01114_%'" | sed "s/$uuid_db_1/00001114-1000-4000-8000-000000000001/g" | sed "s/$uuid_db_2/00001114-1000-4000-8000-000000000002/g"

-$CLICKHOUSE_CLIENT -q "CREATE TABLE test_01114_1.mt_tmp (n UInt64) ENGINE=MergeTree() ORDER BY tuple()"
-$CLICKHOUSE_CLIENT -q "INSERT INTO test_01114_1.mt_tmp SELECT * FROM numbers(100)"
-$CLICKHOUSE_CLIENT -q "CREATE TABLE test_01114_3.mt (n UInt64) ENGINE=MergeTree() ORDER BY tuple() PARTITION BY (n % 5)"
-$CLICKHOUSE_CLIENT -q "INSERT INTO test_01114_3.mt SELECT * FROM numbers(20)"
+$CLICKHOUSE_CLIENT -nm -q "
+CREATE TABLE test_01114_1.mt_tmp (n UInt64) ENGINE=MergeTree() ORDER BY tuple();
+INSERT INTO test_01114_1.mt_tmp SELECT * FROM numbers(100);
+CREATE TABLE test_01114_3.mt (n UInt64) ENGINE=MergeTree() ORDER BY tuple() PARTITION BY (n % 5);
+INSERT INTO test_01114_3.mt SELECT * FROM numbers(20);

-$CLICKHOUSE_CLIENT -q "RENAME TABLE test_01114_1.mt_tmp TO test_01114_3.mt_tmp"  # move from Atomic to Ordinary
-$CLICKHOUSE_CLIENT -q "RENAME TABLE test_01114_3.mt TO test_01114_1.mt"  # move from Ordinary to Atomic
-$CLICKHOUSE_CLIENT -q "SELECT count() FROM test_01114_1.mt"
-$CLICKHOUSE_CLIENT -q "SELECT count() FROM test_01114_3.mt_tmp"
+RENAME TABLE test_01114_1.mt_tmp TO test_01114_3.mt_tmp; /* move from Atomic to Ordinary */
+RENAME TABLE test_01114_3.mt TO test_01114_1.mt; /* move from Ordinary to Atomic */
+SELECT count() FROM test_01114_1.mt;
+SELECT count() FROM test_01114_3.mt_tmp;

-$CLICKHOUSE_CLIENT -q "DROP DATABASE test_01114_3"
+DROP DATABASE test_01114_3;
+"

 explicit_uuid=$($CLICKHOUSE_CLIENT -q "SELECT generateUUIDv4()")
 $CLICKHOUSE_CLIENT -q "CREATE TABLE test_01114_2.mt UUID '$explicit_uuid' (n UInt64) ENGINE=MergeTree() ORDER BY tuple() PARTITION BY (n % 5)"
@@ -49,11 +52,13 @@ $CLICKHOUSE_CLIENT -q "SELECT count(col), sum(col) FROM (SELECT n + sleepEachRow
 $CLICKHOUSE_CLIENT -q "INSERT INTO test_01114_2.mt SELECT number + sleepEachRow(1.5) FROM numbers(30)" & # 45s
 sleep 1 # SELECT and INSERT should start before the following RENAMEs

-$CLICKHOUSE_CLIENT -q "RENAME TABLE test_01114_1.mt TO test_01114_1.mt_tmp"
-$CLICKHOUSE_CLIENT -q "RENAME TABLE test_01114_1.mt_tmp TO test_01114_2.mt_tmp"
-$CLICKHOUSE_CLIENT -q "EXCHANGE TABLES test_01114_2.mt AND test_01114_2.mt_tmp"
-$CLICKHOUSE_CLIENT -q "RENAME TABLE test_01114_2.mt_tmp TO test_01114_1.mt"
-$CLICKHOUSE_CLIENT -q "EXCHANGE TABLES test_01114_1.mt AND test_01114_2.mt"
+$CLICKHOUSE_CLIENT -nm -q "
+RENAME TABLE test_01114_1.mt TO test_01114_1.mt_tmp;
+RENAME TABLE test_01114_1.mt_tmp TO test_01114_2.mt_tmp;
+EXCHANGE TABLES test_01114_2.mt AND test_01114_2.mt_tmp;
+RENAME TABLE test_01114_2.mt_tmp TO test_01114_1.mt;
+EXCHANGE TABLES test_01114_1.mt AND test_01114_2.mt;
+"

 # Check that nothing changed
 $CLICKHOUSE_CLIENT -q "SELECT count() FROM test_01114_1.mt"
@@ -61,16 +66,18 @@ uuid_mt1=$($CLICKHOUSE_CLIENT -q "SELECT uuid FROM system.tables WHERE database=
 $CLICKHOUSE_CLIENT --show_table_uuid_in_table_create_query_if_not_nil=1 -q "SHOW CREATE TABLE test_01114_1.mt" | sed "s/$uuid_mt1/00001114-0000-4000-8000-000000000001/g"
 $CLICKHOUSE_CLIENT --show_table_uuid_in_table_create_query_if_not_nil=1 -q "SHOW CREATE TABLE test_01114_2.mt" | sed "s/$explicit_uuid/00001114-0000-4000-8000-000000000002/g"

-$CLICKHOUSE_CLIENT -q "DROP TABLE test_01114_1.mt" --database_atomic_wait_for_drop_and_detach_synchronously=0
-$CLICKHOUSE_CLIENT -q "CREATE TABLE test_01114_1.mt (s String) ENGINE=Log()"
-$CLICKHOUSE_CLIENT -q "INSERT INTO test_01114_1.mt SELECT 's' || toString(number) FROM numbers(5)"
-$CLICKHOUSE_CLIENT -q "SELECT count() FROM test_01114_1.mt" # result: 5
+$CLICKHOUSE_CLIENT -nm -q "
+DROP TABLE test_01114_1.mt SETTINGS database_atomic_wait_for_drop_and_detach_synchronously=0;
+CREATE TABLE test_01114_1.mt (s String) ENGINE=Log();
+INSERT INTO test_01114_1.mt SELECT 's' || toString(number) FROM numbers(5);
+SELECT count() FROM test_01114_1.mt
+" # result: 5

 $CLICKHOUSE_CLIENT -q "SELECT tuple(s, sleepEachRow(3)) FROM test_01114_1.mt" > /dev/null & # 15s
 sleep 1
 $CLICKHOUSE_CLIENT -q "DROP DATABASE test_01114_1" --database_atomic_wait_for_drop_and_detach_synchronously=0 && echo "dropped"

-wait # for INSERT
+wait # for INSERT and SELECT
 $CLICKHOUSE_CLIENT -q "SELECT count(n), sum(n) FROM test_01114_2.mt" # result: 30, 435
 $CLICKHOUSE_CLIENT -q "DROP DATABASE test_01114_2" --database_atomic_wait_for_drop_and_detach_synchronously=0
diff --git a/tests/queries/0_stateless/01160_table_dependencies.reference b/tests/queries/0_stateless/01160_table_dependencies.reference
index a893d4882df..ead8377abc5 100644
--- a/tests/queries/0_stateless/01160_table_dependencies.reference
+++ b/tests/queries/0_stateless/01160_table_dependencies.reference
@@ -1,3 +1,17 @@
+dict1 [] ['dict_src'] ['join','t']
+dict2 [] ['join'] ['t']
+dict_src [] [] ['dict1']
+join [] ['dict1'] ['dict2','s','t']
+s [] ['join'] []
+t [] ['dict1','dict2','join'] []
+=====
+dict1 [] ['dict_src'] ['join','t']
+dict2 [] ['join'] []
+dict_src [] [] ['dict1']
+join [] ['dict1'] ['dict2','s','t']
+mv [] [] []
+s [] ['join'] ['t']
+t ['mv'] ['dict1','join','s'] []
 dict1
 dict2
 dict_src
@@ -5,3 +19,24 @@ join
 mv
 s
 t
+OK
+OK
+OK
+dict1 [] ['dict_src'] ['join','t']
+dict2 [] ['join'] []
+dict_src [] [] ['dict1']
+join [] ['dict1'] ['dict2','s','t']
+mv [] [] []
+s [] ['join'] ['t']
+t ['mv'] ['dict1','join','s'] []
+OK
+dict1 [] ['dict_src'] ['join','t']
+dict2 [] ['join'] []
+dict_src [] [] ['dict1']
+join [] ['dict1'] ['dict2','s','t']
+s [] ['join'] ['t']
+t [] ['dict1','join','s'] []
+=====
+dict1 [] ['dict_src'] ['join']
+dict_src [] [] ['dict1']
+join [] ['dict1'] []
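The new reference output above lists, per table, three dependency arrays that `system.tables` exposes: views fed by the table (`dependencies_table`), the objects the table needs in order to load (`loading_dependencies_table`), and the objects that need it (`loading_dependent_table`). In isolation, the dependency graph for a database can be inspected with the same query the .sh diff below builds (a sketch; `node` stands for any test instance):

    # Inspect the dependency graph ClickHouse tracks for the current database.
    print(node.query(
        "SELECT table, dependencies_table, loading_dependencies_table, "
        "loading_dependent_table FROM system.tables "
        "WHERE database = currentDatabase() ORDER BY table"))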
'localhost' PORT tcpPort() USER 'default' TABLE 'join' PASSWORD '' DB '$CLICKHOUSE_DATABASE')) LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT());" @@ -28,10 +28,18 @@ LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT());" $CLICKHOUSE_CLIENT -q "create table s (x default joinGet($CLICKHOUSE_DATABASE.join, 'm', 42::int)) engine=Set" $CLICKHOUSE_CLIENT -q "create table t (n int, m int default joinGet($CLICKHOUSE_DATABASE.join, 'm', 42::int), -s String default dictGet($CLICKHOUSE_DATABASE.dict1, 's', 42::UInt64), x default in(1, $CLICKHOUSE_DATABASE.s)) engine=MergeTree order by n;" +s String default dictGet($CLICKHOUSE_DATABASE.dict1, 's', 42::UInt64), y default dictGet($CLICKHOUSE_DATABASE.dict2, 'm', 42::UInt64)) engine=MergeTree order by n;" + +$CLICKHOUSE_CLIENT -q "select table, arraySort(dependencies_table), +arraySort(loading_dependencies_table), arraySort(loading_dependent_table) from system.tables where database=currentDatabase() order by table" +$CLICKHOUSE_CLIENT -q "select '====='" +$CLICKHOUSE_CLIENT -q "alter table t add column x int default in(1, $CLICKHOUSE_DATABASE.s), drop column y" $CLICKHOUSE_CLIENT -q "create materialized view mv to s as select n from t where n in (select n from join)" +$CLICKHOUSE_CLIENT -q "select table, arraySort(dependencies_table), +arraySort(loading_dependencies_table), arraySort(loading_dependent_table) from system.tables where database=currentDatabase() order by table" + CLICKHOUSE_CLIENT_DEFAULT_DB=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--database=${CLICKHOUSE_DATABASE}"'/--database=default/g') for _ in {1..10}; do @@ -40,8 +48,61 @@ for _ in {1..10}; do done $CLICKHOUSE_CLIENT -q "show tables from $CLICKHOUSE_DATABASE;" -$CLICKHOUSE_CLIENT -q "drop table dict_src;" -$CLICKHOUSE_CLIENT -q "drop dictionary dict1;" +$CLICKHOUSE_CLIENT -q "rename table join to join1" 2>&1| grep -Fa "some tables depend on it" >/dev/null && echo "OK" + +$CLICKHOUSE_CLIENT -q "drop table join" 2>&1| grep -Fa "some tables depend on it" >/dev/null && echo "OK" +$CLICKHOUSE_CLIENT -q "detach dictionary dict1 permanently" 2>&1| grep -Fa "some tables depend on it" >/dev/null && echo "OK" + +$CLICKHOUSE_CLIENT -q "select table, arraySort(dependencies_table), +arraySort(loading_dependencies_table), arraySort(loading_dependent_table) from system.tables where database=currentDatabase() order by table" + +engine=`$CLICKHOUSE_CLIENT -q "select engine from system.databases where name='${CLICKHOUSE_DATABASE}'"` +$CLICKHOUSE_CLIENT -q "drop database if exists ${CLICKHOUSE_DATABASE}_1" +if [[ $engine == "Atomic" ]]; then + $CLICKHOUSE_CLIENT -q "rename database ${CLICKHOUSE_DATABASE} to ${CLICKHOUSE_DATABASE}_1" 2>&1| grep -Fa "some tables depend on it" >/dev/null && echo "OK" +else + echo "OK" +fi + +$CLICKHOUSE_CLIENT -q "drop table mv" +$CLICKHOUSE_CLIENT -q "create database ${CLICKHOUSE_DATABASE}_1" + +t_database=${CLICKHOUSE_DATABASE} + +if [[ $engine == "Atomic" ]]; then + $CLICKHOUSE_CLIENT -q "rename table t to ${CLICKHOUSE_DATABASE}_1.t" + $CLICKHOUSE_CLIENT -q "rename database ${CLICKHOUSE_DATABASE}_1 to ${CLICKHOUSE_DATABASE}_1_renamed" + t_database="${CLICKHOUSE_DATABASE}_1_renamed" +fi + +$CLICKHOUSE_CLIENT -q "select table, arraySort(dependencies_table), +arraySort(loading_dependencies_table), arraySort(loading_dependent_table) from system.tables where database in (currentDatabase(), '$t_database') order by table" + +$CLICKHOUSE_CLIENT -q "drop table ${t_database}.t;" +$CLICKHOUSE_CLIENT -q "drop table s;" $CLICKHOUSE_CLIENT -q "drop dictionary dict2;" + +$CLICKHOUSE_CLIENT -q "select '====='" 
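+# only dict_src, dict1 and join should remain at this point; check their dependencies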
+$CLICKHOUSE_CLIENT -q "select table, arraySort(dependencies_table), +arraySort(loading_dependencies_table), arraySort(loading_dependent_table) from system.tables where database=currentDatabase() order by table" +if [[ $engine != "Ordinary" ]]; then + $CLICKHOUSE_CLIENT -q "create or replace table dict_src (n int, m int, s String) engine=MergeTree order by (n, m);" +fi + $CLICKHOUSE_CLIENT -q "drop table join;" -$CLICKHOUSE_CLIENT -q "drop table t;" +$CLICKHOUSE_CLIENT -q "drop dictionary dict1;" +$CLICKHOUSE_CLIENT -q "drop table dict_src;" +if [[ $t_database != "$CLICKHOUSE_DATABASE" ]]; then + $CLICKHOUSE_CLIENT -q "drop database if exists ${t_database}" +fi + +$CLICKHOUSE_CLIENT -q "drop database if exists ${CLICKHOUSE_DATABASE}_1" +$CLICKHOUSE_CLIENT -q "create database ${CLICKHOUSE_DATABASE}_1" +$CLICKHOUSE_CLIENT -q "create table ${CLICKHOUSE_DATABASE}_1.xdict_src (n int, m int, s String) engine=MergeTree order by n;" +$CLICKHOUSE_CLIENT -q "create dictionary ${CLICKHOUSE_DATABASE}_1.ydict1 (n int default 0, m int default 1, s String default 'qqq') +PRIMARY KEY n +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'xdict_src' PASSWORD '' DB '${CLICKHOUSE_DATABASE}_1')) +LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT());" + +$CLICKHOUSE_CLIENT -q "create table ${CLICKHOUSE_DATABASE}_1.zjoin(n int, m int default dictGet('${CLICKHOUSE_DATABASE}_1.ydict1', 'm', 42::UInt64)) engine=Join(any, left, n);" +$CLICKHOUSE_CLIENT -q "drop database ${CLICKHOUSE_DATABASE}_1" diff --git a/tests/queries/0_stateless/01192_rename_database_zookeeper.sh b/tests/queries/0_stateless/01192_rename_database_zookeeper.sh index 62165467675..c6686bde672 100755 --- a/tests/queries/0_stateless/01192_rename_database_zookeeper.sh +++ b/tests/queries/0_stateless/01192_rename_database_zookeeper.sh @@ -7,9 +7,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # 1. 
init -$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS test_01192" -$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS test_01192_renamed" -$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS test_01192_atomic" +$CLICKHOUSE_CLIENT --check_table_dependencies=0 -q "DROP DATABASE IF EXISTS test_01192" +$CLICKHOUSE_CLIENT --check_table_dependencies=0 -q "DROP DATABASE IF EXISTS test_01192_renamed" +$CLICKHOUSE_CLIENT --check_table_dependencies=0 -q "DROP DATABASE IF EXISTS test_01192_atomic" $CLICKHOUSE_CLIENT --default_database_engine=Ordinary -q "CREATE DATABASE test_01192 UUID '00001192-0000-4000-8000-000000000001'" 2>&1| grep -F "does not support" > /dev/null && echo "ok" $CLICKHOUSE_CLIENT --default_database_engine=Atomic -q "CREATE DATABASE test_01192 UUID '00001192-0000-4000-8000-000000000001'" @@ -63,8 +63,8 @@ $CLICKHOUSE_CLIENT -q "SELECT dictGet('test_01192_atomic.dict', '_part', toUInt6 $CLICKHOUSE_CLIENT -q "INSERT INTO test_01192_atomic.mt SELECT number + sleepEachRow(1) + 10 FROM numbers(10)" && echo "inserted" & sleep 1 -$CLICKHOUSE_CLIENT -q "RENAME DATABASE test_01192 TO test_01192_renamed" 2>&1| grep -F "not supported" > /dev/null && echo "ok" -$CLICKHOUSE_CLIENT -q "DROP DATABASE test_01192" +$CLICKHOUSE_CLIENT --check_table_dependencies=0 -q "RENAME DATABASE test_01192 TO test_01192_renamed" 2>&1| grep -F "not supported" > /dev/null && echo "ok" +$CLICKHOUSE_CLIENT --check_table_dependencies=0 -q "DROP DATABASE test_01192" $CLICKHOUSE_CLIENT -q "RENAME DATABASE test_01192_atomic TO test_01192" && echo "renamed" wait diff --git a/tests/queries/0_stateless/01195_formats_diagnostic_info.reference b/tests/queries/0_stateless/01195_formats_diagnostic_info.reference index eddbb80198d..3c21f8124d8 100644 --- a/tests/queries/0_stateless/01195_formats_diagnostic_info.reference +++ b/tests/queries/0_stateless/01195_formats_diagnostic_info.reference @@ -7,8 +7,8 @@ ERROR: garbage after Decimal(18, 10): "Hello" Column 0, name: t, type: DateTime, ERROR: text "" is not like DateTime CustomSeparatedIgnoreSpaces -Column 2, name: d, type: Decimal(18, 10), parsed text: "123456789"ERROR -ERROR: There is no delimiter before field 1: expected ",", got "7, Hello," +Column 2, name: d, type: Decimal(18, 10), parsed text: " 123456789"ERROR +ERROR: There is no delimiter between fields: expected ",", got "7, Hello," Column 0, name: t, type: DateTime, ERROR: text ",1" is not like DateTime Column 0, name: t, type: DateTime, ERROR: text "Hello" is not like DateTime OK @@ -24,7 +24,7 @@ Unexpected NULL value CustomSeparated Column 2, name: d, type: Decimal(18, 10), parsed text: "123456789"ERROR -ERROR: There is no delimiter before field 1: expected "", got "7Hello123" +ERROR: There is no delimiter between fields: expected "", got "7Hello123" ERROR: There is no delimiter after last field: expected "", got "1" ERROR: There is no delimiter after last field: expected "", got "Hello" Column 0, name: t, type: DateTime, ERROR: text "" is not like DateTime diff --git a/tests/queries/0_stateless/01214_test_storage_merge_aliases_with_where.reference b/tests/queries/0_stateless/01214_test_storage_merge_aliases_with_where.reference new file mode 100644 index 00000000000..569b21af197 --- /dev/null +++ b/tests/queries/0_stateless/01214_test_storage_merge_aliases_with_where.reference @@ -0,0 +1,53 @@ +-- { echo } +SELECT * FROM tt_m order by a; +1 1 +2 4 +3 4 +5 12 +SELECT * FROM tt_m WHERE b != 0 order by b; +1 1 +2 4 +3 4 +5 12 +SELECT * FROM tt_m WHERE b != 1 order by b; +2 4 +3 4 +5 12 +SELECT * FROM tt_m WHERE b != a * 2 
order by b; +1 1 +3 4 +5 12 +SELECT * FROM tt_m WHERE b / 2 != a order by b; +1 1 +3 4 +5 12 +SELECT b FROM tt_m WHERE b >= 0 order by b; +1 +4 +4 +12 +SELECT b FROM tt_m WHERE b == 12; +12 +SELECT b FROM tt_m ORDER BY b; +1 +4 +4 +12 +SELECT b, count() FROM tt_m GROUP BY b order by b; +1 1 +4 2 +12 1 +SELECT b FROM tt_m order by b LIMIT 1 BY b; +1 +4 +12 +SELECT a FROM tt_m WHERE b = 12; +5 +SELECT max(a) FROM tt_m group by b order by b; +1 +3 +5 +SELECT a FROM tt_m order by b LIMIT 1 BY b; +1 +2 +5 diff --git a/tests/queries/0_stateless/01214_test_storage_merge_aliases_with_where.sql b/tests/queries/0_stateless/01214_test_storage_merge_aliases_with_where.sql new file mode 100644 index 00000000000..20a22eb48b1 --- /dev/null +++ b/tests/queries/0_stateless/01214_test_storage_merge_aliases_with_where.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS tt1; +DROP TABLE IF EXISTS tt2; +DROP TABLE IF EXISTS tt3; +DROP TABLE IF EXISTS tt4; +DROP TABLE IF EXISTS tt_m; + +CREATE TABLE tt1 (a UInt32, b UInt32 ALIAS a) ENGINE = Memory; +CREATE TABLE tt2 (a UInt32, b UInt32 ALIAS a * 2) ENGINE = Memory; +CREATE TABLE tt3 (a UInt32, b UInt32 ALIAS c, c UInt32) ENGINE = Memory; +CREATE TABLE tt4 (a UInt32, b UInt32 ALIAS 12) ENGINE = Memory; +CREATE TABLE tt_m (a UInt32, b UInt32) ENGINE = Merge(currentDatabase(), 'tt1|tt2|tt3|tt4'); + +INSERT INTO tt1 VALUES (1); +INSERT INTO tt2 VALUES (2); +INSERT INTO tt3(a, c) VALUES (3, 4); +INSERT INTO tt4 VALUES (5); + +-- { echo } +SELECT * FROM tt_m order by a; +SELECT * FROM tt_m WHERE b != 0 order by b; +SELECT * FROM tt_m WHERE b != 1 order by b; +SELECT * FROM tt_m WHERE b != a * 2 order by b; +SELECT * FROM tt_m WHERE b / 2 != a order by b; + +SELECT b FROM tt_m WHERE b >= 0 order by b; +SELECT b FROM tt_m WHERE b == 12; +SELECT b FROM tt_m ORDER BY b; +SELECT b, count() FROM tt_m GROUP BY b order by b; +SELECT b FROM tt_m order by b LIMIT 1 BY b; + +SELECT a FROM tt_m WHERE b = 12; +SELECT max(a) FROM tt_m group by b order by b; +SELECT a FROM tt_m order by b LIMIT 1 BY b; + diff --git a/tests/queries/0_stateless/01257_dictionary_mismatch_types.sql b/tests/queries/0_stateless/01257_dictionary_mismatch_types.sql index 4ff3b7913f4..dfdfdf46db2 100644 --- a/tests/queries/0_stateless/01257_dictionary_mismatch_types.sql +++ b/tests/queries/0_stateless/01257_dictionary_mismatch_types.sql @@ -3,8 +3,7 @@ DROP DATABASE IF EXISTS test_dict_db; CREATE DATABASE test_dict_db; -DROP TABLE IF EXISTS test_dict_db.table1; -DROP DICTIONARY IF EXISTS test_dict_db.table1_dict; +set check_table_dependencies=0; CREATE TABLE test_dict_db.table1 ( diff --git a/tests/queries/0_stateless/01501_cache_dictionary_all_fields.sql b/tests/queries/0_stateless/01501_cache_dictionary_all_fields.sql index 9b383399c51..1ef173d5f4a 100644 --- a/tests/queries/0_stateless/01501_cache_dictionary_all_fields.sql +++ b/tests/queries/0_stateless/01501_cache_dictionary_all_fields.sql @@ -1,10 +1,6 @@ -- Tags: no-parallel drop database if exists db_01501; -drop table if exists db_01501.table_cache_dict; -drop dictionary if exists db_01501.cache_dict; - - create database db_01501; CREATE TABLE db_01501.table_cache_dict( @@ -117,6 +113,6 @@ SELECT groupArray(dictHas('db_01501.cache_dict', toUInt64(number))) from numbers SELECT groupArray(dictHas('db_01501.cache_dict', toUInt64(number))) from numbers(10); SELECT groupArray(dictHas('db_01501.cache_dict', toUInt64(number))) from numbers(10); -drop table if exists table_cache_dict; -drop dictionary if exists cache_dict; +drop dictionary db_01501.cache_dict; 
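+-- the source table can be dropped only after the dictionary that depends on it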
+drop table db_01501.table_cache_dict; drop database if exists db_01501; diff --git a/tests/queries/0_stateless/01526_complex_key_dict_direct_layout.sql b/tests/queries/0_stateless/01526_complex_key_dict_direct_layout.sql index 30a5b7e8783..f6065516aa7 100644 --- a/tests/queries/0_stateless/01526_complex_key_dict_direct_layout.sql +++ b/tests/queries/0_stateless/01526_complex_key_dict_direct_layout.sql @@ -4,7 +4,6 @@ DROP DATABASE IF EXISTS db_01526; CREATE DATABASE db_01526; -DROP TABLE IF EXISTS db_01526.table_for_dict1; CREATE TABLE db_01526.table_for_dict1 ( @@ -30,6 +29,6 @@ LAYOUT(COMPLEX_KEY_DIRECT()); SELECT dictGet('db_01526.dict1', 'third_column', (number, number + 1)) FROM numbers(4); SELECT dictHas('db_01526.dict1', (toUInt64(1), toUInt64(3))); -DROP TABLE db_01526.table_for_dict1; DROP DICTIONARY db_01526.dict1; +DROP TABLE db_01526.table_for_dict1; DROP DATABASE db_01526; diff --git a/tests/queries/0_stateless/01527_dist_sharding_key_dictGet_reload.sql b/tests/queries/0_stateless/01527_dist_sharding_key_dictGet_reload.sql index 5e42fe69c2f..03f4f758e03 100644 --- a/tests/queries/0_stateless/01527_dist_sharding_key_dictGet_reload.sql +++ b/tests/queries/0_stateless/01527_dist_sharding_key_dictGet_reload.sql @@ -3,6 +3,7 @@ set allow_nondeterministic_optimize_skip_unused_shards=1; set optimize_skip_unused_shards=1; set force_optimize_skip_unused_shards=2; +set check_table_dependencies=0; drop database if exists db_01527_ranges; drop table if exists dist_01527; diff --git a/tests/queries/0_stateless/01553_settings_early_apply.sql b/tests/queries/0_stateless/01553_settings_early_apply.sql index 48178c38f33..13bb2d30d4d 100644 --- a/tests/queries/0_stateless/01553_settings_early_apply.sql +++ b/tests/queries/0_stateless/01553_settings_early_apply.sql @@ -1,5 +1,9 @@ select * from numbers(100) settings max_result_rows = 1; -- { serverError 396 } select * from numbers(100) FORMAT JSON settings max_result_rows = 1; -- { serverError 396 } +select * from numbers(100) FORMAT TSVWithNamesAndTypes settings max_result_rows = 1; -- { serverError 396 } +select * from numbers(100) FORMAT CSVWithNamesAndTypes settings max_result_rows = 1; -- { serverError 396 } +select * from numbers(100) FORMAT JSONCompactEachRowWithNamesAndTypes settings max_result_rows = 1; -- { serverError 396 } +select * from numbers(100) FORMAT XML settings max_result_rows = 1; -- { serverError 396 } SET max_result_rows = 1; select * from numbers(10); -- { serverError 396 } diff --git a/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.reference b/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.reference index 3c92ffb0a8c..887c701a5e4 100644 --- a/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.reference +++ b/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.reference @@ -1,4 +1,4 @@ -AlterQuery t1 (children 1) +AlterQuery t1 (children 2) ExpressionList (children 1) AlterCommand 33 (children 1) Function equals (children 1) @@ -6,3 +6,4 @@ AlterQuery t1 (children 1) Identifier date Function today (children 1) ExpressionList + Identifier t1 diff --git a/tests/queries/0_stateless/01610_client_spawn_editor.sh b/tests/queries/0_stateless/01610_client_spawn_editor.sh index 873b45e12a9..4df34911a3b 100755 --- a/tests/queries/0_stateless/01610_client_spawn_editor.sh +++ b/tests/queries/0_stateless/01610_client_spawn_editor.sh @@ -7,7 +7,7 @@ match_max 100000 if ![info exists env(CLICKHOUSE_PORT_TCP)] {set env(CLICKHOUSE_PORT_TCP) 9000} -set env(EDITOR) [file dirname [file 
normalize [info script]]]"/01610_client_spawn_editor_open.editor" +set env(EDITOR) [file dirname [file normalize [info script]]]/01610_client_spawn_editor_open.editor spawn clickhouse-client --disable_suggestion expect ":) " diff --git a/tests/queries/0_stateless/01622_constraints_simple_optimization.reference b/tests/queries/0_stateless/01622_constraints_simple_optimization.reference new file mode 100644 index 00000000000..7e012e1a17b --- /dev/null +++ b/tests/queries/0_stateless/01622_constraints_simple_optimization.reference @@ -0,0 +1,45 @@ +4 +0 +0 +0 +4 +4 +4 +0 +4 +1 +1 +1 +0 +1 +1 +1 +1 +0 +0 +1 +1 +1 +1 +0 +0 +1 +1 +0 +1 +1 +0 +1 +1 +0 +SELECT count() AS `count()` +FROM constraint_test_constants +WHERE (c > 100) OR (b > 100) +SELECT count() AS `count()` +FROM constraint_test_constants +WHERE c > 100 +SELECT count() AS `count()` +FROM constraint_test_constants +WHERE c > 100 +SELECT count() AS `count()` +FROM constraint_test_constants diff --git a/tests/queries/0_stateless/01622_constraints_simple_optimization.sql b/tests/queries/0_stateless/01622_constraints_simple_optimization.sql new file mode 100644 index 00000000000..e1922975a2a --- /dev/null +++ b/tests/queries/0_stateless/01622_constraints_simple_optimization.sql @@ -0,0 +1,105 @@ +DROP TABLE IF EXISTS constraint_test_assumption; +DROP TABLE IF EXISTS constraint_test_transitivity; +DROP TABLE IF EXISTS constraint_test_transitivity2; + +SET convert_query_to_cnf = 1; +SET optimize_using_constraints = 1; +SET optimize_move_to_prewhere = 1; +SET optimize_substitute_columns = 1; +SET optimize_append_index = 1; + +CREATE TABLE constraint_test_assumption (URL String, a Int32, CONSTRAINT c1 ASSUME domainWithoutWWW(URL) = 'yandex.ru', CONSTRAINT c2 ASSUME URL > 'zzz' AND startsWith(URL, 'test') = True) ENGINE = TinyLog; + +--- Add wrong rows in order to check optimization +INSERT INTO constraint_test_assumption (URL, a) VALUES ('1', 1); +INSERT INTO constraint_test_assumption (URL, a) VALUES ('2', 2); +INSERT INTO constraint_test_assumption (URL, a) VALUES ('yandex.ru', 3); +INSERT INTO constraint_test_assumption (URL, a) VALUES ('3', 4); + +SELECT count() FROM constraint_test_assumption WHERE domainWithoutWWW(URL) = 'yandex.ru'; --- assumption -> 4 +SELECT count() FROM constraint_test_assumption WHERE NOT (domainWithoutWWW(URL) = 'yandex.ru'); --- assumption -> 0 +SELECT count() FROM constraint_test_assumption WHERE domainWithoutWWW(URL) != 'yandex.ru'; --- assumption -> 0 +SELECT count() FROM constraint_test_assumption WHERE domainWithoutWWW(URL) = 'nothing'; --- not optimized -> 0 + +SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'yandex.ru' AND URL > 'zzz'); ---> assumption -> 4 +SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'yandex.ru' AND NOT URL <= 'zzz'); ---> assumption -> 4 +SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'yandex.ru' AND URL > 'zzz') OR (a = 10 AND a + 5 < 100); ---> assumption -> 4 +SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'yandex.ru' AND URL = '111'); ---> assumption & no assumption -> 0 +SELECT count() FROM constraint_test_assumption WHERE (startsWith(URL, 'test') = True); ---> assumption -> 4 + +DROP TABLE constraint_test_assumption; + +CREATE TABLE constraint_test_transitivity (a Int64, b Int64, c Int64, d Int32, CONSTRAINT c1 ASSUME a = b AND c = d, CONSTRAINT c2 ASSUME b = c) ENGINE = TinyLog; + +INSERT INTO constraint_test_transitivity (a, b, c, d) VALUES (1, 2, 3, 4); + 
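+-- c1 assumes a = b and c = d, c2 assumes b = c, so a = d follows by transitivity (the inserted row intentionally violates it)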
+SELECT count() FROM constraint_test_transitivity WHERE a = d; ---> assumption -> 1 + +DROP TABLE constraint_test_transitivity; + + +CREATE TABLE constraint_test_strong_connectivity (a String, b String, c String, d String, CONSTRAINT c1 ASSUME a <= b AND b <= c AND c <= d AND d <= a) ENGINE = TinyLog; + +INSERT INTO constraint_test_strong_connectivity (a, b, c, d) VALUES ('1', '2', '3', '4'); + +SELECT count() FROM constraint_test_strong_connectivity WHERE a = d; ---> assumption -> 1 +SELECT count() FROM constraint_test_strong_connectivity WHERE a = c AND b = d; ---> assumption -> 1 +SELECT count() FROM constraint_test_strong_connectivity WHERE a < c OR b < d; ---> assumption -> 0 +SELECT count() FROM constraint_test_strong_connectivity WHERE a <= c OR b <= d; ---> assumption -> 1 + +DROP TABLE constraint_test_strong_connectivity; + +CREATE TABLE constraint_test_transitivity2 (a String, b String, c String, d String, CONSTRAINT c1 ASSUME a > b AND b >= c AND c > d AND a >= d) ENGINE = TinyLog; + +INSERT INTO constraint_test_transitivity2 (a, b, c, d) VALUES ('1', '2', '3', '4'); + +SELECT count() FROM constraint_test_transitivity2 WHERE a > d; ---> assumption -> 1 +SELECT count() FROM constraint_test_transitivity2 WHERE a >= d; ---> assumption -> 1 +SELECT count() FROM constraint_test_transitivity2 WHERE d < a; ---> assumption -> 1 +SELECT count() FROM constraint_test_transitivity2 WHERE a < d; ---> assumption -> 0 +SELECT count() FROM constraint_test_transitivity2 WHERE a = d; ---> assumption -> 0 +SELECT count() FROM constraint_test_transitivity2 WHERE a != d; ---> assumption -> 1 + +DROP TABLE constraint_test_transitivity2; + +CREATE TABLE constraint_test_transitivity3 (a Int64, b Int64, c Int64, CONSTRAINT c1 ASSUME b > 10 AND 1 > a) ENGINE = TinyLog; + +INSERT INTO constraint_test_transitivity3 (a, b, c) VALUES (4, 0, 2); + +SELECT count() FROM constraint_test_transitivity3 WHERE a < b; ---> assumption -> 1 +SELECT count() FROM constraint_test_transitivity3 WHERE b >= a; ---> assumption -> 1 + +DROP TABLE constraint_test_transitivity3; + + +CREATE TABLE constraint_test_constants_repl (a Int64, b Int64, c Int64, d Int64, CONSTRAINT c1 ASSUME a - b = 10 AND c + d = 20) ENGINE = TinyLog; + +INSERT INTO constraint_test_constants_repl (a, b, c, d) VALUES (1, 2, 3, 4); + +SELECT count() FROM constraint_test_constants_repl WHERE a - b = 10; ---> assumption -> 1 +SELECT count() FROM constraint_test_constants_repl WHERE a - b < 0; ---> assumption -> 0 +SELECT count() FROM constraint_test_constants_repl WHERE a - b = c + d; ---> assumption -> 0 +SELECT count() FROM constraint_test_constants_repl WHERE (a - b) * 2 = c + d; ---> assumption -> 1 + +DROP TABLE constraint_test_constants_repl; + +CREATE TABLE constraint_test_constants (a Int64, b Int64, c Int64, CONSTRAINT c1 ASSUME b > 10 AND a >= 10) ENGINE = TinyLog; + +INSERT INTO constraint_test_constants (a, b, c) VALUES (0, 0, 0); + +SELECT count() FROM constraint_test_constants WHERE 9 < b; ---> assumption -> 1 +SELECT count() FROM constraint_test_constants WHERE 11 < b; ---> assumption -> 0 +SELECT count() FROM constraint_test_constants WHERE 10 <= b; ---> assumption -> 1 +SELECT count() FROM constraint_test_constants WHERE 9 < a; ---> assumption -> 1 +SELECT count() FROM constraint_test_constants WHERE 10 < a; ---> assumption -> 0 +SELECT count() FROM constraint_test_constants WHERE 10 <= a; ---> assumption -> 1 +SELECT count() FROM constraint_test_constants WHERE 9 <= a; ---> assumption -> 1 +SELECT count() FROM constraint_test_constants 
WHERE 11 <= a; ---> assumption -> 0
+
+-- A AND NOT A
+EXPLAIN SYNTAX SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100);
+EXPLAIN SYNTAX SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100);
+EXPLAIN SYNTAX SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) AND (c > 100);
+EXPLAIN SYNTAX SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) AND (c <= 100);
+
+DROP TABLE constraint_test_constants;
diff --git a/tests/queries/0_stateless/01622_constraints_where_optimization.reference b/tests/queries/0_stateless/01622_constraints_where_optimization.reference
new file mode 100644
index 00000000000..c7c516025f2
--- /dev/null
+++ b/tests/queries/0_stateless/01622_constraints_where_optimization.reference
@@ -0,0 +1,14 @@
+SELECT count()
+FROM t_constraints_where
+WHERE 0
+SELECT count()
+FROM t_constraints_where
+WHERE 0
+SELECT count()
+FROM t_constraints_where
+WHERE 0
+SELECT count()
+FROM t_constraints_where
+WHERE b < 8
+SELECT count()
+FROM t_constraints_where
diff --git a/tests/queries/0_stateless/01622_constraints_where_optimization.sql b/tests/queries/0_stateless/01622_constraints_where_optimization.sql
new file mode 100644
index 00000000000..6a9d1ba9f6b
--- /dev/null
+++ b/tests/queries/0_stateless/01622_constraints_where_optimization.sql
@@ -0,0 +1,23 @@
+SET convert_query_to_cnf = 1;
+SET optimize_using_constraints = 1;
+
+DROP TABLE IF EXISTS t_constraints_where;
+
+CREATE TABLE t_constraints_where(a UInt32, b UInt32, CONSTRAINT c1 ASSUME b >= 5, CONSTRAINT c2 ASSUME b <= 10) ENGINE = Memory;
+
+INSERT INTO t_constraints_where VALUES (1, 7);
+
+EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b > 15; -- assumption -> 0
+EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b = 20; -- assumption -> 0
+EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b < 2; -- assumption -> 0
+EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b > 20 OR b < 8; -- assumption -> remove (b > 20), keep (b < 8)
+
+DROP TABLE t_constraints_where;
+
+CREATE TABLE t_constraints_where(a UInt32, b UInt32, CONSTRAINT c1 ASSUME b < 10) ENGINE = Memory;
+
+INSERT INTO t_constraints_where VALUES (1, 7);
+
+EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b = 1 OR b < 18 OR b > 5; -- assumption -> (b < 18) is always true -> WHERE is removed
+
+DROP TABLE t_constraints_where;
diff --git a/tests/queries/0_stateless/01623_constraints_column_swap.reference b/tests/queries/0_stateless/01623_constraints_column_swap.reference
new file mode 100644
index 00000000000..7ae4516fe9e
--- /dev/null
+++ b/tests/queries/0_stateless/01623_constraints_column_swap.reference
@@ -0,0 +1,53 @@
+SELECT
+    (b AS `cityHash64(a)`) + 10 AS `plus(cityHash64(a), 10)`,
+    (b AS b) + 3 AS `plus(b, 3)`
+FROM column_swap_test_test
+WHERE b = 1
+SELECT
+    (b AS `cityHash64(a)`) + 10 AS `plus(cityHash64(a), 10)`,
+    (b AS b) + 3 AS `plus(b, 3)`
+FROM column_swap_test_test
+WHERE b = 0
+SELECT
+    (b AS `cityHash64(a)`) + 10 AS `plus(cityHash64(a), 10)`,
+    (b AS b) + 3 AS `plus(b, 3)`
+FROM column_swap_test_test
+WHERE b = 0
+SELECT
+    (b AS `cityHash64(a)`) + 10 AS `plus(cityHash64(a), 10)`,
+    (b AS b) + 3 AS `plus(b, 3)`
+FROM column_swap_test_test
+WHERE b = 1
+SELECT (b AS `cityHash64(a)`) + 
10 AS `plus(cityHash64(a), 10)` +FROM column_swap_test_test +WHERE b = 0 +SELECT + (cityHash64(a) AS `cityHash64(a)`) + 10 AS `plus(cityHash64(a), 10)`, + a AS a +FROM column_swap_test_test +WHERE cityHash64(a) = 0 +SELECT + (cityHash64(a) AS b) + 10 AS `plus(b, 10)`, + a AS a +FROM column_swap_test_test +WHERE cityHash64(a) = 0 +SELECT + a AS `substring(reverse(b), 1, 1)`, + a AS a +FROM column_swap_test_test +WHERE a = \'c\' +SELECT + a AS `substring(reverse(b), 1, 1)`, + a AS a +FROM column_swap_test_test +WHERE a = \'c\' +SELECT + a AS t1, + a AS t2 +FROM column_swap_test_test +WHERE a = \'c\' +SELECT a AS `substring(reverse(b), 1, 1)` +FROM column_swap_test_test +WHERE a = \'c\' +SELECT a +FROM t_bad_constraint diff --git a/tests/queries/0_stateless/01623_constraints_column_swap.sql b/tests/queries/0_stateless/01623_constraints_column_swap.sql new file mode 100644 index 00000000000..0fb0d417a43 --- /dev/null +++ b/tests/queries/0_stateless/01623_constraints_column_swap.sql @@ -0,0 +1,42 @@ +SET convert_query_to_cnf = 1; +SET optimize_using_constraints = 1; +SET optimize_move_to_prewhere = 1; +SET optimize_substitute_columns = 1; +SET optimize_append_index = 1; + +DROP TABLE IF EXISTS column_swap_test_test; + +CREATE TABLE column_swap_test_test (i Int64, a String, b UInt64, CONSTRAINT c1 ASSUME b = cityHash64(a)) ENGINE = MergeTree() ORDER BY i; +INSERT INTO column_swap_test_test VALUES (1, 'cat', 1), (2, 'dog', 2); +INSERT INTO column_swap_test_test SELECT number AS i, format('test {} kek {}', toString(number), toString(number + 10)) AS a, 1 AS b FROM system.numbers LIMIT 1000000; + +EXPLAIN SYNTAX SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE cityHash64(a) = 1; +EXPLAIN SYNTAX SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE cityHash64(a) = 0; +EXPLAIN SYNTAX SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE b = 0; +EXPLAIN SYNTAX SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE b = 1; + +EXPLAIN SYNTAX SELECT cityHash64(a) + 10 FROM column_swap_test_test WHERE cityHash64(a) = 0; +EXPLAIN SYNTAX SELECT cityHash64(a) + 10, a FROM column_swap_test_test WHERE cityHash64(a) = 0; +EXPLAIN SYNTAX SELECT b + 10, a FROM column_swap_test_test WHERE b = 0; + +DROP TABLE column_swap_test_test; + +CREATE TABLE column_swap_test_test (i Int64, a String, b String, CONSTRAINT c1 ASSUME a = substring(reverse(b), 1, 1)) ENGINE = MergeTree() ORDER BY i; +INSERT INTO column_swap_test_test SELECT number AS i, toString(number) AS a, format('test {} kek {}', toString(number), toString(number + 10)) b FROM system.numbers LIMIT 1000000; + +EXPLAIN SYNTAX SELECT substring(reverse(b), 1, 1), a FROM column_swap_test_test WHERE a = 'c'; +EXPLAIN SYNTAX SELECT substring(reverse(b), 1, 1), a FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c'; +EXPLAIN SYNTAX SELECT substring(reverse(b), 1, 1) AS t1, a AS t2 FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c'; +EXPLAIN SYNTAX SELECT substring(reverse(b), 1, 1) FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c'; + +DROP TABLE column_swap_test_test; + +DROP TABLE IF EXISTS t_bad_constraint; + +CREATE TABLE t_bad_constraint(a UInt32, s String, CONSTRAINT c1 ASSUME a = toUInt32(s)) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_bad_constraint SELECT number, randomPrintableASCII(100) FROM numbers(10000); + +EXPLAIN SYNTAX SELECT a FROM t_bad_constraint; + +DROP TABLE t_bad_constraint; diff --git 
a/tests/queries/0_stateless/01624_soft_constraints.reference b/tests/queries/0_stateless/01624_soft_constraints.reference new file mode 100644 index 00000000000..fef1b3c48a7 --- /dev/null +++ b/tests/queries/0_stateless/01624_soft_constraints.reference @@ -0,0 +1,16 @@ + "rows_read": 4, + "rows_read": 2, + "rows_read": 4, + "rows_read": 2, + "rows_read": 2, + "rows_read": 2, + "rows_read": 4, + "rows_read": 2, + "rows_read": 4, + "rows_read": 4, + "rows_read": 1, + "rows_read": 4, + "rows_read": 3, + "rows_read": 4, + "rows_read": 4, + "rows_read": 3, diff --git a/tests/queries/0_stateless/01624_soft_constraints.sh b/tests/queries/0_stateless/01624_soft_constraints.sh new file mode 100755 index 00000000000..944a4e4234f --- /dev/null +++ b/tests/queries/0_stateless/01624_soft_constraints.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +set -e + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +SETTINGS="SET convert_query_to_cnf = 1; SET optimize_using_constraints = 1; SET optimize_move_to_prewhere = 1; SET optimize_substitute_columns = 1; SET optimize_append_index = 1" + +$CLICKHOUSE_CLIENT -n --query=" +$SETTINGS; +DROP TABLE IF EXISTS hypothesis_test_test; +DROP TABLE IF EXISTS hypothesis_test_test2; +DROP TABLE IF EXISTS hypothesis_test_test3; + +CREATE TABLE hypothesis_test_test ( + i UInt64, + a UInt64, + b UInt64, + c Float64, + INDEX t (a < b) TYPE hypothesis GRANULARITY 1, + INDEX t2 (b <= c) TYPE hypothesis GRANULARITY 1 +) ENGINE = MergeTree() ORDER BY i SETTINGS index_granularity=1; +" + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS; +INSERT INTO hypothesis_test_test VALUES (0, 1, 2, 2), (1, 2, 1, 2), (2, 2, 2, 1), (3, 1, 2, 3)" + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS; SELECT count() FROM hypothesis_test_test WHERE b > a FORMAT JSON" | grep "rows_read" # 4 + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS; SELECT count() FROM hypothesis_test_test WHERE b <= a FORMAT JSON" | grep "rows_read" + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS; SELECT count() FROM hypothesis_test_test WHERE b >= a FORMAT JSON" | grep "rows_read" # 4 + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS; SELECT count() FROM hypothesis_test_test WHERE b = a FORMAT JSON" | grep "rows_read" + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS; SELECT count() FROM hypothesis_test_test WHERE c < a FORMAT JSON" | grep "rows_read" + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS; SELECT count() FROM hypothesis_test_test WHERE c = a FORMAT JSON" | grep "rows_read" + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS; SELECT count() FROM hypothesis_test_test WHERE c > a FORMAT JSON" | grep "rows_read" # 4 + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS; SELECT count() FROM hypothesis_test_test WHERE c < a FORMAT JSON" | grep "rows_read" + + +$CLICKHOUSE_CLIENT -n --query=" +$SETTINGS; +CREATE TABLE hypothesis_test_test2 ( + i UInt64, + a UInt64, + b UInt64, + INDEX t (a != b) TYPE hypothesis GRANULARITY 1 +) ENGINE = MergeTree() ORDER BY i SETTINGS index_granularity=1; +" + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS; +INSERT INTO hypothesis_test_test2 VALUES (0, 1, 2), (1, 2, 1), (2, 2, 2), (3, 1, 0)" + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS; SELECT count() FROM hypothesis_test_test2 WHERE a < b FORMAT JSON" | grep "rows_read" # 4 + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS; SELECT count() FROM hypothesis_test_test2 WHERE a <= b FORMAT JSON" | grep "rows_read" # 4 + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS; SELECT count() FROM hypothesis_test_test2 WHERE a = b FORMAT JSON" | grep 
"rows_read" # 1 + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS; SELECT count() FROM hypothesis_test_test2 WHERE a != b FORMAT JSON" | grep "rows_read" # 4 + + +$CLICKHOUSE_CLIENT -n --query=" +$SETTINGS; +CREATE TABLE hypothesis_test_test3 ( + i UInt64, + a UInt64, + b UInt64, + INDEX t (a = b) TYPE hypothesis GRANULARITY 1 +) ENGINE = MergeTree() ORDER BY i SETTINGS index_granularity=1; +" + +$CLICKHOUSE_CLIENT -n --query=" +$SETTINGS; +INSERT INTO hypothesis_test_test3 VALUES (0, 1, 2), (1, 2, 1), (2, 2, 2), (3, 1, 0)" + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS;SELECT count() FROM hypothesis_test_test3 WHERE a < b FORMAT JSON" | grep "rows_read" # 3 + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS;SELECT count() FROM hypothesis_test_test3 WHERE a <= b FORMAT JSON" | grep "rows_read" # 4 + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS;SELECT count() FROM hypothesis_test_test3 WHERE a = b FORMAT JSON" | grep "rows_read" # 4 + +$CLICKHOUSE_CLIENT -n --query="$SETTINGS;SELECT count() FROM hypothesis_test_test3 WHERE a != b FORMAT JSON" | grep "rows_read" # 3 + + +$CLICKHOUSE_CLIENT -n --query=" +$SETTINGS; +DROP TABLE hypothesis_test_test; +DROP TABLE hypothesis_test_test2; +DROP TABLE hypothesis_test_test3;" diff --git a/tests/queries/0_stateless/01625_constraints_index_append.reference b/tests/queries/0_stateless/01625_constraints_index_append.reference new file mode 100644 index 00000000000..0df5c429d9e --- /dev/null +++ b/tests/queries/0_stateless/01625_constraints_index_append.reference @@ -0,0 +1,15 @@ +SELECT i AS i +FROM index_append_test_test +PREWHERE a = 0 +WHERE (a = 0) AND indexHint((i + 40) > 0) +SELECT i AS i +FROM index_append_test_test +PREWHERE a < 0 +SELECT i AS i +FROM index_append_test_test +PREWHERE a >= 0 +WHERE (a >= 0) AND indexHint((i + 40) > 0) +SELECT i AS i +FROM index_append_test_test +PREWHERE (2 * b) < 100 +WHERE ((2 * b) < 100) AND indexHint(i < 100) diff --git a/tests/queries/0_stateless/01625_constraints_index_append.sql b/tests/queries/0_stateless/01625_constraints_index_append.sql new file mode 100644 index 00000000000..fbffc9c7f10 --- /dev/null +++ b/tests/queries/0_stateless/01625_constraints_index_append.sql @@ -0,0 +1,17 @@ +SET convert_query_to_cnf = 1; +SET optimize_using_constraints = 1; +SET optimize_move_to_prewhere = 1; +SET optimize_substitute_columns = 1; +SET optimize_append_index = 1; + +DROP TABLE IF EXISTS index_append_test_test; + +CREATE TABLE index_append_test_test (i Int64, a UInt32, b UInt64, CONSTRAINT c1 ASSUME i <= 2 * b AND i + 40 > a) ENGINE = MergeTree() ORDER BY i; +INSERT INTO index_append_test_test VALUES (1, 10, 1), (2, 20, 2); + +EXPLAIN SYNTAX SELECT i FROM index_append_test_test WHERE a = 0; +EXPLAIN SYNTAX SELECT i FROM index_append_test_test WHERE a < 0; +EXPLAIN SYNTAX SELECT i FROM index_append_test_test WHERE a >= 0; +EXPLAIN SYNTAX SELECT i FROM index_append_test_test WHERE 2 * b < 100; + +DROP TABLE index_append_test_test; diff --git a/tests/queries/0_stateless/01626_cnf_fuzz_long.python b/tests/queries/0_stateless/01626_cnf_fuzz_long.python new file mode 100644 index 00000000000..10c12d14182 --- /dev/null +++ b/tests/queries/0_stateless/01626_cnf_fuzz_long.python @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 +import os +from random import randint, choices +import sys + +CURDIR = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, os.path.join(CURDIR, 'helpers')) + +from pure_http_client import ClickHouseClient + +client = ClickHouseClient() + +N = 10 +create_query = "CREATE TABLE t_cnf_fuzz(" + ", ".join([f"c{i} 
UInt8" for i in range(N)]) + ") ENGINE = Memory" + +client.query("DROP TABLE IF EXISTS t_cnf_fuzz") +client.query(create_query) + +# Insert all possible combinations of bool columns. +insert_query = "INSERT INTO t_cnf_fuzz VALUES " +for i in range(2**N): + values = [] + cur = i + for _ in range(N): + values.append(cur % 2) + cur //= 2 + + insert_query += "(" + ", ".join(map(lambda x: str(x), values)) + ")" + +client.query(insert_query) + +# Let's try to covert DNF to CNF, +# because it's a worst case in a sense. + +MAX_CLAUSES = 10 +MAX_ATOMS = 5 + +def generate_dnf(): + clauses = [] + num_clauses = randint(1, MAX_CLAUSES) + for _ in range(num_clauses): + num_atoms = randint(1, MAX_ATOMS) + atom_ids = choices(range(N), k=num_atoms) + negates = choices([0, 1], k=num_atoms) + atoms = [f"(NOT c{i})" if neg else f"c{i}" for (i, neg) in zip(atom_ids, negates)] + clauses.append("(" + " AND ".join(atoms) + ")") + + return " OR ".join(clauses) + +select_query = "SELECT count() FROM t_cnf_fuzz WHERE {} SETTINGS convert_query_to_cnf = {}" + +fail_report = """ +Failed query: '{}'. +Result without optimization: {}. +Result with optimization: {}. +""" + +T = 500 +for _ in range(T): + condition = generate_dnf() + + query = select_query.format(condition, 0) + res = client.query(query).strip() + + query_cnf = select_query.format(condition, 1) + res_cnf = client.query(query_cnf).strip() + + if res != res_cnf: + print(fail_report.format(query_cnf, res, res_cnf)) + exit(1) + +client.query("DROP TABLE t_cnf_fuzz") +print("OK") diff --git a/tests/queries/0_stateless/01626_cnf_fuzz_long.reference b/tests/queries/0_stateless/01626_cnf_fuzz_long.reference new file mode 100644 index 00000000000..d86bac9de59 --- /dev/null +++ b/tests/queries/0_stateless/01626_cnf_fuzz_long.reference @@ -0,0 +1 @@ +OK diff --git a/tests/queries/0_stateless/01626_cnf_fuzz_long.sh b/tests/queries/0_stateless/01626_cnf_fuzz_long.sh new file mode 100755 index 00000000000..bdf53cdb252 --- /dev/null +++ b/tests/queries/0_stateless/01626_cnf_fuzz_long.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long +# Tag no-fasttest: Require python libraries like scipy, pandas and numpy + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +# We should have correct env vars from shell_config.sh to run this test +python3 "$CURDIR"/01626_cnf_fuzz_long.python diff --git a/tests/queries/0_stateless/01626_cnf_test.reference b/tests/queries/0_stateless/01626_cnf_test.reference new file mode 100644 index 00000000000..081215c9fb2 --- /dev/null +++ b/tests/queries/0_stateless/01626_cnf_test.reference @@ -0,0 +1,18 @@ +SELECT i +FROM cnf_test +WHERE (i <= 2) AND (i <= 1) +SELECT i +FROM cnf_test +WHERE (i <= 2) OR (i <= 1) +SELECT i +FROM cnf_test +WHERE ((i > 2) OR (i > 5) OR (i > 3)) AND ((i > 2) OR (i > 5) OR (i > 4)) AND ((i > 2) OR (i > 6) OR (i > 3)) AND ((i > 2) OR (i > 6) OR (i > 4)) AND ((i > 1) OR (i > 5) OR (i > 3)) AND ((i > 1) OR (i > 5) OR (i > 4)) AND ((i > 1) OR (i > 6) OR (i > 3)) AND ((i > 1) OR (i > 6) OR (i > 4)) +SELECT i +FROM cnf_test +WHERE ((i <= 3) OR (i <= 2) OR (i <= 5)) AND ((i <= 3) OR (i <= 2) OR (i <= 6)) AND ((i <= 3) OR (i <= 5) OR (i <= 1)) AND ((i <= 3) OR (i <= 6) OR (i <= 1)) AND ((i <= 2) OR (i <= 5) OR (i <= 4)) AND ((i <= 2) OR (i <= 6) OR (i <= 4)) AND ((i <= 5) OR (i <= 1) OR (i <= 4)) AND ((i <= 6) OR (i <= 1) OR (i <= 4)) +SELECT i +FROM cnf_test +WHERE ((i > 2) OR (i > 5) OR (i > 3)) AND ((i > 2) OR (i > 5) OR (i > 4)) AND ((i > 2) OR (i > 5) OR (i > 8)) AND ((i > 2) OR (i > 6) OR (i > 3)) AND ((i > 2) OR (i > 6) OR (i > 4)) AND ((i > 2) OR (i > 6) OR (i > 8)) AND ((i > 1) OR (i > 5) OR (i > 3)) AND ((i > 1) OR (i > 5) OR (i > 4)) AND ((i > 1) OR (i > 5) OR (i > 8)) AND ((i > 1) OR (i > 6) OR (i > 3)) AND ((i > 1) OR (i > 6) OR (i > 4)) AND ((i > 1) OR (i > 6) OR (i > 8)) AND ((i > 5) OR (i > 3) OR (i > 7)) AND ((i > 5) OR (i > 4) OR (i > 7)) AND ((i > 5) OR (i > 8) OR (i > 7)) AND ((i > 6) OR (i > 3) OR (i > 7)) AND ((i > 6) OR (i > 4) OR (i > 7)) AND ((i > 6) OR (i > 8) OR (i > 7)) +SELECT i +FROM cnf_test +WHERE ((i > 2) OR (i > 1) OR (i > 7)) AND (i <= 5) AND (i <= 6) AND ((i > 3) OR (i > 4) OR (i > 8)) diff --git a/tests/queries/0_stateless/01626_cnf_test.sql b/tests/queries/0_stateless/01626_cnf_test.sql new file mode 100644 index 00000000000..8db732bc227 --- /dev/null +++ b/tests/queries/0_stateless/01626_cnf_test.sql @@ -0,0 +1,18 @@ +SET convert_query_to_cnf = 1; + +DROP TABLE IF EXISTS cnf_test; + +CREATE TABLE cnf_test (i Int64) ENGINE = MergeTree() ORDER BY i; + +EXPLAIN SYNTAX SELECT i FROM cnf_test WHERE NOT ((i > 1) OR (i > 2)); +EXPLAIN SYNTAX SELECT i FROM cnf_test WHERE NOT ((i > 1) AND (i > 2)); + +EXPLAIN SYNTAX SELECT i FROM cnf_test WHERE ((i > 1) AND (i > 2)) OR ((i > 3) AND (i > 4)) OR ((i > 5) AND (i > 6)); + +EXPLAIN SYNTAX SELECT i FROM cnf_test WHERE NOT (((i > 1) OR (i > 2)) AND ((i > 3) OR (i > 4)) AND ((i > 5) OR (i > 6))); + +EXPLAIN SYNTAX SELECT i FROM cnf_test WHERE ((i > 1) AND (i > 2) AND (i > 7)) OR ((i > 3) AND (i > 4) AND (i > 8)) OR ((i > 5) AND (i > 6)); + +EXPLAIN SYNTAX SELECT i FROM cnf_test WHERE ((i > 1) OR (i > 2) OR (i > 7)) AND ((i > 3) OR (i > 4) OR (i > 8)) AND NOT ((i > 5) OR (i > 6)); + +DROP TABLE cnf_test; diff --git a/tests/queries/0_stateless/01676_range_hashed_dictionary.sql b/tests/queries/0_stateless/01676_range_hashed_dictionary.sql index e933d599bd2..ff69d61b26b 100644 --- a/tests/queries/0_stateless/01676_range_hashed_dictionary.sql +++ b/tests/queries/0_stateless/01676_range_hashed_dictionary.sql @@ -53,8 +53,8 @@ SELECT CountryID, StartDate, Tax FROM database_for_range_dict.range_dictionary; SELECT 'onlySpecificColumn'; SELECT Tax FROM database_for_range_dict.range_dictionary; -DROP TABLE 
database_for_range_dict.date_table; DROP DICTIONARY database_for_range_dict.range_dictionary; +DROP TABLE database_for_range_dict.date_table; CREATE TABLE database_for_range_dict.date_table ( @@ -105,8 +105,8 @@ SELECT CountryID, StartDate, Tax FROM database_for_range_dict.range_dictionary_n SELECT 'onlySpecificColumn'; SELECT Tax FROM database_for_range_dict.range_dictionary_nullable; -DROP TABLE database_for_range_dict.date_table; DROP DICTIONARY database_for_range_dict.range_dictionary_nullable; +DROP TABLE database_for_range_dict.date_table; DROP DATABASE database_for_range_dict; diff --git a/tests/queries/0_stateless/01710_minmax_count_projection.reference b/tests/queries/0_stateless/01710_minmax_count_projection.reference index ed78b475842..77649f536f5 100644 --- a/tests/queries/0_stateless/01710_minmax_count_projection.reference +++ b/tests/queries/0_stateless/01710_minmax_count_projection.reference @@ -8,3 +8,12 @@ 0 9999 1 9999 3 +2021-10-25 10:00:00 2021-10-27 10:00:00 3 +1 +1 +1 +1 +\N 2021-10-27 10:00:00 4 +2021-10-24 10:00:00 +2021-10-24 10:00:00 +0 diff --git a/tests/queries/0_stateless/01710_minmax_count_projection.sql b/tests/queries/0_stateless/01710_minmax_count_projection.sql index 0138ad8c57e..713241ada72 100644 --- a/tests/queries/0_stateless/01710_minmax_count_projection.sql +++ b/tests/queries/0_stateless/01710_minmax_count_projection.sql @@ -43,3 +43,18 @@ drop table if exists t; create table t (server_date Date, something String) engine MergeTree partition by (toYYYYMM(server_date), server_date) order by (server_date, something); insert into t values ('2019-01-01', 'test1'), ('2019-02-01', 'test2'), ('2019-03-01', 'test3'); select count() from t; +drop table t; + +drop table if exists d; +create table d (dt DateTime, j int) engine MergeTree partition by (toDate(dt), ceiling(j), toDate(dt), CEILING(j)) order by tuple(); +insert into d values ('2021-10-24 10:00:00', 10), ('2021-10-25 10:00:00', 10), ('2021-10-26 10:00:00', 10), ('2021-10-27 10:00:00', 10); +select min(dt), max(dt), count() from d where toDate(dt) >= '2021-10-25'; +select count() from d group by toDate(dt); + +-- fuzz crash +SELECT pointInEllipses(min(j), NULL), max(dt), count('0.0000000007') FROM d WHERE toDate(dt) >= '2021-10-25'; +SELECT min(dt) FROM d PREWHERE ceil(j) <= 0; +SELECT min(dt) FROM d PREWHERE ((0.9998999834060669 AND 1023) AND 255) <= ceil(j); +SELECT count('') AND NULL FROM d PREWHERE ceil(j) <= NULL; + +drop table d; diff --git a/tests/queries/0_stateless/01759_dictionary_unique_attribute_names.sql b/tests/queries/0_stateless/01759_dictionary_unique_attribute_names.sql index 59f20ca7a51..1a1e65a4e1a 100644 --- a/tests/queries/0_stateless/01759_dictionary_unique_attribute_names.sql +++ b/tests/queries/0_stateless/01759_dictionary_unique_attribute_names.sql @@ -3,7 +3,6 @@ DROP DATABASE IF EXISTS 01759_db; CREATE DATABASE 01759_db; -DROP TABLE IF EXISTS 01759_db.dictionary_source_table; CREATE TABLE 01759_db.dictionary_source_table ( key UInt64, @@ -14,8 +13,6 @@ ENGINE = TinyLog; INSERT INTO 01759_db.dictionary_source_table VALUES (0, 2, 3), (1, 5, 6), (2, 8, 9); -DROP DICTIONARY IF EXISTS 01759_db.test_dictionary; - CREATE DICTIONARY 01759_db.test_dictionary(key UInt64, value1 UInt64, value1 UInt64) PRIMARY KEY key SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dictionary_source_table' DB '01759_db')) @@ -29,6 +26,4 @@ LAYOUT(COMPLEX_KEY_DIRECT()); SELECT number, dictGet('01759_db.test_dictionary', 'value1', tuple(number)) as value1, 
dictGet('01759_db.test_dictionary', 'value2', tuple(number)) as value2 FROM system.numbers LIMIT 3; -DROP TABLE 01759_db.dictionary_source_table; - DROP DATABASE 01759_db; diff --git a/tests/queries/0_stateless/01824_prefer_global_in_and_join.reference b/tests/queries/0_stateless/01824_prefer_global_in_and_join.reference index 195630268b6..00c1cb99331 100644 --- a/tests/queries/0_stateless/01824_prefer_global_in_and_join.reference +++ b/tests/queries/0_stateless/01824_prefer_global_in_and_join.reference @@ -42,7 +42,7 @@ GLOBAL ALL INNER JOIN ( SELECT id FROM t1_distr AS d1 - ALL INNER JOIN t2_distr AS d2 ON id = d2.id + GLOBAL ALL INNER JOIN t2_distr AS d2 ON id = d2.id WHERE id > 0 ORDER BY id ASC ) AS s0 USING (id) diff --git a/tests/queries/0_stateless/01889_sql_json_functions.reference b/tests/queries/0_stateless/01889_sql_json_functions.reference index fd8989611a8..c2c106e8632 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.reference +++ b/tests/queries/0_stateless/01889_sql_json_functions.reference @@ -1,44 +1,105 @@ +-- { echo } +SELECT '--JSON_VALUE--'; --JSON_VALUE-- +SELECT JSON_VALUE('{"hello":1}', '$'); -- root is a complex object => default value (empty string) +SELECT JSON_VALUE('{"hello":1}', '$.hello'); 1 +SELECT JSON_VALUE('{"hello":1.2}', '$.hello'); 1.2 +SELECT JSON_VALUE('{"hello":true}', '$.hello'); true -"world" +SELECT JSON_VALUE('{"hello":"world"}', '$.hello'); +world +SELECT JSON_VALUE('{"hello":null}', '$.hello'); null +SELECT JSON_VALUE('{"hello":["world","world2"]}', '$.hello'); +SELECT JSON_VALUE('{"hello":{"world":"!"}}', '$.hello'); +SELECT JSON_VALUE('{hello:world}', '$.hello'); -- invalid json => default value (empty string) +SELECT JSON_VALUE('', '$.hello'); -"bar" +SELECT JSON_VALUE('{"foo foo":"bar"}', '$."foo foo"'); +bar +SELECT JSON_VALUE('{"hello":"\\uD83C\\uDF3A \\uD83C\\uDF38 \\uD83C\\uDF37 Hello, World \\uD83C\\uDF37 \\uD83C\\uDF38 \\uD83C\\uDF3A"}', '$.hello'); +🌺 🌸 🌷 Hello, World 🌷 🌸 🌺 +SELECT JSON_VALUE('{"a":"Hello \\"World\\" \\\\"}', '$.a'); +Hello "World" \\ +select JSON_VALUE('{"a":"\\n\\u0000"}', '$.a'); +\n\0 +select JSON_VALUE('{"a":"\\u263a"}', '$.a'); +☺ +SELECT '--JSON_QUERY--'; --JSON_QUERY-- +SELECT JSON_QUERY('{"hello":1}', '$'); [{"hello":1}] +SELECT JSON_QUERY('{"hello":1}', '$.hello'); [1] +SELECT JSON_QUERY('{"hello":1.2}', '$.hello'); [1.2] +SELECT JSON_QUERY('{"hello":true}', '$.hello'); [true] +SELECT JSON_QUERY('{"hello":"world"}', '$.hello'); ["world"] +SELECT JSON_QUERY('{"hello":null}', '$.hello'); [null] +SELECT JSON_QUERY('{"hello":["world","world2"]}', '$.hello'); [["world","world2"]] +SELECT JSON_QUERY('{"hello":{"world":"!"}}', '$.hello'); [{"world":"!"}] +SELECT JSON_QUERY( '{hello:{"world":"!"}}}', '$.hello'); -- invalid json => default value (empty string) +SELECT JSON_QUERY('', '$.hello'); +SELECT JSON_QUERY('{"array":[[0, 1, 2, 3, 4, 5], [0, -1, -2, -3, -4, -5]]}', '$.array[*][0 to 2, 4]'); [0, 1, 4, 0, -1, -4] +SELECT '--JSON_EXISTS--'; --JSON_EXISTS-- +SELECT JSON_EXISTS('{"hello":1}', '$'); 1 +SELECT JSON_EXISTS('', '$'); 0 +SELECT JSON_EXISTS('{}', '$'); 1 +SELECT JSON_EXISTS('{"hello":1}', '$.hello'); 1 +SELECT JSON_EXISTS('{"hello":1,"world":2}', '$.world'); 1 +SELECT JSON_EXISTS('{"hello":{"world":1}}', '$.world'); 0 +SELECT JSON_EXISTS('{"hello":{"world":1}}', '$.hello.world'); 1 +SELECT JSON_EXISTS('{hello:world}', '$.hello'); -- invalid json => default value (zero integer) 0 +SELECT JSON_EXISTS('', '$.hello'); 0 +SELECT JSON_EXISTS('{"hello":["world"]}', '$.hello[*]'); 1 +SELECT 
JSON_EXISTS('{"hello":["world"]}', '$.hello[0]'); 1 +SELECT JSON_EXISTS('{"hello":["world"]}', '$.hello[1]'); 0 +SELECT JSON_EXISTS('{"a":[{"b":1},{"c":2}]}', '$.a[*].b'); 1 +SELECT JSON_EXISTS('{"a":[{"b":1},{"c":2}]}', '$.a[*].f'); 0 +SELECT JSON_EXISTS('{"a":[[{"b":1}, {"g":1}],[{"h":1},{"y":1}]]}', '$.a[*][0].h'); 1 +SELECT '--MANY ROWS--'; --MANY ROWS-- +DROP TABLE IF EXISTS 01889_sql_json; +CREATE TABLE 01889_sql_json (id UInt8, json String) ENGINE = MergeTree ORDER BY id; +INSERT INTO 01889_sql_json(id, json) VALUES(0, '{"name":"Ivan","surname":"Ivanov","friends":["Vasily","Kostya","Artyom"]}'); +INSERT INTO 01889_sql_json(id, json) VALUES(1, '{"name":"Katya","surname":"Baltica","friends":["Tihon","Ernest","Innokentiy"]}'); +INSERT INTO 01889_sql_json(id, json) VALUES(2, '{"name":"Vitali","surname":"Brown","friends":["Katya","Anatoliy","Ivan","Oleg"]}'); +SELECT id, JSON_QUERY(json, '$.friends[0 to 2]') FROM 01889_sql_json ORDER BY id; 0 ["Vasily", "Kostya"] 1 ["Tihon", "Ernest"] 2 ["Katya", "Anatoliy"] +SELECT id, JSON_VALUE(json, '$.friends[0]') FROM 01889_sql_json ORDER BY id; +0 Vasily +1 Tihon +2 Katya +DROP TABLE 01889_sql_json; diff --git a/tests/queries/0_stateless/01889_sql_json_functions.sql b/tests/queries/0_stateless/01889_sql_json_functions.sql index f68fe63ecab..e816443382c 100644 --- a/tests/queries/0_stateless/01889_sql_json_functions.sql +++ b/tests/queries/0_stateless/01889_sql_json_functions.sql @@ -1,5 +1,6 @@ -- Tags: no-fasttest +-- { echo } SELECT '--JSON_VALUE--'; SELECT JSON_VALUE('{"hello":1}', '$'); -- root is a complex object => default value (empty string) SELECT JSON_VALUE('{"hello":1}', '$.hello'); @@ -12,6 +13,10 @@ SELECT JSON_VALUE('{"hello":{"world":"!"}}', '$.hello'); SELECT JSON_VALUE('{hello:world}', '$.hello'); -- invalid json => default value (empty string) SELECT JSON_VALUE('', '$.hello'); SELECT JSON_VALUE('{"foo foo":"bar"}', '$."foo foo"'); +SELECT JSON_VALUE('{"hello":"\\uD83C\\uDF3A \\uD83C\\uDF38 \\uD83C\\uDF37 Hello, World \\uD83C\\uDF37 \\uD83C\\uDF38 \\uD83C\\uDF3A"}', '$.hello'); +SELECT JSON_VALUE('{"a":"Hello \\"World\\" \\\\"}', '$.a'); +select JSON_VALUE('{"a":"\\n\\u0000"}', '$.a'); +select JSON_VALUE('{"a":"\\u263a"}', '$.a'); SELECT '--JSON_QUERY--'; SELECT JSON_QUERY('{"hello":1}', '$'); @@ -50,4 +55,5 @@ INSERT INTO 01889_sql_json(id, json) VALUES(0, '{"name":"Ivan","surname":"Ivanov INSERT INTO 01889_sql_json(id, json) VALUES(1, '{"name":"Katya","surname":"Baltica","friends":["Tihon","Ernest","Innokentiy"]}'); INSERT INTO 01889_sql_json(id, json) VALUES(2, '{"name":"Vitali","surname":"Brown","friends":["Katya","Anatoliy","Ivan","Oleg"]}'); SELECT id, JSON_QUERY(json, '$.friends[0 to 2]') FROM 01889_sql_json ORDER BY id; +SELECT id, JSON_VALUE(json, '$.friends[0]') FROM 01889_sql_json ORDER BY id; DROP TABLE 01889_sql_json; diff --git a/tests/queries/0_stateless/01891_not_like_partition_prune.reference b/tests/queries/0_stateless/01891_not_like_partition_prune.reference new file mode 100644 index 00000000000..249697548b7 --- /dev/null +++ b/tests/queries/0_stateless/01891_not_like_partition_prune.reference @@ -0,0 +1,6 @@ +1.1 +1 +1.12 +1.2 +1 +1.1 diff --git a/tests/queries/0_stateless/01891_not_like_partition_prune.sql b/tests/queries/0_stateless/01891_not_like_partition_prune.sql new file mode 100644 index 00000000000..5346a7f08a8 --- /dev/null +++ b/tests/queries/0_stateless/01891_not_like_partition_prune.sql @@ -0,0 +1,9 @@ +drop table if exists test; + +create table test (a String) Engine MergeTree order by a 
partition by a; +insert into test values('1'), ('1.1'), ('1.2'), ('1.12'); + +select * from test where a like '1%1' order by a; +select * from test where a not like '1%1' order by a; +select * from test where a not like '1%2' order by a; +drop table test; diff --git a/tests/queries/0_stateless/01945_show_debug_warning.expect b/tests/queries/0_stateless/01945_show_debug_warning.expect index 7f14fdfbc96..402ad9a1f35 100755 --- a/tests/queries/0_stateless/01945_show_debug_warning.expect +++ b/tests/queries/0_stateless/01945_show_debug_warning.expect @@ -48,3 +48,10 @@ expect ":) " send -- "q\r" expect eof } + +spawn bash -c "source $basedir/../shell_config.sh ; \$CLICKHOUSE_CLIENT_BINARY \$CLICKHOUSE_CLIENT_OPT --disable_suggestion --max_memory_usage_for_all_queries=123" +expect "Warnings:" +expect " * Some obsolete setting is changed." +expect ":) " +send -- "q\r" +expect eof diff --git a/tests/queries/0_stateless/01945_system_warnings.expect b/tests/queries/0_stateless/01945_system_warnings.expect deleted file mode 100755 index 01a314429f8..00000000000 --- a/tests/queries/0_stateless/01945_system_warnings.expect +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/expect -f - -# This is a test for system.warnings. Testing in interactive mode is necessary, -# as we want to see certain warnings from client - -log_user 0 -set timeout 60 -match_max 100000 - -# A default timeout action is to do nothing, change it to fail -expect_after { - timeout { - exit 1 - } -} - -set basedir [file dirname $argv0] -spawn bash -c "source $basedir/../shell_config.sh ; \$CLICKHOUSE_CLIENT_BINARY \$CLICKHOUSE_CLIENT_OPT --disable_suggestion" -expect ":) " - -#find out BUILD TYPE -send -- "SELECT value FROM system.build_options WHERE name='BUILD_TYPE'\r" -expect { - "Debug" { - # Check debug message in system.warnings - send -- "SELECT message FROM system.warnings WHERE message='Server was built in debug mode. It will work slowly.'\r" - expect "Server was built in debug mode. It will work slowly." - expect ":) " - } - "RelWithDebInfo" { - # Check empty to find out existence - send -- "SELECT message FROM system.warnings WHERE 0=1\r" - expect "Ok." - expect ":) " - } -} - -# Finish test -send -- "q\r" -expect eof diff --git a/tests/queries/0_stateless/01945_system_warnings.reference b/tests/queries/0_stateless/01945_system_warnings.reference index e69de29bb2d..296a03447db 100644 --- a/tests/queries/0_stateless/01945_system_warnings.reference +++ b/tests/queries/0_stateless/01945_system_warnings.reference @@ -0,0 +1,5 @@ +Server was built in debug mode. It will work slowly. +0 +Some obsolete setting is changed. Check \'select * from system.settings where changed\' and read the changelog. +1 +1 diff --git a/tests/queries/0_stateless/01945_system_warnings.sh b/tests/queries/0_stateless/01945_system_warnings.sh new file mode 100755 index 00000000000..bf11cee2911 --- /dev/null +++ b/tests/queries/0_stateless/01945_system_warnings.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +set -e + +build_type=`${CLICKHOUSE_CLIENT} -q "SELECT value FROM system.build_options WHERE name='BUILD_TYPE'"` + +if [[ $build_type == "Debug" ]]; then + ${CLICKHOUSE_CLIENT} -q "SELECT message FROM system.warnings WHERE message LIKE '%built in debug mode%'" +else + echo "Server was built in debug mode. It will work slowly." 
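+    # printed manually so the .reference matches on release builds, where this warning is absent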
+fi + +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.warnings WHERE message LIKE '%obsolete setting%'" +${CLICKHOUSE_CLIENT} --multiple_joins_rewriter_version=42 -q "SELECT message FROM system.warnings WHERE message LIKE '%obsolete setting%'" + +# Avoid duplicated warnings +${CLICKHOUSE_CLIENT} -q "SELECT count() = countDistinct(message) FROM system.warnings" + +# Avoid too many warnings, especially in CI +${CLICKHOUSE_CLIENT} -q "SELECT count() < 5 FROM system.warnings" + diff --git a/tests/queries/0_stateless/02004_intersect_except_operators.reference b/tests/queries/0_stateless/02004_intersect_except_operators.reference index 85559496f2f..a96a6bc7264 100644 --- a/tests/queries/0_stateless/02004_intersect_except_operators.reference +++ b/tests/queries/0_stateless/02004_intersect_except_operators.reference @@ -134,3 +134,8 @@ UNION ALL SELECT 1 EXCEPT SELECT 4 +set limit=1; +select 1 intersect select 1; +1 +(((select 1) intersect select 1)); +1 diff --git a/tests/queries/0_stateless/02004_intersect_except_operators.sql b/tests/queries/0_stateless/02004_intersect_except_operators.sql index b95051cba65..7ed756cc56b 100644 --- a/tests/queries/0_stateless/02004_intersect_except_operators.sql +++ b/tests/queries/0_stateless/02004_intersect_except_operators.sql @@ -48,3 +48,7 @@ select 1 intersect select count() from (select 1 except select 2 intersect selec explain syntax select 1 intersect select 1; explain syntax select 1 except select 1; explain syntax select 1 union all select 2 except (select 2 except select 1 union all select 1) except select 4; + +set limit=1; +select 1 intersect select 1; +(((select 1) intersect select 1)); diff --git a/tests/queries/0_stateless/02006_test_positional_arguments.reference b/tests/queries/0_stateless/02006_test_positional_arguments.reference index 27936137a1b..bf2ea3333e4 100644 --- a/tests/queries/0_stateless/02006_test_positional_arguments.reference +++ b/tests/queries/0_stateless/02006_test_positional_arguments.reference @@ -145,3 +145,6 @@ select x1, x1 * 2, max(x2), max(x3) from test2 group by 2, 1, x1 order by 1, 2, 1 2 10 100 10 20 1 10 100 200 100 1 +select a, b, c, d, e, f from (select 44 a, 88 b, 13 c, 14 d, 15 e, 16 f) t group by 1,2,3,4,5,6 + +44 88 13 14 15 16 diff --git a/tests/queries/0_stateless/02006_test_positional_arguments.sql b/tests/queries/0_stateless/02006_test_positional_arguments.sql index 4b6affc290a..6bbdc9ceaff 100644 --- a/tests/queries/0_stateless/02006_test_positional_arguments.sql +++ b/tests/queries/0_stateless/02006_test_positional_arguments.sql @@ -47,3 +47,5 @@ explain syntax select x1 + x3, x3 from test group by 1, 2; create table test2(x1 Int, x2 Int, x3 Int) engine=Memory; insert into test2 values (1, 10, 100), (10, 1, 10), (100, 100, 1); select x1, x1 * 2, max(x2), max(x3) from test2 group by 2, 1, x1 order by 1, 2, 4 desc, 3 asc; + +select a, b, c, d, e, f from (select 44 a, 88 b, 13 c, 14 d, 15 e, 16 f) t group by 1,2,3,4,5,6 diff --git a/tests/queries/0_stateless/02010_lc_native.python b/tests/queries/0_stateless/02010_lc_native.python index 6b3cad89ed6..2667bba8432 100755 --- a/tests/queries/0_stateless/02010_lc_native.python +++ b/tests/queries/0_stateless/02010_lc_native.python @@ -3,10 +3,12 @@ import socket import os +import uuid CLICKHOUSE_HOST = os.environ.get('CLICKHOUSE_HOST', '127.0.0.1') CLICKHOUSE_PORT = int(os.environ.get('CLICKHOUSE_PORT_TCP', '900000')) CLICKHOUSE_DATABASE = os.environ.get('CLICKHOUSE_DATABASE', 'default') +CLICKHOUSE_QUERY_ID = uuid.uuid4().hex def writeVarUInt(x, ba): for 
_ in range(0, 9): @@ -111,7 +113,7 @@ def receiveHello(s): def serializeClientInfo(ba): writeStringBinary('default', ba) # initial_user - writeStringBinary('123456', ba) # initial_query_id + writeStringBinary(CLICKHOUSE_QUERY_ID, ba) # initial_query_id writeStringBinary('127.0.0.1:9000', ba) # initial_address ba.extend([0] * 8) # initial_query_start_time_microseconds ba.append(1) # TCP @@ -130,7 +132,7 @@ def serializeClientInfo(ba): def sendQuery(s, query): ba = bytearray() writeVarUInt(1, ba) # query - writeStringBinary('123456', ba) + writeStringBinary(CLICKHOUSE_QUERY_ID, ba) ba.append(1) # INITIAL_QUERY @@ -163,15 +165,22 @@ def sendEmptyBlock(s): s.sendall(ba) +def assertPacket(packet, expected): + assert(packet == expected), packet + def readHeader(s): - readVarUInt(s) # Data + packet_type = readVarUInt(s) + if packet_type == 2: # Exception + raise RuntimeError(readException(s)) + assertPacket(packet_type, 1) # Data + readStringBinary(s) # external table name # BlockInfo - readVarUInt(s) # 1 - readUInt8(s) # is_overflows - readVarUInt(s) # 2 - readUInt32(s) # bucket_num - readVarUInt(s) # 0 + assertPacket(readVarUInt(s), 1) # 1 + assertPacket(readUInt8(s), 0) # is_overflows + assertPacket(readVarUInt(s), 2) # 2 + assertPacket(readUInt32(s), 4294967295) # bucket_num + assertPacket(readVarUInt(s), 0) # 0 columns = readVarUInt(s) # rows rows = readVarUInt(s) # columns print("Rows {} Columns {}".format(rows, columns)) @@ -182,13 +191,12 @@ def readHeader(s): def readException(s): - assert(readVarUInt(s) == 2) code = readUInt32(s) name = readStringBinary(s) text = readStringBinary(s) readStringBinary(s) # trace - assert(readUInt8(s) == 0) # has_nested - print("code {}: {}".format(code, text.replace('DB::Exception:', ''))) + assertPacket(readUInt8(s), 0) # has_nested + return "code {}: {}".format(code, text.replace('DB::Exception:', '')) def insertValidLowCardinalityRow(): @@ -223,7 +231,7 @@ def insertValidLowCardinalityRow(): # Fin block sendEmptyBlock(s) - assert(readVarUInt(s) == 5) # End of stream + assertPacket(readVarUInt(s), 5) # End of stream s.close() @@ -256,7 +264,8 @@ def insertLowCardinalityRowWithIndexOverflow(): ba.extend([0] * 7 + [1]) # UInt64 index (overflow) s.sendall(ba) - readException(s) + assertPacket(readVarUInt(s), 2) + print(readException(s)) s.close() @@ -289,7 +298,8 @@ def insertLowCardinalityRowWithIncorrectDictType(): ba.extend([0] * 8) # UInt64 index (overflow) s.sendall(ba) - readException(s) + assertPacket(readVarUInt(s), 2) + print(readException(s)) s.close() diff --git a/tests/queries/0_stateless/02014_query_parameters.reference b/tests/queries/0_stateless/02014_query_parameters.reference new file mode 100644 index 00000000000..d126b1e773e --- /dev/null +++ b/tests/queries/0_stateless/02014_query_parameters.reference @@ -0,0 +1,4 @@ +1 2 +1 +4 +2 diff --git a/tests/queries/0_stateless/02014_query_parameters.sh b/tests/queries/0_stateless/02014_query_parameters.sh new file mode 100755 index 00000000000..b3d718fa578 --- /dev/null +++ b/tests/queries/0_stateless/02014_query_parameters.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# Tags: no-parallel + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS test_db"; + +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "CREATE DATABASE {db:Identifier}"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "CREATE TABLE {db:Identifier}.{tbl:Identifier} (id UInt64, col1 UInt64) ENGINE = MergeTree() ORDER BY id"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "INSERT INTO {db:Identifier}.{tbl:Identifier} VALUES (1,2)"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "SELECT * FROM {db:Identifier}.{tbl:Identifier}"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "OPTIMIZE TABLE {db:Identifier}.{tbl:Identifier}"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "ALTER TABLE {db:Identifier}.{tbl:Identifier} RENAME COLUMN col1 to col2"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "EXISTS TABLE {db:Identifier}.{tbl:Identifier}"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "INSERT INTO {db:Identifier}.{tbl:Identifier} VALUES (3,4)"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "SELECT col2 FROM {db:Identifier}.{tbl:Identifier} ORDER BY col2 DESC"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "DROP TABLE {db:Identifier}.{tbl:Identifier}"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "DROP DATABASE {db:Identifier}"; diff --git a/tests/queries/0_stateless/02015_column_default_dict_get_identifier.sql b/tests/queries/0_stateless/02015_column_default_dict_get_identifier.sql index f0ddb825abc..24a3b631388 100644 --- a/tests/queries/0_stateless/02015_column_default_dict_get_identifier.sql +++ b/tests/queries/0_stateless/02015_column_default_dict_get_identifier.sql @@ -3,7 +3,6 @@ DROP DATABASE IF EXISTS 02015_db; CREATE DATABASE 02015_db; -DROP TABLE IF EXISTS 02015_db.test_table; CREATE TABLE 02015_db.test_table ( key_column UInt64, @@ -15,7 +14,6 @@ ORDER BY key_column; INSERT INTO 02015_db.test_table VALUES (0, 0, 0); -DROP DICTIONARY IF EXISTS 02015_db.test_dictionary; CREATE DICTIONARY 02015_db.test_dictionary ( key_column UInt64 DEFAULT 0, @@ -26,7 +24,6 @@ PRIMARY KEY key_column LAYOUT(DIRECT()) SOURCE(CLICKHOUSE(DB '02015_db' TABLE 'test_table')); -DROP TABLE IF EXISTS 02015_db.test_table_default; CREATE TABLE 02015_db.test_table_default ( data_1 DEFAULT dictGetUInt64('02015_db.test_dictionary', 'data_column_1', toUInt64(0)), @@ -37,8 +34,8 @@ ENGINE=TinyLog; INSERT INTO 02015_db.test_table_default(data_1) VALUES (5); SELECT * FROM 02015_db.test_table_default; +DROP TABLE 02015_db.test_table_default; DROP DICTIONARY 02015_db.test_dictionary; DROP TABLE 02015_db.test_table; -DROP TABLE 02015_db.test_table_default; DROP DATABASE 02015_db; diff --git a/tests/queries/0_stateless/02026_storage_filelog_largefile.reference b/tests/queries/0_stateless/02026_storage_filelog_largefile.reference index 95240890a95..7ab314964ee 100644 --- a/tests/queries/0_stateless/02026_storage_filelog_largefile.reference +++ b/tests/queries/0_stateless/02026_storage_filelog_largefile.reference @@ -1,3 +1,2 @@ -2000000 -2000000 -2000000 +100000 +100000 diff --git a/tests/queries/0_stateless/02026_storage_filelog_largefile.sh b/tests/queries/0_stateless/02026_storage_filelog_largefile.sh index acd1c464334..c28d20c9e8a 100755 --- a/tests/queries/0_stateless/02026_storage_filelog_largefile.sh +++ 
b/tests/queries/0_stateless/02026_storage_filelog_largefile.sh @@ -18,7 +18,7 @@ rm -rf ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME:?}/* chmod 777 ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ -for i in {1..200} +for i in {1..10} do ${CLICKHOUSE_CLIENT} --query "insert into function file('${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/test$i.csv', 'CSV', 'k UInt32, v UInt32') select number, number from numbers(10000);" done @@ -28,14 +28,7 @@ ${CLICKHOUSE_CLIENT} --query "create table file_log(k UInt32, v UInt32) engine=F ${CLICKHOUSE_CLIENT} --query "select count() from file_log " -for i in {201..400} -do - ${CLICKHOUSE_CLIENT} --query "insert into function file('${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/test$i.csv', 'CSV', 'k UInt32, v UInt32') select number, number from numbers(10000);" -done - -${CLICKHOUSE_CLIENT} --query "select count() from file_log " - -for i in {401..600} +for i in {11..20} do ${CLICKHOUSE_CLIENT} --query "insert into function file('${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/test$i.csv', 'CSV', 'k UInt32, v UInt32') select number, number from numbers(10000);" done diff --git a/tests/queries/0_stateless/02097_default_dict_get_add_database.sql b/tests/queries/0_stateless/02097_default_dict_get_add_database.sql index af177566476..d4886c7b988 100644 --- a/tests/queries/0_stateless/02097_default_dict_get_add_database.sql +++ b/tests/queries/0_stateless/02097_default_dict_get_add_database.sql @@ -5,7 +5,6 @@ CREATE DATABASE 02097_db; USE 02097_db; -DROP TABLE IF EXISTS test_table; CREATE TABLE test_table ( key_column UInt64, @@ -15,7 +14,6 @@ CREATE TABLE test_table ENGINE = MergeTree ORDER BY key_column; -DROP DICTIONARY IF EXISTS test_dictionary; CREATE DICTIONARY test_dictionary ( key_column UInt64 DEFAULT 0, @@ -26,7 +24,6 @@ PRIMARY KEY key_column LAYOUT(DIRECT()) SOURCE(CLICKHOUSE(TABLE 'test_table')); -DROP TABLE IF EXISTS test_table_default; CREATE TABLE test_table_default ( data_1 DEFAULT dictGetUInt64('test_dictionary', 'data_column_1', toUInt64(0)), @@ -36,9 +33,8 @@ ENGINE=TinyLog; SELECT create_table_query FROM system.tables WHERE name = 'test_table_default' AND database = '02097_db'; +DROP TABLE test_table_default; DROP DICTIONARY test_dictionary; DROP TABLE test_table; -DROP TABLE test_table_default; - -DROP DATABASE 02097_db; +DROP DATABASE IF EXISTS 02097_db; diff --git a/tests/queries/0_stateless/02098_with_types_use_header.reference b/tests/queries/0_stateless/02098_with_types_use_header.reference index c1d70452d1d..fb79be8dccb 100644 --- a/tests/queries/0_stateless/02098_with_types_use_header.reference +++ b/tests/queries/0_stateless/02098_with_types_use_header.reference @@ -2,6 +2,10 @@ TSVWithNamesAndTypes OK OK OK +CustomSeparatedWithNamesAndTypes +OK +OK +OK CSVWithNamesAndTypes OK OK diff --git a/tests/queries/0_stateless/02098_with_types_use_header.sh b/tests/queries/0_stateless/02098_with_types_use_header.sh index 846696d18c0..5d88a994052 100755 --- a/tests/queries/0_stateless/02098_with_types_use_header.sh +++ b/tests/queries/0_stateless/02098_with_types_use_header.sh @@ -13,6 +13,10 @@ echo -e "x\ty\tz\nString\tDate\tUInt32\ntext\t2020-01-01\t1" | $CLICKHOUSE_CLIEN echo -e "y\tz\tx\nString\tDate\tUInt32\ntext\t2020-01-01\t1" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02098 FORMAT TSVWithNamesAndTypes" && echo 'OK' || echo 'FAIL' echo -e "x\tz\ty\nUInt32\tString\tDate\n1\ttext\t2020-01-01" | $CLICKHOUSE_CLIENT 
--input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02098 FORMAT TSVWithNamesAndTypes" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' +echo "CustomSeparatedWithNamesAndTypes" +echo -e "x\ty\tz\nString\tDate\tUInt32\ntext\t2020-01-01\t1" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02098 FORMAT CustomSeparatedWithNamesAndTypes" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' +echo -e "y\tz\tx\nString\tDate\tUInt32\ntext\t2020-01-01\t1" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02098 FORMAT CustomSeparatedWithNamesAndTypes" && echo 'OK' || echo 'FAIL' +echo -e "x\tz\ty\nUInt32\tString\tDate\n1\ttext\t2020-01-01" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02098 FORMAT CustomSeparatedWithNamesAndTypes" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' echo "CSVWithNamesAndTypes" echo -e "'x','y','z'\n'String','Date','UInt32'\n'text','2020-01-01',1" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02098 FORMAT CSVWithNamesAndTypes" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/02105_backslash_letter_commands.expect b/tests/queries/0_stateless/02105_backslash_letter_commands.expect new file mode 100755 index 00000000000..9c6f3e10227 --- /dev/null +++ b/tests/queries/0_stateless/02105_backslash_letter_commands.expect @@ -0,0 +1,47 @@ +#!/usr/bin/expect -f +# Tags: no-fasttest + +log_user 0 +set timeout 02 +match_max 100000 +# A default timeout action is to do nothing, change it to fail +expect_after { + timeout { + exit 1 + } +} + +set basedir [file dirname $argv0] +spawn bash -c "source $basedir/../shell_config.sh ; \$CLICKHOUSE_CLIENT_BINARY \$CLICKHOUSE_CLIENT_OPT --disable_suggestion" +expect ":) " + +# Send a command +send -- "\\ld;\r" +expect "Syntax error: *" +expect ":) " + +send -- "\\c;\r" +expect "Syntax error: *" +expect ":) " + +send -- " \\l ; \\d; \r" +expect "Syntax error (Multi-statements are not allowed): *" +expect ":) " + +send -- " \\l ;\r" +expect "SHOW DATABASES" +expect "system" +expect ":) " + +send -- "\\c system;\r" +#expect "USE system" +expect ":) " + +send -- " \\d like 'one'\\G\r" +expect "SHOW TABLES" +expect "Row 1:" +expect "name: one" +expect ":) " + +send -- "\4" +expect eof diff --git a/tests/queries/0_stateless/02105_backslash_letter_commands.reference b/tests/queries/0_stateless/02105_backslash_letter_commands.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02111_global_context_temporary_tables.reference b/tests/queries/0_stateless/02111_global_context_temporary_tables.reference new file mode 100644 index 00000000000..96651f9c959 --- /dev/null +++ b/tests/queries/0_stateless/02111_global_context_temporary_tables.reference @@ -0,0 +1,5 @@ +-- { echo } +SELECT * FROM remote('127.1', system.one, 1 IN id); -- { serverError UNKNOWN_TABLE } +SELECT * FROM remote('127.1', system.one, 1 IN dummy); -- { serverError UNKNOWN_TABLE } +SELECT * FROM remote('127.1', view(SELECT * FROM system.one), 1 IN id); -- { serverError UNKNOWN_TABLE } +SELECT * FROM remote('127.1', view(SELECT number AS id FROM numbers(2)), 1 IN id); -- { serverError UNKNOWN_TABLE } diff --git 
a/tests/queries/0_stateless/02111_global_context_temporary_tables.sql b/tests/queries/0_stateless/02111_global_context_temporary_tables.sql new file mode 100644 index 00000000000..96651f9c959 --- /dev/null +++ b/tests/queries/0_stateless/02111_global_context_temporary_tables.sql @@ -0,0 +1,5 @@ +-- { echo } +SELECT * FROM remote('127.1', system.one, 1 IN id); -- { serverError UNKNOWN_TABLE } +SELECT * FROM remote('127.1', system.one, 1 IN dummy); -- { serverError UNKNOWN_TABLE } +SELECT * FROM remote('127.1', view(SELECT * FROM system.one), 1 IN id); -- { serverError UNKNOWN_TABLE } +SELECT * FROM remote('127.1', view(SELECT number AS id FROM numbers(2)), 1 IN id); -- { serverError UNKNOWN_TABLE } diff --git a/tests/queries/0_stateless/02111_json_column_name_encoding.reference b/tests/queries/0_stateless/02111_json_column_name_encoding.reference new file mode 100644 index 00000000000..dd1bf2f5982 --- /dev/null +++ b/tests/queries/0_stateless/02111_json_column_name_encoding.reference @@ -0,0 +1,16 @@ +{ + "meta": + [ + { + "name": "length('�')", + "type": "UInt64" + } + ], + + "data": + [ + ["1"] + ], + + "rows": 1 +} diff --git a/tests/queries/0_stateless/02111_json_column_name_encoding.sql b/tests/queries/0_stateless/02111_json_column_name_encoding.sql new file mode 100644 index 00000000000..69af7507295 --- /dev/null +++ b/tests/queries/0_stateless/02111_json_column_name_encoding.sql @@ -0,0 +1,7 @@ +-- Tags: no-fasttest + +SET output_format_write_statistics = 0; + +SELECT + length('\x80') + FORMAT JSONCompact; diff --git a/tests/queries/0_stateless/02113_format_row.reference b/tests/queries/0_stateless/02113_format_row.reference new file mode 100644 index 00000000000..0ac3a15b115 --- /dev/null +++ b/tests/queries/0_stateless/02113_format_row.reference @@ -0,0 +1,20 @@ +0\t1970-01-01\n +1\t1970-01-02\n +2\t1970-01-03\n +3\t1970-01-04\n +4\t1970-01-05\n +0,"1970-01-01"\n +1,"1970-01-02"\n +2,"1970-01-03"\n +3,"1970-01-04"\n +4,"1970-01-05"\n +["0", "1970-01-01"]\n +["1", "1970-01-02"]\n +["2", "1970-01-03"]\n +["3", "1970-01-04"]\n +["4", "1970-01-05"]\n +\t\t\n\t\t\t0\n\t\t\t1970-01-01\n\t\t\n +\t\t\n\t\t\t1\n\t\t\t1970-01-02\n\t\t\n +\t\t\n\t\t\t2\n\t\t\t1970-01-03\n\t\t\n +\t\t\n\t\t\t3\n\t\t\t1970-01-04\n\t\t\n +\t\t\n\t\t\t4\n\t\t\t1970-01-05\n\t\t\n diff --git a/tests/queries/0_stateless/02113_format_row.sql b/tests/queries/0_stateless/02113_format_row.sql new file mode 100644 index 00000000000..93ee6d0f1dd --- /dev/null +++ b/tests/queries/0_stateless/02113_format_row.sql @@ -0,0 +1,5 @@ +select formatRow('TSVWithNamesAndTypes', number, toDate(number)) from numbers(5); +select formatRow('CSVWithNamesAndTypes', number, toDate(number)) from numbers(5); +select formatRow('JSONCompactEachRowWithNamesAndTypes', number, toDate(number)) from numbers(5); +select formatRow('XML', number, toDate(number)) from numbers(5); + diff --git a/tests/queries/0_stateless/02113_untuple_func_alias.reference b/tests/queries/0_stateless/02113_untuple_func_alias.reference new file mode 100644 index 00000000000..9985391d522 --- /dev/null +++ b/tests/queries/0_stateless/02113_untuple_func_alias.reference @@ -0,0 +1,2 @@ +ut.1 ut.2 ut.3 ut.4 ut2.1 ut2.2 ut2.3 ut2.4 +1 2 3 \N \N 3 2 1 diff --git a/tests/queries/0_stateless/02113_untuple_func_alias.sql b/tests/queries/0_stateless/02113_untuple_func_alias.sql new file mode 100644 index 00000000000..d39e6626d48 --- /dev/null +++ b/tests/queries/0_stateless/02113_untuple_func_alias.sql @@ -0,0 +1,2 @@ +SELECT untuple((1, 2, 3, b)) AS `ut`, untuple((NULL, 3, 2, a)) 
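+-- note: an alias on untuple() names the expanded columns, so `ut2` below is
+-- expected to produce ut2.1 .. ut2.4 in the output header (cf. the reference above)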
AS `ut2` +FROM (SELECT 1 AS a, NULL AS b) FORMAT TSVWithNames; diff --git a/tests/queries/0_stateless/02114_offset_fetch_without_order_by.reference b/tests/queries/0_stateless/02114_offset_fetch_without_order_by.reference new file mode 100644 index 00000000000..f3c09026c90 --- /dev/null +++ b/tests/queries/0_stateless/02114_offset_fetch_without_order_by.reference @@ -0,0 +1 @@ +Code: 628 diff --git a/tests/queries/0_stateless/02114_offset_fetch_without_order_by.sh b/tests/queries/0_stateless/02114_offset_fetch_without_order_by.sh new file mode 100755 index 00000000000..9c48a6b8f2a --- /dev/null +++ b/tests/queries/0_stateless/02114_offset_fetch_without_order_by.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT number from numbers(10) OFFSET 1 ROWS FETCH FIRST 1 ROWS ONLY;" | grep -oF 'Code: 628' diff --git a/tests/queries/0_stateless/02115_rewrite_local_join_right_distribute_table.reference b/tests/queries/0_stateless/02115_rewrite_local_join_right_distribute_table.reference new file mode 100644 index 00000000000..b9119b9d087 --- /dev/null +++ b/tests/queries/0_stateless/02115_rewrite_local_join_right_distribute_table.reference @@ -0,0 +1,36 @@ +1 +2 +3 +1 +2 +3 +SELECT a +FROM t1_all AS t1 +ALL INNER JOIN test_02115.t2_local AS t2 ON a = t2.a +1 +2 +3 +1 +2 +3 +1 +2 +3 +1 +2 +3 +SELECT a +FROM t1_all AS t1 +GLOBAL ALL INNER JOIN t2_all AS t2 ON a = t2.a +1 +1 +2 +2 +3 +3 +1 +1 +2 +2 +3 +3 diff --git a/tests/queries/0_stateless/02115_rewrite_local_join_right_distribute_table.sql b/tests/queries/0_stateless/02115_rewrite_local_join_right_distribute_table.sql new file mode 100644 index 00000000000..2eebb14a46f --- /dev/null +++ b/tests/queries/0_stateless/02115_rewrite_local_join_right_distribute_table.sql @@ -0,0 +1,33 @@ +-- Tags: global, no-parallel +CREATE DATABASE IF NOT EXISTS test_02115; +USE test_02115; + +DROP TABLE IF EXISTS t1_local; +DROP TABLE IF EXISTS t2_local; +DROP TABLE IF EXISTS t1_all; +DROP TABLE IF EXISTS t2_all; + +create table t1_local(a Int32) engine=MergeTree() order by a; +create table t2_local as t1_local; + +create table t1_all as t1_local engine Distributed(test_cluster_two_shards_localhost, test_02115, t1_local, rand()); +create table t2_all as t2_local engine Distributed(test_cluster_two_shards_localhost, test_02115, t2_local, rand()); + +insert into t1_local values(1), (2), (3); +insert into t2_local values(1), (2), (3); + +set distributed_product_mode = 'local'; +select * from t1_all t1 where t1.a in (select t2.a from t2_all t2); +explain syntax select t1.* from t1_all t1 join t2_all t2 on t1.a = t2.a; +select t1.* from t1_all t1 join t2_all t2 on t1.a = t2.a; + +set distributed_product_mode = 'global'; +select * from t1_all t1 where t1.a in (select t2.a from t2_all t2); +explain syntax select t1.* from t1_all t1 join t2_all t2 on t1.a = t2.a; +select t1.* from t1_all t1 join t2_all t2 on t1.a = t2.a; + +DROP TABLE t1_local; +DROP TABLE t2_local; +DROP TABLE t1_all; +DROP TABLE t2_all; +DROP DATABASE test_02115; diff --git a/tests/queries/0_stateless/02116_clickhouse_stderr.reference b/tests/queries/0_stateless/02116_clickhouse_stderr.reference new file mode 100644 index 00000000000..ca7f5b8df1f --- /dev/null +++ b/tests/queries/0_stateless/02116_clickhouse_stderr.reference @@ -0,0 +1,3 @@ +Ensure that a non-existent file still writes an error to stderr +File /no/such/file (logger.stderr) is
not writable +Ensure that the file will be created diff --git a/tests/queries/0_stateless/02116_clickhouse_stderr.sh b/tests/queries/0_stateless/02116_clickhouse_stderr.sh new file mode 100755 index 00000000000..4cda287681c --- /dev/null +++ b/tests/queries/0_stateless/02116_clickhouse_stderr.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +( + echo 'Ensure that a non-existent file still writes an error to stderr' + + test_dir=$(mktemp -d -t clickhouse.XXXXXX) + mkdir -p "$test_dir" + cd "$test_dir" || exit 1 + $CLICKHOUSE_SERVER_BINARY -- --logger.stderr=/no/such/file |& grep -o 'File /no/such/file (logger.stderr) is not writable' + rm -fr "${test_dir:?}" +) + +( + echo 'Ensure that the file will be created' + + test_dir=$(mktemp -d -t clickhouse.XXXXXX) + mkdir -p "$test_dir" + cd "$test_dir" || exit 1 + + stderr=$(mktemp -t clickhouse.XXXXXX) + $CLICKHOUSE_SERVER_BINARY -- --logger.stderr="$stderr" 2>/dev/null + # -s -- check that stderr was created and is not empty + test -s "$stderr" || exit 2 + rm "$stderr" + + rm -fr "${test_dir:?}" +) diff --git a/tests/queries/0_stateless/02116_interactive_hello.expect b/tests/queries/0_stateless/02116_interactive_hello.expect new file mode 100755 index 00000000000..1642ac91e42 --- /dev/null +++ b/tests/queries/0_stateless/02116_interactive_hello.expect @@ -0,0 +1,24 @@ +#!/usr/bin/expect -f +# Tags: no-fasttest + +log_user 0 +set timeout 60 +match_max 100000 + +# A default timeout action is to do nothing, change it to fail +expect_after { + timeout { + exit 1 + } +} + +set basedir [file dirname $argv0] +spawn bash -c "source $basedir/../shell_config.sh ; \$CLICKHOUSE_CLIENT_BINARY \$CLICKHOUSE_CLIENT_OPT --disable_suggestion" + +expect -re "ClickHouse client version \[\\d\]{2}.\[\\d\]{1,2}.\[\\d\]{1,2}.\[\\d\]{1,2}.\r" +expect -re "Connecting to database .* at localhost:9000 as user default.\r" +expect -re "Connected to ClickHouse server version \[\\d\]{2}.\[\\d\]{1,2}.\[\\d\]{1,2} revision .*\r" +expect ":) " + +send -- "" +expect eof diff --git a/tests/queries/0_stateless/02116_interactive_hello.reference b/tests/queries/0_stateless/02116_interactive_hello.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02116_tuple_element.reference b/tests/queries/0_stateless/02116_tuple_element.reference new file mode 100644 index 00000000000..121b08d02f1 --- /dev/null +++ b/tests/queries/0_stateless/02116_tuple_element.reference @@ -0,0 +1,25 @@ +1 +SELECT `t1.a` +FROM t_tuple_element +a +SELECT `t1.s` +FROM t_tuple_element +1 +SELECT `t1.a` +FROM t_tuple_element +2 +SELECT `t2.1` +FROM t_tuple_element +2 +SELECT `t2.1` +FROM t_tuple_element +1 2 +WITH (1, 2) AS t +SELECT + t.1, + t.2 +1 2 +WITH CAST(\'(1, 2)\', \'Tuple(a UInt32, b UInt32)\') AS t +SELECT + t.1, + tupleElement(t, \'b\') diff --git a/tests/queries/0_stateless/02116_tuple_element.sql b/tests/queries/0_stateless/02116_tuple_element.sql new file mode 100644 index 00000000000..4ce6e5cf136 --- /dev/null +++ b/tests/queries/0_stateless/02116_tuple_element.sql @@ -0,0 +1,42 @@ +DROP TABLE IF EXISTS t_tuple_element; + +CREATE TABLE t_tuple_element(t1 Tuple(a UInt32, s String), t2 Tuple(UInt32, String)) ENGINE = Memory; +INSERT INTO t_tuple_element VALUES ((1, 'a'), (2, 'b')); + +SET optimize_functions_to_subcolumns = 1; + +SELECT t1.1 FROM t_tuple_element; +EXPLAIN SYNTAX SELECT t1.1 FROM t_tuple_element; + +SELECT
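+-- note: tupleElement(t1, 2) is the functional form of t1.2; with
+-- optimize_functions_to_subcolumns = 1 both are expected to rewrite to a read
+-- of the named subcolumn `t1.s` (see the EXPLAIN SYNTAX lines in the reference)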
tupleElement(t1, 2) FROM t_tuple_element; +EXPLAIN SYNTAX SELECT tupleElement(t1, 2) FROM t_tuple_element; + +SELECT tupleElement(t1, 'a') FROM t_tuple_element; +EXPLAIN SYNTAX SELECT tupleElement(t1, 'a') FROM t_tuple_element; + +SELECT tupleElement(number, 1) FROM numbers(1); -- { serverError 43 } +SELECT tupleElement(t1) FROM t_tuple_element; -- { serverError 42 } +SELECT tupleElement(t1, 'b') FROM t_tuple_element; -- { serverError 47 } +SELECT tupleElement(t1, 0) FROM t_tuple_element; -- { serverError 127 } +SELECT tupleElement(t1, 3) FROM t_tuple_element; -- { serverError 127 } +SELECT tupleElement(t1, materialize('a')) FROM t_tuple_element; -- { serverError 43 } + +SELECT t2.1 FROM t_tuple_element; +EXPLAIN SYNTAX SELECT t2.1 FROM t_tuple_element; + +SELECT tupleElement(t2, 1) FROM t_tuple_element; +EXPLAIN SYNTAX SELECT tupleElement(t2, 1) FROM t_tuple_element; + +SELECT tupleElement(t2) FROM t_tuple_element; -- { serverError 42 } +SELECT tupleElement(t2, 'a') FROM t_tuple_element; -- { serverError 47 } +SELECT tupleElement(t2, 0) FROM t_tuple_element; -- { serverError 127 } +SELECT tupleElement(t2, 3) FROM t_tuple_element; -- { serverError 127 } +SELECT tupleElement(t2, materialize(1)) FROM t_tuple_element; -- { serverError 43 } + +DROP TABLE t_tuple_element; + +WITH (1, 2) AS t SELECT t.1, t.2; +EXPLAIN SYNTAX WITH (1, 2) AS t SELECT t.1, t.2; + +WITH (1, 2)::Tuple(a UInt32, b UInt32) AS t SELECT t.1, tupleElement(t, 'b'); +EXPLAIN SYNTAX WITH (1, 2)::Tuple(a UInt32, b UInt32) AS t SELECT t.1, tupleElement(t, 'b'); diff --git a/tests/queries/0_stateless/02117_custom_separated_with_names_and_types.reference b/tests/queries/0_stateless/02117_custom_separated_with_names_and_types.reference new file mode 100644 index 00000000000..bbe0c6d9fcc --- /dev/null +++ b/tests/queries/0_stateless/02117_custom_separated_with_names_and_types.reference @@ -0,0 +1,71 @@ +CustomSeparated + +01"hello" + +12"hello" + +23"hello" + +34"hello" + +45"hello" + +0 1 hello +1 2 hello +2 3 hello +3 4 hello +4 5 hello +CustomSeparatedWithNames + +"x""y""s" + +01"hello" + +12"hello" + +23"hello" + +34"hello" + +45"hello" + +0 1 hello +1 2 hello +2 3 hello +3 4 hello +4 5 hello +CustomSeparatedWithNamesAndTypes + +"x""y""s" + +"UInt64""UInt64""String" + +01"hello" + +12"hello" + +23"hello" + +34"hello" + +45"hello" + +0 1 hello +1 2 hello +2 3 hello +3 4 hello +4 5 hello +1 text 2020-01-01 +1 text 2020-01-01 +1 text 2020-01-01 +1 text 2020-01-01 +1 text 2020-01-01 +1 text 2020-01-01 +1 default 1970-01-01 +1 default 1970-01-01 +1 1970-01-01 +1 1970-01-01 +1 default 1970-01-01 +1 default 1970-01-01 +OK +OK diff --git a/tests/queries/0_stateless/02117_custom_separated_with_names_and_types.sh b/tests/queries/0_stateless/02117_custom_separated_with_names_and_types.sh new file mode 100755 index 00000000000..c2dbec4b3be --- /dev/null +++ b/tests/queries/0_stateless/02117_custom_separated_with_names_and_types.sh @@ -0,0 +1,84 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +CUSTOM_SETTINGS="SETTINGS format_custom_row_before_delimiter='', format_custom_row_after_delimiter='\n', format_custom_row_between_delimiter='\n', format_custom_result_before_delimiter='\n', format_custom_result_after_delimiter='\n', format_custom_field_delimiter='', format_custom_escaping_rule='CSV'" + +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_02117" +$CLICKHOUSE_CLIENT -q "CREATE TABLE test_02117 (x UInt64, y UInt64, s String) engine=Memory()" + +for format in CustomSeparated CustomSeparatedWithNames CustomSeparatedWithNamesAndTypes +do + echo $format + $CLICKHOUSE_CLIENT -q "SELECT number AS x, number + 1 AS y, 'hello' AS s FROM numbers(5) FORMAT $format $CUSTOM_SETTINGS" + $CLICKHOUSE_CLIENT -q "SELECT number AS x, number + 1 AS y, 'hello' AS s FROM numbers(5) FORMAT $format $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT -q "INSERT INTO test_02117 FORMAT $format $CUSTOM_SETTINGS" + $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" + $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" +done + +$CLICKHOUSE_CLIENT -q "DROP TABLE test_02117" +$CLICKHOUSE_CLIENT -q "CREATE TABLE test_02117 (x UInt32, y String DEFAULT 'default', z Date) engine=Memory()" + + +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, 'text' AS y, toDate('2020-01-01') AS z FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" +$CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" + +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, 'text' AS y, toDate('2020-01-01') AS z FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" +$CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" + + +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, 'text' AS y, toDate('2020-01-01') AS z FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=0 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" +$CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" + +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, 'text' AS y, toDate('2020-01-01') AS z FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=0 --input_format_with_types_use_header=0 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" +$CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" + + +$CLICKHOUSE_CLIENT -q "SELECT 'text' AS y, toDate('2020-01-01') AS z, toUInt32(1) AS x FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" +$CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" + +$CLICKHOUSE_CLIENT -q "SELECT 'text' AS y, toDate('2020-01-01') AS z, toUInt32(1) AS x FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" 
+$CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" +$CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" + + +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" +$CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" + +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" +$CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" + + +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=0 --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" +$CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" + +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=0 --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" +$CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" + + +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, [[1, 2, 3], [4, 5], []] as a FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" +$CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" + + +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, [[1, 2, 3], [4, 5], []] as a FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" +$CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" + + +$CLICKHOUSE_CLIENT -q "SELECT 'text' AS x, toDate('2020-01-01') AS y, toUInt32(1) AS z FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' + +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, 'text' as z, toDate('2020-01-01') AS y FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' + +$CLICKHOUSE_CLIENT -q "DROP TABLE test_02117" \ No newline at end of file diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference new file 
mode 100644 index 00000000000..2b391cd292e --- /dev/null +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -0,0 +1,66 @@ +CREATE TABLE system.aggregate_function_combinators\n(\n `name` String,\n `is_internal` UInt8\n)\nENGINE = SystemAggregateFunctionCombinators()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.asynchronous_inserts\n(\n `query` String,\n `database` String,\n `table` String,\n `format` String,\n `first_update` DateTime64(6),\n `last_update` DateTime64(6),\n `total_bytes` UInt64,\n `entries.query_id` Array(String),\n `entries.bytes` Array(UInt64),\n `entries.finished` Array(UInt8),\n `entries.exception` Array(String)\n)\nENGINE = AsynchronousInserts()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.asynchronous_metrics\n(\n `metric` String,\n `value` Float64\n)\nENGINE = SystemAsynchronousMetrics()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.build_options\n(\n `name` String,\n `value` String\n)\nENGINE = SystemBuildOptions()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.clusters\n(\n `cluster` String,\n `shard_num` UInt32,\n `shard_weight` UInt32,\n `replica_num` UInt32,\n `host_name` String,\n `host_address` String,\n `port` UInt16,\n `is_local` UInt8,\n `user` String,\n `default_database` String,\n `errors_count` UInt32,\n `slowdowns_count` UInt32,\n `estimated_recovery_time` UInt32\n)\nENGINE = SystemClusters()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.collations\n(\n `name` String,\n `language` Nullable(String)\n)\nENGINE = SystemTableCollations()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.columns\n(\n `database` String,\n `table` String,\n `name` String,\n `type` String,\n `position` UInt64,\n `default_kind` String,\n `default_expression` String,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `comment` String,\n `is_in_partition_key` UInt8,\n `is_in_sorting_key` UInt8,\n `is_in_primary_key` UInt8,\n `is_in_sampling_key` UInt8,\n `compression_codec` String,\n `character_octet_length` Nullable(UInt64),\n `numeric_precision` Nullable(UInt64),\n `numeric_precision_radix` Nullable(UInt64),\n `numeric_scale` Nullable(UInt64),\n `datetime_precision` Nullable(UInt64)\n)\nENGINE = SystemColumns()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.contributors\n(\n `name` String\n)\nENGINE = SystemContributors()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.current_roles\n(\n `role_name` String,\n `with_admin_option` UInt8,\n `is_default` UInt8\n)\nENGINE = SystemCurrentRoles()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.data_skipping_indices\n(\n `database` String,\n `table` String,\n `name` String,\n `type` String,\n `expr` String,\n `granularity` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks` UInt64\n)\nENGINE = SystemDataSkippingIndices()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.data_type_families\n(\n `name` String,\n `case_insensitive` UInt8,\n `alias_to` String\n)\nENGINE = SystemTableDataTypeFamilies()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.databases\n(\n `name` String,\n `engine` String,\n `data_path` String,\n `metadata_path` String,\n `uuid` UUID,\n `comment` String,\n `database` String\n)\nENGINE = SystemDatabases()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.detached_parts\n(\n 
`database` String,\n `table` String,\n `partition_id` Nullable(String),\n `name` String,\n `disk` String,\n `reason` Nullable(String),\n `min_block_number` Nullable(Int64),\n `max_block_number` Nullable(Int64),\n `level` Nullable(UInt32)\n)\nENGINE = SystemDetachedParts()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.dictionaries\n(\n `database` String,\n `name` String,\n `uuid` UUID,\n `status` Enum8(\'NOT_LOADED\' = 0, \'LOADED\' = 1, \'FAILED\' = 2, \'LOADING\' = 3, \'FAILED_AND_RELOADING\' = 4, \'LOADED_AND_RELOADING\' = 5, \'NOT_EXIST\' = 6),\n `origin` String,\n `type` String,\n `key.names` Array(String),\n `key.types` Array(String),\n `attribute.names` Array(String),\n `attribute.types` Array(String),\n `bytes_allocated` UInt64,\n `query_count` UInt64,\n `hit_rate` Float64,\n `found_rate` Float64,\n `element_count` UInt64,\n `load_factor` Float64,\n `source` String,\n `lifetime_min` UInt64,\n `lifetime_max` UInt64,\n `loading_start_time` DateTime,\n `last_successful_update_time` DateTime,\n `loading_duration` Float32,\n `last_exception` String,\n `comment` String\n)\nENGINE = SystemDictionaries()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.disks\n(\n `name` String,\n `path` String,\n `free_space` UInt64,\n `total_space` UInt64,\n `keep_free_space` UInt64,\n `type` String\n)\nENGINE = SystemDisks()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.distributed_ddl_queue\n(\n `entry` String,\n `entry_version` Nullable(UInt8),\n `initiator_host` Nullable(String),\n `initiator_port` Nullable(UInt16),\n `cluster` String,\n `query` String,\n `settings` Map(String, String),\n `query_create_time` DateTime,\n `host` Nullable(String),\n `port` Nullable(UInt16),\n `status` Nullable(Enum8(\'Inactive\' = 0, \'Active\' = 1, \'Finished\' = 2, \'Removing\' = 3, \'Unknown\' = 4)),\n `exception_code` Nullable(UInt16),\n `exception_text` Nullable(String),\n `query_finish_time` Nullable(DateTime),\n `query_duration_ms` Nullable(UInt64)\n)\nENGINE = SystemDDLWorkerQueue()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.distribution_queue\n(\n `database` String,\n `table` String,\n `data_path` String,\n `is_blocked` UInt8,\n `error_count` UInt64,\n `data_files` UInt64,\n `data_compressed_bytes` UInt64,\n `broken_data_files` UInt64,\n `broken_data_compressed_bytes` UInt64,\n `last_exception` String\n)\nENGINE = SystemDistributionQueue()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.enabled_roles\n(\n `role_name` String,\n `with_admin_option` UInt8,\n `is_current` UInt8,\n `is_default` UInt8\n)\nENGINE = SystemEnabledRoles()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.errors\n(\n `name` String,\n `code` Int32,\n `value` UInt64,\n `last_error_time` DateTime,\n `last_error_message` String,\n `last_error_trace` Array(UInt64),\n `remote` UInt8\n)\nENGINE = SystemErrors()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.events\n(\n `event` String,\n `value` UInt64,\n `description` String\n)\nENGINE = SystemEvents()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.formats\n(\n `name` String,\n `is_input` UInt8,\n `is_output` UInt8\n)\nENGINE = SystemFormats()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.functions\n(\n `name` String,\n `is_aggregate` UInt8,\n `case_insensitive` UInt8,\n `alias_to` String,\n `create_query` String,\n `origin` Enum8(\'System\' = 0, \'SQLUserDefined\' = 1, \'ExecutableUserDefined\' = 2)\n)\nENGINE 
= SystemFunctions()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.grants\n(\n `user_name` Nullable(String),\n `role_name` Nullable(String),\n `access_type` Enum8(\'SQLITE\' = -128, \'ODBC\' = -127, \'JDBC\' = -126, \'HDFS\' = -125, \'S3\' = -124, \'SOURCES\' = -123, \'ALL\' = -122, \'NONE\' = -121, \'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'MOVE PARTITION BETWEEN SHARDS\' = 62, \'CREATE USER\' = 63, \'ALTER USER\' = 64, \'DROP USER\' = 65, \'CREATE ROLE\' = 66, \'ALTER ROLE\' = 67, \'DROP ROLE\' = 68, \'ROLE ADMIN\' = 69, \'CREATE ROW POLICY\' = 70, \'ALTER ROW POLICY\' = 71, \'DROP ROW POLICY\' = 72, \'CREATE QUOTA\' = 73, \'ALTER QUOTA\' = 74, \'DROP QUOTA\' = 75, \'CREATE SETTINGS PROFILE\' = 76, \'ALTER SETTINGS PROFILE\' = 77, \'DROP SETTINGS PROFILE\' = 78, \'SHOW USERS\' = 79, \'SHOW ROLES\' = 80, \'SHOW ROW POLICIES\' = 81, \'SHOW QUOTAS\' = 82, \'SHOW SETTINGS PROFILES\' = 83, \'SHOW ACCESS\' = 84, \'ACCESS MANAGEMENT\' = 85, \'SYSTEM SHUTDOWN\' = 86, \'SYSTEM DROP DNS CACHE\' = 87, \'SYSTEM DROP MARK CACHE\' = 88, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 89, \'SYSTEM DROP MMAP CACHE\' = 90, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 91, \'SYSTEM DROP CACHE\' = 92, \'SYSTEM RELOAD CONFIG\' = 93, \'SYSTEM RELOAD SYMBOLS\' = 94, \'SYSTEM RELOAD DICTIONARY\' = 95, \'SYSTEM RELOAD MODEL\' = 96, \'SYSTEM RELOAD FUNCTION\' = 97, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 98, \'SYSTEM RELOAD\' = 99, \'SYSTEM RESTART DISK\' = 100, \'SYSTEM MERGES\' = 101, \'SYSTEM TTL MERGES\' = 102, \'SYSTEM FETCHES\' = 103, \'SYSTEM MOVES\' = 104, \'SYSTEM DISTRIBUTED SENDS\' = 105, \'SYSTEM REPLICATED SENDS\' = 106, \'SYSTEM SENDS\' = 107, \'SYSTEM REPLICATION QUEUES\' = 108, \'SYSTEM DROP REPLICA\' = 109, \'SYSTEM SYNC REPLICA\' = 110, \'SYSTEM RESTART REPLICA\' = 111, \'SYSTEM RESTORE REPLICA\' = 112, \'SYSTEM FLUSH DISTRIBUTED\' = 113, \'SYSTEM FLUSH LOGS\' = 114, \'SYSTEM FLUSH\' = 115, \'SYSTEM\' = 116, \'dictGet\' = 117, \'addressToLine\' = 118, 
\'addressToSymbol\' = 119, \'demangle\' = 120, \'INTROSPECTION\' = 121, \'FILE\' = 122, \'URL\' = 123, \'REMOTE\' = 124, \'MONGO\' = 125, \'MYSQL\' = 126, \'POSTGRES\' = 127),\n `database` Nullable(String),\n `table` Nullable(String),\n `column` Nullable(String),\n `is_partial_revoke` UInt8,\n `grant_option` UInt8\n)\nENGINE = SystemGrants()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.graphite_retentions\n(\n `config_name` String,\n `regexp` String,\n `function` String,\n `age` UInt64,\n `precision` UInt64,\n `priority` UInt16,\n `is_default` UInt8,\n `Tables.database` Array(String),\n `Tables.table` Array(String)\n)\nENGINE = SystemGraphite()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.licenses\n(\n `library_name` String,\n `license_type` String,\n `license_path` String,\n `license_text` String\n)\nENGINE = SystemLicenses()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.macros\n(\n `macro` String,\n `substitution` String\n)\nENGINE = SystemMacros()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.merge_tree_settings\n(\n `name` String,\n `value` String,\n `changed` UInt8,\n `description` String,\n `type` String\n)\nENGINE = SystemMergeTreeSettings()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.merges\n(\n `database` String,\n `table` String,\n `elapsed` Float64,\n `progress` Float64,\n `num_parts` UInt64,\n `source_part_names` Array(String),\n `result_part_name` String,\n `source_part_paths` Array(String),\n `result_part_path` String,\n `partition_id` String,\n `is_mutation` UInt8,\n `total_size_bytes_compressed` UInt64,\n `total_size_marks` UInt64,\n `bytes_read_uncompressed` UInt64,\n `rows_read` UInt64,\n `bytes_written_uncompressed` UInt64,\n `rows_written` UInt64,\n `columns_written` UInt64,\n `memory_usage` UInt64,\n `thread_id` UInt64,\n `merge_type` String,\n `merge_algorithm` String\n)\nENGINE = SystemMerges()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.metrics\n(\n `metric` String,\n `value` Int64,\n `description` String\n)\nENGINE = SystemMetrics()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.models\n(\n `name` String,\n `status` Enum8(\'NOT_LOADED\' = 0, \'LOADED\' = 1, \'FAILED\' = 2, \'LOADING\' = 3, \'FAILED_AND_RELOADING\' = 4, \'LOADED_AND_RELOADING\' = 5, \'NOT_EXIST\' = 6),\n `origin` String,\n `type` String,\n `loading_start_time` DateTime,\n `loading_duration` Float32,\n `last_exception` String\n)\nENGINE = SystemModels()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.mutations\n(\n `database` String,\n `table` String,\n `mutation_id` String,\n `command` String,\n `create_time` DateTime,\n `block_numbers.partition_id` Array(String),\n `block_numbers.number` Array(Int64),\n `parts_to_do_names` Array(String),\n `parts_to_do` Int64,\n `is_done` UInt8,\n `latest_failed_part` String,\n `latest_fail_time` DateTime,\n `latest_fail_reason` String\n)\nENGINE = SystemMutations()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.numbers\n(\n `number` UInt64\n)\nENGINE = SystemNumbers()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.numbers_mt\n(\n `number` UInt64\n)\nENGINE = SystemNumbers()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.one\n(\n `dummy` UInt8\n)\nENGINE = SystemOne()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.part_moves_between_shards\n(\n `database` String,\n `table` String,\n `task_name` String,\n 
`task_uuid` UUID,\n `create_time` DateTime,\n `part_name` String,\n `part_uuid` UUID,\n `to_shard` String,\n `dst_part_name` String,\n `update_time` DateTime,\n `state` String,\n `rollback` UInt8,\n `num_tries` UInt32,\n `last_exception` String\n)\nENGINE = SystemShardMoves()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.parts\n(\n `partition` String,\n `name` String,\n `uuid` UUID,\n `part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `secondary_indices_compressed_bytes` UInt64,\n `secondary_indices_uncompressed_bytes` UInt64,\n `secondary_indices_marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `is_frozen` UInt8,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `hash_of_all_files` String,\n `hash_of_uncompressed_files` String,\n `uncompressed_hash_of_compressed_files` String,\n `delete_ttl_info_min` DateTime,\n `delete_ttl_info_max` DateTime,\n `move_ttl_info.expression` Array(String),\n `move_ttl_info.min` Array(DateTime),\n `move_ttl_info.max` Array(DateTime),\n `default_compression_codec` String,\n `recompression_ttl_info.expression` Array(String),\n `recompression_ttl_info.min` Array(DateTime),\n `recompression_ttl_info.max` Array(DateTime),\n `group_by_ttl_info.expression` Array(String),\n `group_by_ttl_info.min` Array(DateTime),\n `group_by_ttl_info.max` Array(DateTime),\n `rows_where_ttl_info.expression` Array(String),\n `rows_where_ttl_info.min` Array(DateTime),\n `rows_where_ttl_info.max` Array(DateTime),\n `projections` Array(String),\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemParts()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.parts_columns\n(\n `partition` String,\n `name` String,\n `uuid` UUID,\n `part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `column` String,\n `type` String,\n `column_position` UInt64,\n `default_kind` String,\n `default_expression` String,\n `column_bytes_on_disk` UInt64,\n `column_data_compressed_bytes` UInt64,\n `column_data_uncompressed_bytes` UInt64,\n `column_marks_bytes` UInt64,\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemPartsColumns()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.privileges\n(\n `privilege` Enum8(\'SQLITE\' = -128, \'ODBC\' = -127, \'JDBC\' = -126, \'HDFS\' = -125, \'S3\' = -124, \'SOURCES\' = -123, \'ALL\' = -122, \'NONE\' = -121, \'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW 
COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'MOVE PARTITION BETWEEN SHARDS\' = 62, \'CREATE USER\' = 63, \'ALTER USER\' = 64, \'DROP USER\' = 65, \'CREATE ROLE\' = 66, \'ALTER ROLE\' = 67, \'DROP ROLE\' = 68, \'ROLE ADMIN\' = 69, \'CREATE ROW POLICY\' = 70, \'ALTER ROW POLICY\' = 71, \'DROP ROW POLICY\' = 72, \'CREATE QUOTA\' = 73, \'ALTER QUOTA\' = 74, \'DROP QUOTA\' = 75, \'CREATE SETTINGS PROFILE\' = 76, \'ALTER SETTINGS PROFILE\' = 77, \'DROP SETTINGS PROFILE\' = 78, \'SHOW USERS\' = 79, \'SHOW ROLES\' = 80, \'SHOW ROW POLICIES\' = 81, \'SHOW QUOTAS\' = 82, \'SHOW SETTINGS PROFILES\' = 83, \'SHOW ACCESS\' = 84, \'ACCESS MANAGEMENT\' = 85, \'SYSTEM SHUTDOWN\' = 86, \'SYSTEM DROP DNS CACHE\' = 87, \'SYSTEM DROP MARK CACHE\' = 88, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 89, \'SYSTEM DROP MMAP CACHE\' = 90, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 91, \'SYSTEM DROP CACHE\' = 92, \'SYSTEM RELOAD CONFIG\' = 93, \'SYSTEM RELOAD SYMBOLS\' = 94, \'SYSTEM RELOAD DICTIONARY\' = 95, \'SYSTEM RELOAD MODEL\' = 96, \'SYSTEM RELOAD FUNCTION\' = 97, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 98, \'SYSTEM RELOAD\' = 99, \'SYSTEM RESTART DISK\' = 100, \'SYSTEM MERGES\' = 101, \'SYSTEM TTL MERGES\' = 102, \'SYSTEM FETCHES\' = 103, \'SYSTEM MOVES\' = 104, \'SYSTEM DISTRIBUTED SENDS\' = 105, \'SYSTEM REPLICATED SENDS\' = 106, \'SYSTEM SENDS\' = 107, \'SYSTEM REPLICATION QUEUES\' = 108, \'SYSTEM DROP REPLICA\' = 109, \'SYSTEM SYNC REPLICA\' = 110, \'SYSTEM RESTART REPLICA\' = 111, \'SYSTEM RESTORE REPLICA\' = 112, \'SYSTEM FLUSH DISTRIBUTED\' = 113, \'SYSTEM FLUSH LOGS\' = 114, \'SYSTEM FLUSH\' = 115, \'SYSTEM\' = 116, \'dictGet\' = 117, \'addressToLine\' = 118, \'addressToSymbol\' = 119, \'demangle\' = 120, \'INTROSPECTION\' = 121, \'FILE\' = 122, \'URL\' = 123, \'REMOTE\' = 124, \'MONGO\' = 125, \'MYSQL\' = 126, \'POSTGRES\' = 127),\n `aliases` Array(String),\n `level` Nullable(Enum8(\'GLOBAL\' = 0, \'DATABASE\' = 1, \'TABLE\' = 2, \'DICTIONARY\' = 3, \'VIEW\' = 4, \'COLUMN\' = 5)),\n `parent_group` Nullable(Enum8(\'SQLITE\' = 
-128, \'ODBC\' = -127, \'JDBC\' = -126, \'HDFS\' = -125, \'S3\' = -124, \'SOURCES\' = -123, \'ALL\' = -122, \'NONE\' = -121, \'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'MOVE PARTITION BETWEEN SHARDS\' = 62, \'CREATE USER\' = 63, \'ALTER USER\' = 64, \'DROP USER\' = 65, \'CREATE ROLE\' = 66, \'ALTER ROLE\' = 67, \'DROP ROLE\' = 68, \'ROLE ADMIN\' = 69, \'CREATE ROW POLICY\' = 70, \'ALTER ROW POLICY\' = 71, \'DROP ROW POLICY\' = 72, \'CREATE QUOTA\' = 73, \'ALTER QUOTA\' = 74, \'DROP QUOTA\' = 75, \'CREATE SETTINGS PROFILE\' = 76, \'ALTER SETTINGS PROFILE\' = 77, \'DROP SETTINGS PROFILE\' = 78, \'SHOW USERS\' = 79, \'SHOW ROLES\' = 80, \'SHOW ROW POLICIES\' = 81, \'SHOW QUOTAS\' = 82, \'SHOW SETTINGS PROFILES\' = 83, \'SHOW ACCESS\' = 84, \'ACCESS MANAGEMENT\' = 85, \'SYSTEM SHUTDOWN\' = 86, \'SYSTEM DROP DNS CACHE\' = 87, \'SYSTEM DROP MARK CACHE\' = 88, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 89, \'SYSTEM DROP MMAP CACHE\' = 90, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 91, \'SYSTEM DROP CACHE\' = 92, \'SYSTEM RELOAD CONFIG\' = 93, \'SYSTEM RELOAD SYMBOLS\' = 94, \'SYSTEM RELOAD DICTIONARY\' = 95, \'SYSTEM RELOAD MODEL\' = 96, \'SYSTEM RELOAD FUNCTION\' = 97, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 98, \'SYSTEM RELOAD\' = 99, \'SYSTEM RESTART DISK\' = 100, \'SYSTEM MERGES\' = 101, \'SYSTEM TTL MERGES\' = 102, \'SYSTEM FETCHES\' = 103, \'SYSTEM MOVES\' = 104, \'SYSTEM DISTRIBUTED SENDS\' = 105, \'SYSTEM REPLICATED SENDS\' = 106, \'SYSTEM SENDS\' = 107, \'SYSTEM REPLICATION QUEUES\' = 108, \'SYSTEM DROP REPLICA\' = 109, \'SYSTEM SYNC REPLICA\' = 110, \'SYSTEM RESTART REPLICA\' = 111, \'SYSTEM RESTORE REPLICA\' = 112, \'SYSTEM FLUSH DISTRIBUTED\' = 113, \'SYSTEM FLUSH LOGS\' = 114, \'SYSTEM FLUSH\' = 115, \'SYSTEM\' = 116, \'dictGet\' = 117, \'addressToLine\' = 118, \'addressToSymbol\' = 119, \'demangle\' = 120, \'INTROSPECTION\' = 121, \'FILE\' = 122, \'URL\' = 123, \'REMOTE\' = 124, \'MONGO\' = 125, \'MYSQL\' = 126, \'POSTGRES\' = 127))\n)\nENGINE = 
SystemPrivileges()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.processes\n(\n `is_initial_query` UInt8,\n `user` String,\n `query_id` String,\n `address` IPv6,\n `port` UInt16,\n `initial_user` String,\n `initial_query_id` String,\n `initial_address` IPv6,\n `initial_port` UInt16,\n `interface` UInt8,\n `os_user` String,\n `client_hostname` String,\n `client_name` String,\n `client_revision` UInt64,\n `client_version_major` UInt64,\n `client_version_minor` UInt64,\n `client_version_patch` UInt64,\n `http_method` UInt8,\n `http_user_agent` String,\n `http_referer` String,\n `forwarded_for` String,\n `quota_key` String,\n `elapsed` Float64,\n `is_cancelled` UInt8,\n `read_rows` UInt64,\n `read_bytes` UInt64,\n `total_rows_approx` UInt64,\n `written_rows` UInt64,\n `written_bytes` UInt64,\n `memory_usage` Int64,\n `peak_memory_usage` Int64,\n `query` String,\n `thread_ids` Array(UInt64),\n `ProfileEvents` Map(String, UInt64),\n `Settings` Map(String, String),\n `current_database` String,\n `ProfileEvents.Names` Array(String),\n `ProfileEvents.Values` Array(UInt64),\n `Settings.Names` Array(String),\n `Settings.Values` Array(String)\n)\nENGINE = SystemProcesses()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.projection_parts\n(\n `partition` String,\n `name` String,\n `part_type` String,\n `parent_name` String,\n `parent_uuid` UUID,\n `parent_part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `parent_marks` UInt64,\n `parent_rows` UInt64,\n `parent_bytes_on_disk` UInt64,\n `parent_data_compressed_bytes` UInt64,\n `parent_data_uncompressed_bytes` UInt64,\n `parent_marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `is_frozen` UInt8,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `hash_of_all_files` String,\n `hash_of_uncompressed_files` String,\n `uncompressed_hash_of_compressed_files` String,\n `delete_ttl_info_min` DateTime,\n `delete_ttl_info_max` DateTime,\n `move_ttl_info.expression` Array(String),\n `move_ttl_info.min` Array(DateTime),\n `move_ttl_info.max` Array(DateTime),\n `default_compression_codec` String,\n `recompression_ttl_info.expression` Array(String),\n `recompression_ttl_info.min` Array(DateTime),\n `recompression_ttl_info.max` Array(DateTime),\n `group_by_ttl_info.expression` Array(String),\n `group_by_ttl_info.min` Array(DateTime),\n `group_by_ttl_info.max` Array(DateTime),\n `rows_where_ttl_info.expression` Array(String),\n `rows_where_ttl_info.min` Array(DateTime),\n `rows_where_ttl_info.max` Array(DateTime),\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemProjectionParts()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.projection_parts_columns\n(\n `partition` String,\n `name` String,\n `part_type` String,\n `parent_name` String,\n `parent_uuid` UUID,\n `parent_part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n 
`parent_marks` UInt64,\n `parent_rows` UInt64,\n `parent_bytes_on_disk` UInt64,\n `parent_data_compressed_bytes` UInt64,\n `parent_data_uncompressed_bytes` UInt64,\n `parent_marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `column` String,\n `type` String,\n `column_position` UInt64,\n `default_kind` String,\n `default_expression` String,\n `column_bytes_on_disk` UInt64,\n `column_data_compressed_bytes` UInt64,\n `column_data_uncompressed_bytes` UInt64,\n `column_marks_bytes` UInt64,\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemProjectionPartsColumns()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.quota_limits\n(\n `quota_name` String,\n `duration` UInt32,\n `is_randomized_interval` UInt8,\n `max_queries` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `max_read_rows` Nullable(UInt64),\n `max_read_bytes` Nullable(UInt64),\n `max_execution_time` Nullable(Float64)\n)\nENGINE = SystemQuotaLimits()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.quota_usage\n(\n `quota_name` String,\n `quota_key` String,\n `start_time` Nullable(DateTime),\n `end_time` Nullable(DateTime),\n `duration` Nullable(UInt32),\n `queries` Nullable(UInt64),\n `max_queries` Nullable(UInt64),\n `query_selects` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `query_inserts` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `errors` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `result_rows` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `result_bytes` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `read_rows` Nullable(UInt64),\n `max_read_rows` Nullable(UInt64),\n `read_bytes` Nullable(UInt64),\n `max_read_bytes` Nullable(UInt64),\n `execution_time` Nullable(Float64),\n `max_execution_time` Nullable(Float64)\n)\nENGINE = SystemQuotaUsage()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.quotas\n(\n `name` String,\n `id` UUID,\n `storage` String,\n `keys` Array(Enum8(\'user_name\' = 1, \'ip_address\' = 2, \'forwarded_ip_address\' = 3, \'client_key\' = 4)),\n `durations` Array(UInt32),\n `apply_to_all` UInt8,\n `apply_to_list` Array(String),\n `apply_to_except` Array(String)\n)\nENGINE = SystemQuotas()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.quotas_usage\n(\n `quota_name` String,\n `quota_key` String,\n `is_current` UInt8,\n `start_time` Nullable(DateTime),\n `end_time` Nullable(DateTime),\n `duration` Nullable(UInt32),\n `queries` Nullable(UInt64),\n `max_queries` Nullable(UInt64),\n `query_selects` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `query_inserts` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `errors` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `result_rows` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `result_bytes` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `read_rows` Nullable(UInt64),\n 
`max_read_rows` Nullable(UInt64),\n `read_bytes` Nullable(UInt64),\n `max_read_bytes` Nullable(UInt64),\n `execution_time` Nullable(Float64),\n `max_execution_time` Nullable(Float64)\n)\nENGINE = SystemQuotasUsage()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.replicas\n(\n `database` String,\n `table` String,\n `engine` String,\n `is_leader` UInt8,\n `can_become_leader` UInt8,\n `is_readonly` UInt8,\n `is_session_expired` UInt8,\n `future_parts` UInt32,\n `parts_to_check` UInt32,\n `zookeeper_path` String,\n `replica_name` String,\n `replica_path` String,\n `columns_version` Int32,\n `queue_size` UInt32,\n `inserts_in_queue` UInt32,\n `merges_in_queue` UInt32,\n `part_mutations_in_queue` UInt32,\n `queue_oldest_time` DateTime,\n `inserts_oldest_time` DateTime,\n `merges_oldest_time` DateTime,\n `part_mutations_oldest_time` DateTime,\n `oldest_part_to_get` String,\n `oldest_part_to_merge_to` String,\n `oldest_part_to_mutate_to` String,\n `log_max_index` UInt64,\n `log_pointer` UInt64,\n `last_queue_update` DateTime,\n `absolute_delay` UInt64,\n `total_replicas` UInt8,\n `active_replicas` UInt8,\n `last_queue_update_exception` String,\n `zookeeper_exception` String,\n `replica_is_active` Map(String, UInt8)\n)\nENGINE = SystemReplicas()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.replicated_fetches\n(\n `database` String,\n `table` String,\n `elapsed` Float64,\n `progress` Float64,\n `result_part_name` String,\n `result_part_path` String,\n `partition_id` String,\n `total_size_bytes_compressed` UInt64,\n `bytes_read_compressed` UInt64,\n `source_replica_path` String,\n `source_replica_hostname` String,\n `source_replica_port` UInt16,\n `interserver_scheme` String,\n `URI` String,\n `to_detached` UInt8,\n `thread_id` UInt64\n)\nENGINE = SystemReplicatedFetches()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.replicated_merge_tree_settings\n(\n `name` String,\n `value` String,\n `changed` UInt8,\n `description` String,\n `type` String\n)\nENGINE = SystemReplicatedMergeTreeSettings()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.replication_queue\n(\n `database` String,\n `table` String,\n `replica_name` String,\n `position` UInt32,\n `node_name` String,\n `type` String,\n `create_time` DateTime,\n `required_quorum` UInt32,\n `source_replica` String,\n `new_part_name` String,\n `parts_to_merge` Array(String),\n `is_detach` UInt8,\n `is_currently_executing` UInt8,\n `num_tries` UInt32,\n `last_exception` String,\n `last_attempt_time` DateTime,\n `num_postponed` UInt32,\n `postpone_reason` String,\n `last_postpone_time` DateTime,\n `merge_type` String\n)\nENGINE = SystemReplicationQueue()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.role_grants\n(\n `user_name` Nullable(String),\n `role_name` Nullable(String),\n `granted_role_name` String,\n `granted_role_is_default` UInt8,\n `with_admin_option` UInt8\n)\nENGINE = SystemRoleGrants()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.roles\n(\n `name` String,\n `id` UUID,\n `storage` String\n)\nENGINE = SystemRoles()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.row_policies\n(\n `name` String,\n `short_name` String,\n `database` String,\n `table` String,\n `id` UUID,\n `storage` String,\n `select_filter` Nullable(String),\n `is_restrictive` UInt8,\n `apply_to_all` UInt8,\n `apply_to_list` Array(String),\n `apply_to_except` Array(String)\n)\nENGINE = SystemRowPolicies()\nCOMMENT \'SYSTEM TABLE is 
built on the fly.\' +CREATE TABLE system.settings\n(\n `name` String,\n `value` String,\n `changed` UInt8,\n `description` String,\n `min` Nullable(String),\n `max` Nullable(String),\n `readonly` UInt8,\n `type` String\n)\nENGINE = SystemSettings()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.settings_profile_elements\n(\n `profile_name` Nullable(String),\n `user_name` Nullable(String),\n `role_name` Nullable(String),\n `index` UInt64,\n `setting_name` Nullable(String),\n `value` Nullable(String),\n `min` Nullable(String),\n `max` Nullable(String),\n `readonly` Nullable(UInt8),\n `inherit_profile` Nullable(String)\n)\nENGINE = SystemSettingsProfileElements()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.settings_profiles\n(\n `name` String,\n `id` UUID,\n `storage` String,\n `num_elements` UInt64,\n `apply_to_all` UInt8,\n `apply_to_list` Array(String),\n `apply_to_except` Array(String)\n)\nENGINE = SystemSettingsProfiles()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.stack_trace\n(\n `thread_name` String,\n `thread_id` UInt64,\n `query_id` String,\n `trace` Array(UInt64)\n)\nENGINE = SystemStackTrace()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.storage_policies\n(\n `policy_name` String,\n `volume_name` String,\n `volume_priority` UInt64,\n `disks` Array(String),\n `volume_type` String,\n `max_data_part_size` UInt64,\n `move_factor` Float32,\n `prefer_not_to_merge` UInt8\n)\nENGINE = SystemStoragePolicies()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.table_engines\n(\n `name` String,\n `supports_settings` UInt8,\n `supports_skipping_indices` UInt8,\n `supports_projections` UInt8,\n `supports_sort_order` UInt8,\n `supports_ttl` UInt8,\n `supports_replication` UInt8,\n `supports_deduplication` UInt8,\n `supports_parallel_insert` UInt8\n)\nENGINE = SystemTableEngines()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.table_functions\n(\n `name` String\n)\nENGINE = SystemTableFunctions()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.tables\n(\n `database` String,\n `name` String,\n `uuid` UUID,\n `engine` String,\n `is_temporary` UInt8,\n `data_paths` Array(String),\n `metadata_path` String,\n `metadata_modification_time` DateTime,\n `dependencies_database` Array(String),\n `dependencies_table` Array(String),\n `create_table_query` String,\n `engine_full` String,\n `as_select` String,\n `partition_key` String,\n `sorting_key` String,\n `primary_key` String,\n `sampling_key` String,\n `storage_policy` String,\n `total_rows` Nullable(UInt64),\n `total_bytes` Nullable(UInt64),\n `lifetime_rows` Nullable(UInt64),\n `lifetime_bytes` Nullable(UInt64),\n `comment` String,\n `has_own_data` UInt8,\n `loading_dependencies_database` Array(String),\n `loading_dependencies_table` Array(String),\n `loading_dependent_database` Array(String),\n `loading_dependent_table` Array(String),\n `table` String\n)\nENGINE = SystemTables()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.time_zones\n(\n `time_zone` String\n)\nENGINE = SystemTimeZones()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.user_directories\n(\n `name` String,\n `type` String,\n `params` String,\n `precedence` UInt64\n)\nENGINE = SystemUserDirectories()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.users\n(\n `name` String,\n `id` UUID,\n `storage` String,\n `auth_type` Enum8(\'no_password\' = 0, \'plaintext_password\' = 1, 
\'sha256_password\' = 2, \'double_sha1_password\' = 3, \'ldap\' = 4, \'kerberos\' = 5),\n `auth_params` String,\n `host_ip` Array(String),\n `host_names` Array(String),\n `host_names_regexp` Array(String),\n `host_names_like` Array(String),\n `default_roles_all` UInt8,\n `default_roles_list` Array(String),\n `default_roles_except` Array(String),\n `grantees_any` UInt8,\n `grantees_list` Array(String),\n `grantees_except` Array(String),\n `default_database` String\n)\nENGINE = SystemUsers()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.warnings\n(\n `message` String\n)\nENGINE = SystemWarnings()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.zeros\n(\n `zero` UInt8\n)\nENGINE = SystemZeros()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.zeros_mt\n(\n `zero` UInt8\n)\nENGINE = SystemZeros()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' diff --git a/tests/queries/0_stateless/02117_show_create_table_system.sql b/tests/queries/0_stateless/02117_show_create_table_system.sql new file mode 100644 index 00000000000..16861cc3f8e --- /dev/null +++ b/tests/queries/0_stateless/02117_show_create_table_system.sql @@ -0,0 +1,84 @@ +/* we will `use system` to bypass style check, +because `show create table` statement +cannot fit the requirement in check-sytle, which is as + +"# Queries to: +tables_with_database_column=( + system.tables + system.parts + system.detached_parts + system.parts_columns + system.columns + system.projection_parts + system.mutations +) +# should have database = currentDatabase() condition" + + */ +use system; +show create table aggregate_function_combinators; +show create table asynchronous_inserts; +show create table asynchronous_metrics; +show create table build_options; +show create table clusters; +show create table collations; +show create table columns; +show create table contributors; +show create table current_roles; +show create table data_skipping_indices; +show create table data_type_families; +show create table databases; +show create table detached_parts; +show create table dictionaries; +show create table disks; +show create table distributed_ddl_queue; +show create table distribution_queue; +show create table enabled_roles; +show create table errors; +show create table events; +show create table formats; +show create table functions; +show create table grants; +show create table graphite_retentions; +show create table licenses; +show create table macros; +show create table merge_tree_settings; +show create table merges; +show create table metrics; +show create table models; +show create table mutations; +show create table numbers; +show create table numbers_mt; +show create table one; +show create table part_moves_between_shards; +show create table parts; +show create table parts_columns; +show create table privileges; +show create table processes; +show create table projection_parts; +show create table projection_parts_columns; +show create table quota_limits; +show create table quota_usage; +show create table quotas; +show create table quotas_usage; +show create table replicas; +show create table replicated_fetches; +show create table replicated_merge_tree_settings; +show create table replication_queue; +show create table role_grants; +show create table roles; +show create table row_policies; +show create table settings; +show create table settings_profile_elements; +show create table settings_profiles; +show create table stack_trace; +show create table storage_policies; +show create table 
table_engines; +show create table table_functions; +show create table tables; +show create table time_zones; +show create table user_directories; +show create table users; +show create table warnings; +show create table zeros; +show create table zeros_mt; diff --git a/tests/queries/0_stateless/02118_show_create_table_rocksdb.reference b/tests/queries/0_stateless/02118_show_create_table_rocksdb.reference new file mode 100644 index 00000000000..9e487824e3e --- /dev/null +++ b/tests/queries/0_stateless/02118_show_create_table_rocksdb.reference @@ -0,0 +1 @@ +CREATE TABLE system.rocksdb\n(\n `database` String,\n `table` String,\n `name` String,\n `value` UInt64\n)\nENGINE = SystemRocksDB()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' diff --git a/tests/queries/0_stateless/02118_show_create_table_rocksdb.sql b/tests/queries/0_stateless/02118_show_create_table_rocksdb.sql new file mode 100644 index 00000000000..98a64c4b756 --- /dev/null +++ b/tests/queries/0_stateless/02118_show_create_table_rocksdb.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: In fasttest, ENABLE_LIBRARIES=0, so rocksdb engine is not enabled by default +show create table system.rocksdb; diff --git a/tests/queries/0_stateless/02121_pager.reference b/tests/queries/0_stateless/02121_pager.reference new file mode 100644 index 00000000000..7290ba859f4 --- /dev/null +++ b/tests/queries/0_stateless/02121_pager.reference @@ -0,0 +1,2 @@ +4 +4 diff --git a/tests/queries/0_stateless/02121_pager.sh b/tests/queries/0_stateless/02121_pager.sh new file mode 100755 index 00000000000..9ca9637116b --- /dev/null +++ b/tests/queries/0_stateless/02121_pager.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --pager 'wc -c' --query 'select 123' +$CLICKHOUSE_LOCAL --pager 'wc -c' --query 'select 123' diff --git a/tests/queries/0_stateless/02123_MySQLWire_regression.reference b/tests/queries/0_stateless/02123_MySQLWire_regression.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02123_MySQLWire_regression.sql b/tests/queries/0_stateless/02123_MySQLWire_regression.sql new file mode 100644 index 00000000000..504d2f2a521 --- /dev/null +++ b/tests/queries/0_stateless/02123_MySQLWire_regression.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS table_MySQLWire; +CREATE TABLE table_MySQLWire (x UInt64) ENGINE = File(MySQLWire); +INSERT INTO table_MySQLWire SELECT number FROM numbers(10); +-- regression for not initializing serializations +INSERT INTO table_MySQLWire SELECT number FROM numbers(10); +DROP TABLE table_MySQLWire; diff --git a/tests/queries/0_stateless/helpers/pure_http_client.py b/tests/queries/0_stateless/helpers/pure_http_client.py index 4e18ab3a0f4..9f79c4ac529 100644 --- a/tests/queries/0_stateless/helpers/pure_http_client.py +++ b/tests/queries/0_stateless/helpers/pure_http_client.py @@ -1,6 +1,6 @@ -import os -import io -import sys +import os +import io +import sys import requests import time import pandas as pd @@ -8,6 +8,7 @@ import pandas as pd CLICKHOUSE_HOST = os.environ.get('CLICKHOUSE_HOST', '127.0.0.1') CLICKHOUSE_PORT_HTTP = os.environ.get('CLICKHOUSE_PORT_HTTP', '8123') CLICKHOUSE_SERVER_URL_STR = 'http://' + ':'.join(str(s) for s in [CLICKHOUSE_HOST, CLICKHOUSE_PORT_HTTP]) + "/" +CLICKHOUSE_DATABASE = os.environ.get('CLICKHOUSE_DATABASE', 'test') class ClickHouseClient: def __init__(self, host = CLICKHOUSE_SERVER_URL_STR): @@ -19,8 +20,12 @@ class ClickHouseClient: for i in range(NUMBER_OF_TRIES): r = requests.post( - self.host, - params = {'timeout_before_checking_execution_speed': 120, 'max_execution_time': 6000}, + self.host, + params = { + 'timeout_before_checking_execution_speed': 120, + 'max_execution_time': 6000, + 'database': CLICKHOUSE_DATABASE + }, timeout = connection_timeout, data = query) if r.status_code == 200: @@ -35,7 +40,7 @@ class ClickHouseClient: raise ValueError(r.text) def query_return_df(self, query, connection_timeout = 1500): - data = self.query(query, connection_timeout) + data = self.query(query, connection_timeout) df = pd.read_csv(io.StringIO(data), sep = '\t') return df diff --git a/utils/check-style/check-style b/utils/check-style/check-style index 514f01ce4b9..a3f0612510d 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -13,7 +13,21 @@ # and then to run formatter only for the specified files. ROOT_PATH=$(git rev-parse --show-toplevel) -EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|memcpy/|consistent-hashing/|Parsers/New' +EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|memcpy/|consistent-hashing' + +# From [1]: +# But since array_to_string_internal() in array.c still loops over array +# elements and concatenates them into a string, it's probably not more +# efficient than the looping solutions proposed, but it's more readable. 
+# +# [1]: https://stackoverflow.com/a/15394738/328260 +function in_array() +{ + local IFS="|" + local value=$1 && shift + + [[ "${IFS}${*}${IFS}" =~ "${IFS}${value}${IFS}" ]] +} find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null | grep -vP $EXCLUDE_DIRS | @@ -40,24 +54,78 @@ find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/n grep -vP $EXCLUDE_DIRS | while read i; do $ROOT_PATH/utils/check-style/double-whitespaces.pl < $i || echo -e "^ File $i contains double whitespaces\n"; done -# Unused ErrorCodes -# NOTE: to fix automatically, replace echo with: -# sed -i "/extern const int $code/d" $file -find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | - grep -vP $EXCLUDE_DIRS | - xargs grep -l -P 'extern const int [_A-Z]+' | while read file; do grep -P 'extern const int [_A-Z]+;' $file | sed -r -e 's/^.*?extern const int ([_A-Z]+);.*?$/\1/' | while read code; do grep -q "ErrorCodes::$code" $file || echo "ErrorCode $code is defined but not used in file $file"; done; done +# Unused/Undefined/Duplicates ErrorCodes/ProfileEvents/CurrentMetrics +declare -A EXTERN_TYPES +EXTERN_TYPES[ErrorCodes]=int +EXTERN_TYPES[ProfileEvents]=Event +EXTERN_TYPES[CurrentMetrics]=Metric +declare -A EXTERN_ALLOWED_CHARS +EXTERN_ALLOWED_CHARS[ErrorCodes]='_A-Z' +EXTERN_ALLOWED_CHARS[ProfileEvents]='_A-Za-z' +EXTERN_ALLOWED_CHARS[CurrentMetrics]='_A-Za-z' +EXTERN_TYPES_EXCLUDES=( + ProfileEvents::global_counters + ProfileEvents::Event + ProfileEvents::Count + ProfileEvents::Counters + ProfileEvents::end + ProfileEvents::increment + ProfileEvents::getName + ProfileEvents::Type + ProfileEvents::TypeEnum + ProfileEvents::dumpToMapColumn + ProfileEvents::LOCAL_NAME -# Undefined ErrorCodes -# NOTE: to fix automatically, replace echo with: -# ( grep -q -F 'namespace ErrorCodes' $file && sed -i -r "0,/(\s*)extern const int [_A-Z]+/s//\1extern const int $code;\n&/" $file || awk '{ print; if (ns == 1) { ns = 2 }; if (ns == 2) { ns = 0; print "namespace ErrorCodes\n{\n extern const int '$code';\n}" } }; /namespace DB/ { ns = 1; };' < $file > ${file}.tmp && mv ${file}.tmp $file ) -find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | - grep -vP $EXCLUDE_DIRS | - xargs grep -l -P 'ErrorCodes::[_A-Z]+' | while read file; do grep -P 'ErrorCodes::[_A-Z]+' $file | sed -r -e 's/^.*?ErrorCodes::([_A-Z]+).*?$/\1/' | while read code; do grep -q "extern const int $code" $file || echo "ErrorCode $code is used in file $file but not defined"; done; done + CurrentMetrics::add + CurrentMetrics::sub + CurrentMetrics::set + CurrentMetrics::end + CurrentMetrics::Increment + CurrentMetrics::Metric + CurrentMetrics::values +) +for extern_type in ${!EXTERN_TYPES[@]}; do + type_of_extern=${EXTERN_TYPES[$extern_type]} + allowed_chars=${EXTERN_ALLOWED_CHARS[$extern_type]} -# Duplicate ErrorCodes -find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | - grep -vP $EXCLUDE_DIRS | - xargs grep -l -P 'ErrorCodes::[_A-Z]+' | while read file; do grep -P 'extern const int [_A-Z]+;' $file | sort | uniq -c | grep -v -P ' +1 ' && echo "Duplicate ErrorCode in file $file"; done + # Unused + # NOTE: to fix automatically, replace echo with: + # sed -i "/extern const $type_of_extern $val/d" $file + find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | { + grep -vP $EXCLUDE_DIRS | xargs grep -l -P "extern const $type_of_extern [$allowed_chars]+" + } | while read file; do + grep -P "extern const $type_of_extern [$allowed_chars]+;" 
$file | sed -r -e "s/^.*?extern const $type_of_extern ([$allowed_chars]+);.*?$/\1/" | while read val; do + if ! grep -q "$extern_type::$val" $file; then + # Excludes for SOFTWARE_EVENT/HARDWARE_EVENT/CACHE_EVENT in ThreadProfileEvents.cpp + if [[ ! $extern_type::$val =~ ProfileEvents::Perf.* ]]; then + echo "$extern_type::$val is defined but not used in file $file" + fi + fi + done + done + + # Undefined + # NOTE: to fix automatically, replace echo with: + # ( grep -q -F 'namespace $extern_type' $file && sed -i -r "0,/(\s*)extern const $type_of_extern [$allowed_chars]+/s//\1extern const $type_of_extern $val;\n&/" $file || awk '{ print; if (ns == 1) { ns = 2 }; if (ns == 2) { ns = 0; print "namespace $extern_type\n{\n extern const $type_of_extern '$val';\n}" } }; /namespace DB/ { ns = 1; };' < $file > ${file}.tmp && mv ${file}.tmp $file ) + find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | { + grep -vP $EXCLUDE_DIRS | xargs grep -l -P "$extern_type::[$allowed_chars]+" + } | while read file; do + grep -P "$extern_type::[$allowed_chars]+" $file | sed -r -e "s/^.*?$extern_type::([$allowed_chars]+).*?$/\1/" | while read val; do + if ! grep -q "extern const $type_of_extern $val" $file; then + if ! in_array "$extern_type::$val" "${EXTERN_TYPES_EXCLUDES[@]}"; then + echo "$extern_type::$val is used in file $file but not defined" + fi + fi + done + done + + # Duplicates + find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | { + grep -vP $EXCLUDE_DIRS | xargs grep -l -P "$extern_type::[$allowed_chars]+" + } | while read file; do + grep -P "extern const $type_of_extern [$allowed_chars]+;" $file | sort | uniq -c | grep -v -P ' +1 ' && echo "Duplicate $extern_type in file $file" + done +done # Three or more consecutive empty lines find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | diff --git a/utils/iotest/iotest.cpp b/utils/iotest/iotest.cpp index b327355d07b..91f097693e4 100644 --- a/utils/iotest/iotest.cpp +++ b/utils/iotest/iotest.cpp @@ -156,7 +156,11 @@ int mainImpl(int argc, char ** argv) pool.scheduleOrThrowOnError([=]{ thread(fd, mode, min_offset, max_offset, block_size, count); }); pool.wait(); - fsync(fd); + #if defined(OS_DARWIN) + fsync(fd); + #else + fdatasync(fd); + #endif watch.stop(); diff --git a/utils/iotest/iotest_nonblock.cpp b/utils/iotest/iotest_nonblock.cpp index 524d6298da5..430e951d36b 100644 --- a/utils/iotest/iotest_nonblock.cpp +++ b/utils/iotest/iotest_nonblock.cpp @@ -136,8 +136,13 @@ int mainImpl(int argc, char ** argv) for (size_t i = 0; i < descriptors; ++i) { +#if defined(OS_DARWIN) if (fsync(fds[i])) throwFromErrno("Cannot fsync", ErrorCodes::CANNOT_FSYNC); +#else + if (fdatasync(fds[i])) + throwFromErrno("Cannot fdatasync", ErrorCodes::CANNOT_FSYNC); +#endif } watch.stop(); diff --git a/website/README.md b/website/README.md index f02deb0ad91..c6743b5dcb9 100644 --- a/website/README.md +++ b/website/README.md @@ -9,10 +9,9 @@ pip3 install -r requirements.txt # This is needed only when documentation is included sudo npm install -g purify-css amphtml-validator -sudo apt install wkhtmltopdf virtualenv build -./build.py --skip-multi-page --skip-single-page --skip-amp --skip-pdf --skip-blog --skip-git-log --skip-docs --livereload 8080 +./build.py --skip-multi-page --skip-single-page --skip-amp --skip-blog --skip-git-log --skip-docs --livereload 8080 # Open the web browser and go to http://localhost:8080/ ``` @@ -20,11 +19,11 @@ virtualenv build # How to quickly test the blog ``` -./build.py 
--skip-multi-page --skip-single-page --skip-amp --skip-pdf --skip-git-log --skip-docs --livereload 8080
+./build.py --skip-multi-page --skip-single-page --skip-amp --skip-git-log --skip-docs --livereload 8080
```

# How to quickly test the broken links in docs

```
-./build.py --skip-multi-page --skip-amp --skip-pdf --skip-blog --skip-git-log --lang en --livereload 8080
+./build.py --skip-multi-page --skip-amp --skip-blog --skip-git-log --lang en --livereload 8080
```
diff --git a/website/benchmark/benchmark.js b/website/benchmark/benchmark.js index bd5ec3c083e..09607b27263 100644 --- a/website/benchmark/benchmark.js +++ b/website/benchmark/benchmark.js
[The hunks here (@@ -40,7 +40,7 @@ and @@ -63,7 +63,7 @@) update the HTML strings that generate_selectors() builds for the "Compare" selector row, the selector buttons, and the "Run" row; the inline markup was lost in extraction and is not reproduced here.]
diff --git a/website/benchmark/hardware/index.html b/website/benchmark/hardware/index.html index 6dc12890ef4..260a928184d 100644 --- a/website/benchmark/hardware/index.html +++ b/website/benchmark/hardware/index.html
[The hunks @@ -13,11 +13,11 @@ and @@ -27,21 +27,21 @@ adjust the markup around the "ClickHouse {{ title }}" page header and the "Relative query processing time (lower is better)", "Full results", and "Comments" sections; the HTML tags were lost in extraction. Surviving text context:]
Submit your own results: https://clickhouse.com/docs/en/operations/performance-test/
Results for Lenovo B580 Laptop are from Ragıp Ünal. 16GB RAM 1600 GHz, 240GB SSD, Intel(R) Core(TM) i5-3210M CPU @ 2.50GHz (2 Core / 4 HT)
@@ -78,6 +78,10 @@ Results for 2x AMD EPYC 7742 (128 physical cores, 1 TB DDR4-3200 RAM) from Ye
Results for ASUS A15 (Ryzen laptop) are from Kimmo Linna.
Results for MacBook Air M1 are from Denis Glazachev.
Results for Xeon Gold 6140 are from Shiv Iyer (ChistaDATA Labs).
+Comparison of EBS and EFS is from Ramazan Polat.
+Results for Hetzner and Scaleway are from Anthony Najjar Simon (Panelbear).
+Results for GCP are from Vy Nguyen Tan.
+Results for ThinkPad P15 are from Mikhail Shiryaev.
diff --git a/website/benchmark/hardware/results/aws_c6g_2xlarge.json b/website/benchmark/hardware/results/aws_c6g_2xlarge.json new file mode 100644 index 00000000000..e970e73b919 --- /dev/null +++ b/website/benchmark/hardware/results/aws_c6g_2xlarge.json @@ -0,0 +1,54 @@ +[ + { + "system": "AWS c6g.2xlarge (Graviton 2)", + "system_full": "AWS c6g.2xlarge (Graviton 2) 8 vCPU, 16 GiB RAM, EBS", + "time": "2021-11-21 00:00:00", + "kind": "cloud", + "result": + [ +[0.002, 0.001, 0.001], +[0.035, 0.022, 0.022], +[0.084, 0.045, 0.045], +[0.745, 0.056, 0.056], +[1.397, 0.126, 0.125], +[2.237, 0.498, 0.499], +[0.063, 0.052, 0.052], +[0.025, 0.023, 0.023], +[1.295, 0.544, 0.542], +[1.655, 0.656, 0.655], +[0.769, 0.317, 0.316], +[1.043, 0.362, 0.360], +[1.938, 0.861, 0.854], +[3.563, 1.131, 1.104], +[2.152, 1.036, 1.014], +[0.967, 0.917, 0.900], +[4.259, 2.678, 2.724], +[3.342, 1.741, 1.704], +[8.303, 4.812, 4.794], +[0.680, 0.079, 0.074], +[18.753, 1.239, 1.224], +[20.694, 1.362, 1.333], +[38.987, 2.967, 2.937], +[31.357, 1.420, 1.404], +[4.471, 0.421, 0.413], +[1.633, 0.360, 0.358], +[4.554, 0.423, 0.417], +[18.076, 1.133, 1.118], +[15.164, 1.762, 1.747], +[0.678, 0.656, 0.651], +[3.504, 0.829, 0.817], +[9.359, 1.104, 1.088], +[8.794, 5.886, 5.848], +[19.039, 4.025, 4.007], +[19.061, 4.015, 4.053], +[1.289, 1.194, 1.194], +[0.287, 0.217, 0.208], +[0.113, 0.094, 0.092], +[0.111, 0.084, 0.086], +[0.539, 0.447, 0.439], +[0.072, 0.033, 0.025], +[0.042, 0.019, 0.039], +[0.005, 0.011, 0.004] + ] + } +] diff --git a/website/benchmark/hardware/results/core_i5_9600K_asus_z390.json b/website/benchmark/hardware/results/core_i5_9600K_asus_z390.json new file mode 100644 index 00000000000..cf36dd37d04 --- /dev/null +++ b/website/benchmark/hardware/results/core_i5_9600K_asus_z390.json @@ -0,0 +1,54 @@ +[ + { + "system": "ASUS Z390-Plus Server", + "system_full": "ASUS Z390-Plus Server with Intel I5-9600K and DDR4 16GB", + "time": "2021-11-05 03:00:00", + "kind": "server", + "result": + [ + [0.001, 0.001, 0.001], + [0.021, 0.008, 0.008], + [0.104, 0.029, 0.029], + [0.404, 0.047, 0.045], + [0.409, 0.103, 0.102], + [0.647, 0.313, 0.312], + [0.024, 0.018, 0.020], + [0.034, 0.009, 0.009], + [0.710, 0.618, 0.612], + [0.826, 0.700, 0.700], + [0.439, 0.195, 0.195], + [0.461, 0.233, 0.231], + [1.148, 1.073, 1.065], + [1.508, 1.366, 1.373], + [1.167, 1.085, 1.080], + [1.237, 1.207, 1.203], + [3.149, 3.082, 3.079], + [1.808, 1.726, 1.737], + [6.510, 6.457, 6.477], + [0.412, 0.057, 0.057], + [4.543, 1.149, 1.133], + [5.162, 1.291, 1.299], + [9.613, 3.176, 3.177], + [7.753, 1.212, 1.179], + [1.306, 0.318, 0.315], + [0.635, 0.277, 0.280], + [1.303, 0.319, 0.319], + [4.542, 1.299, 1.287], + [3.893, 1.934, 1.925], + [3.651, 3.642, 3.645], + [1.252, 0.815, 0.813], + [2.869, 1.534, 1.543], + [8.087, 7.939, 8.006], + [6.254, 5.169, 5.208], + [6.390, 5.248, 5.248], + [2.077, 2.040, 2.051], + [0.136, 0.127, 0.119], + [0.059, 0.051, 0.048], + [0.061, 0.045, 0.044], + [0.261, 0.248, 0.240], + [0.046, 0.015, 0.014], + [0.035, 0.019, 0.014], + [0.020, 0.003, 0.002] + ] + } +] diff --git a/website/benchmark/hardware/results/core_i7_11800h_lenovo_p15.json b/website/benchmark/hardware/results/core_i7_11800h_lenovo_p15.json new file mode 100644 index 00000000000..f42be93e1e3 --- /dev/null +++ b/website/benchmark/hardware/results/core_i7_11800h_lenovo_p15.json @@ -0,0 +1,54 @@ +[ + { + "system": "ThinkPad P15", + "system_full": "Lenovo ThinkPad P15, i7-11800H @ 2.30GHz, 16 cores, 32 GiB RAM, NVMe", + "time": "2021-11-19 00:00:00", + "kind": 
"laptop", + "result": + [ +[0.001, 0.001, 0.001], +[0.013, 0.008, 0.007], +[0.036, 0.025, 0.024], +[0.098, 0.046, 0.040], +[0.139, 0.102, 0.100], +[0.368, 0.279, 0.278], +[0.018, 0.015, 0.017], +[0.025, 0.008, 0.009], +[0.586, 0.527, 0.533], +[0.690, 0.579, 0.554], +[0.224, 0.171, 0.162], +[0.244, 0.201, 0.191], +[0.996, 0.854, 0.871], +[1.339, 1.199, 1.159], +[1.116, 1.073, 1.045], +[1.177, 1.060, 1.084], +[3.307, 3.236, 3.182], +[1.958, 1.789, 1.835], +[6.079, 5.883, 5.895], +[0.109, 0.051, 0.048], +[1.429, 0.898, 0.819], +[1.626, 1.023, 0.937], +[3.390, 2.296, 2.381], +[2.249, 0.997, 0.992], +[0.422, 0.260, 0.299], +[0.285, 0.230, 0.213], +[0.419, 0.256, 0.268], +[1.411, 0.777, 0.830], +[1.828, 1.305, 1.283], +[3.556, 3.725, 3.603], +[0.805, 0.643, 0.650], +[1.369, 1.049, 1.033], +[7.665, 7.623, 7.601], +[5.305, 4.513, 4.691], +[5.370, 4.686, 4.874], +[1.756, 1.492, 1.579], +[0.143, 0.132, 0.131], +[0.058, 0.057, 0.056], +[0.068, 0.048, 0.051], +[0.339, 0.298, 0.304], +[0.032, 0.022, 0.023], +[0.018, 0.011, 0.015], +[0.010, 0.002, 0.004] + ] + } +] diff --git a/website/benchmark/hardware/results/efs_vs_ebs.json b/website/benchmark/hardware/results/efs_vs_ebs.json new file mode 100644 index 00000000000..8cbc89f58da --- /dev/null +++ b/website/benchmark/hardware/results/efs_vs_ebs.json @@ -0,0 +1,107 @@ +[ + { + "system": "t3.xlarge, EBS", + "system_full": "t3.xlarge, EBS 1 TiB gp2, 3000 IOPS", + "time": "2021-11-08 00:00:00", + "kind": "cloud", + "result": + [ +[0.001,0.002,0.001], +[0.053,0.04,0.038], +[0.254,0.136,0.121], +[0.524,0.194,0.174], +[0.71,0.375,0.376], +[1.575,1.27,1.225], +[0.088,0.076,0.075], +[0.082,0.031,0.032], +[2.326,1.976,1.973], +[2.751,2.244,2.183], +[0.953,0.704,0.669], +[1.284,0.831,0.771], +[3.409,2.972,3.072], +[4.786,4.218,4.286], +[4.686,4.061,4.058], +[3.539,3.287,3.191], +[12.088,11.476,11.371], +[8.679,7.746,7.719], +[24.023,22.915,22.783], +[0.777,0.489,0.238], +[8.885,4.196,4.088], +[10.116,4.387,4.215], +[19.825,9.816,9.468], +[15.399,4.741,4.668], +[2.502,1.261,1.121], +[1.358,0.972,0.982], +[1.865,1.136,1.132], +[8.788,3.68,3.472], +[9.6,5.466,5.405], +[4.733,4.742,4.798], +[3.531,2.568,2.431], +[5.217,3.306,3.327], +[21.862,21.028,20.748], +[19.793,15.297,15.241], +[19.678,15.219,15.316], +[5.943,5.372,5.428], +[0.284,0.256,0.213], +[0.102,0.085,0.1], +[0.115,0.095,0.079], +[0.602,0.549,0.722], +[0.063,0.023,0.023], +[0.064,0.02,0.02], +[0.008,0.005,0.004] + ] + }, + + { + "system": "t3.xlarge, EFS", + "system_full": "t3.xlarge, EFS 1 TiB gp, prov. 
at 1000 MiB/s", + "time": "2021-11-08 00:00:00", + "kind": "cloud", + "result": + [ +[0.001,0.001,0.001], +[0.074,0.036,0.043], +[0.482,0.13,0.142], +[1.623,0.205,0.209], +[1.823,0.425,0.361], +[3.075,1.274,1.237], +[0.11,0.076,0.076], +[0.152,0.039,0.037], +[3.837,1.944,1.959], +[4.251,2.517,2.168], +[2.232,0.835,0.711], +[2.263,0.936,0.802], +[4.947,3.078,3.026], +[7.207,4.084,4.07], +[6.074,4.452,3.919], +[5.349,3.191,3.12], +[14.559,11.097,11.063], +[11.058,8.01,7.762], +[28.227,22.692,22.459], +[1.688,0.35,0.259], +[16.214,4.229,4.039], +[18.284,4.339,4.233], +[31.926,9.819,9.475], +[30.797,5.062,5.247], +[4.744,1.308,1.122], +[2.785,1.022,0.957], +[4.473,1.235,1.135], +[15.194,3.561,3.443], +[15.267,5.481,5.387], +[5.145,4.8,4.686], +[5.523,2.581,2.531], +[9.329,3.263,3.452], +[25.753,20.411,20.543], +[26.907,14.767,14.783], +[26.541,15.051,14.838], +[6.637,5.235,5.491], +[0.493,0.237,0.217], +[0.171,0.095,0.094], +[0.298,0.091,0.085], +[0.901,0.555,0.547], +[0.171,0.042,0.035], +[0.12,0.035,0.033], +[0.032,0.014,0.015] + ] + } +] diff --git a/website/benchmark/hardware/results/gcp_n2d.json b/website/benchmark/hardware/results/gcp_n2d.json new file mode 100644 index 00000000000..1ce0524f0cf --- /dev/null +++ b/website/benchmark/hardware/results/gcp_n2d.json @@ -0,0 +1,54 @@ +[ + { + "system": "GCP n2d-16-highmem", + "system_full": "GCP compute n2d-16-highmem, AMD EPYC 7B12, 16vCPU, 128 GiB RAM", + "time": "2021-11-18 00:00:00", + "kind": "cloud", + "result": + [ +[0.002, 0.001, 0.001], +[0.017, 0.013, 0.012], +[0.046, 0.032, 0.031], +[0.062, 0.048, 0.046], +[0.122, 0.112, 0.103], +[0.365, 0.313, 0.312], +[0.026, 0.029, 0.024], +[0.028, 0.014, 0.014], +[0.516, 0.473, 0.477], +[0.591, 0.545, 0.542], +[0.210, 0.178, 0.183], +[0.224, 0.208, 0.205], +[0.682, 0.629, 0.609], +[0.862, 0.804, 0.812], +[0.854, 0.769, 0.778], +[0.769, 0.771, 0.768], +[2.147, 2.171, 2.166], +[1.439, 1.380, 1.355], +[4.099, 3.974, 4.048], +[0.118, 0.053, 0.048], +[0.873, 0.785, 0.786], +[1.022, 0.908, 0.891], +[2.278, 2.079, 2.042], +[1.714, 0.962, 0.950], +[0.400, 0.287, 0.267], +[0.336, 0.248, 0.228], +[0.379, 0.273, 0.268], +[0.889, 0.816, 0.802], +[1.474, 1.391, 1.455], +[1.358, 1.355, 1.342], +[0.723, 0.628, 0.613], +[0.914, 0.756, 0.741], +[3.916, 3.967, 3.962], +[3.194, 2.998, 3.016], +[3.097, 3.050, 3.073], +[1.099, 1.111, 1.087], +[0.184, 0.168, 0.175], +[0.072, 0.066, 0.065], +[0.067, 0.063, 0.055], +[0.373, 0.374, 0.376], +[0.032, 0.027, 0.020], +[0.021, 0.015, 0.015], +[0.006, 0.008, 0.006] + ] + } +] diff --git a/website/benchmark/hardware/results/hetzner_epyc.json b/website/benchmark/hardware/results/hetzner_epyc.json new file mode 100644 index 00000000000..4ced699a56d --- /dev/null +++ b/website/benchmark/hardware/results/hetzner_epyc.json @@ -0,0 +1,106 @@ +[ + { + "system": "Hetzner CCX22", + "system_full": "Hetzner CCX22 (AMD EPYC 7003, 4 cores, 16 GiB RAM, NVMe)", + "time": "2021-11-17 00:00:00", + "kind": "server", + "result": + [ +[0.001, 0.001, 0.001], +[0.036, 0.023, 0.039], +[0.130, 0.102, 0.092], +[0.304, 0.148, 0.141], +[0.431, 0.298, 0.291], +[1.492, 1.359, 1.357], +[0.088, 0.087, 0.091], +[0.058, 0.039, 0.042], +[1.612, 1.477, 1.473], +[2.017, 1.805, 1.809], +[1.044, 0.925, 0.926], +[1.167, 1.050, 1.048], +[2.621, 2.447, 2.447], +[3.426, 3.176, 3.193], +[3.545, 3.475, 3.431], +[2.958, 2.805, 2.816], +[8.547, 8.320, 8.321], +[6.395, 5.992, 6.081], +[16.542, 16.407, 16.057], +[0.404, 0.166, 0.156], +[4.338, 3.419, 3.373], +[5.042, 4.102, 4.052], +[10.231, 8.420, 8.304], +[6.121, 3.904, 
3.804], +[1.582, 1.297, 1.279], +[1.316, 1.183, 1.171], +[1.565, 1.305, 1.296], +[4.098, 3.290, 3.246], +[5.999, 5.242, 5.205], +[2.247, 2.198, 2.183], +[2.581, 2.336, 2.242], +[3.269, 2.806, 2.744], +[14.252, 14.052, 13.956], +[11.730, 10.638, 10.632], +[11.418, 10.659, 10.572], +[4.170, 4.086, 4.092], +[0.208, 0.173, 0.159], +[0.082, 0.075, 0.069], +[0.082, 0.062, 0.065], +[0.413, 0.392, 0.375], +[0.046, 0.021, 0.029], +[0.032, 0.016, 0.017], +[0.005, 0.004, 0.007] + ] + }, + { + "system": "Hetzner CCX32", + "system_full": "Hetzner CCX32 (AMD EPYC 7003, 8 cores, 32 GiB RAM, NVMe)", + "time": "2021-11-17 00:00:00", + "kind": "server", + "result": + [ +[0.001, 0.001, 0.001], +[0.021, 0.018, 0.017], +[0.078, 0.057, 0.063], +[0.178, 0.083, 0.076], +[0.229, 0.191, 0.182], +[1.141, 1.063, 0.977], +[0.071, 0.051, 0.068], +[0.056, 0.022, 0.035], +[1.043, 1.288, 1.272], +[1.757, 1.003, 0.996], +[0.554, 0.492, 0.555], +[0.931, 0.698, 0.582], +[1.471, 1.364, 1.310], +[2.284, 2.040, 1.720], +[1.852, 1.749, 1.710], +[1.551, 1.496, 1.482], +[4.852, 4.310, 4.964], +[3.384, 3.353, 3.015], +[10.150, 9.422, 10.005], +[0.230, 0.091, 0.089], +[3.525, 1.731, 1.721], +[2.939, 2.325, 2.077], +[7.716, 5.046, 4.394], +[3.927, 2.023, 1.951], +[0.848, 0.732, 0.874], +[1.005, 0.627, 0.606], +[0.968, 0.725, 0.687], +[2.771, 2.453, 1.815], +[3.536, 3.283, 3.020], +[1.661, 1.690, 1.761], +[1.511, 1.213, 1.205], +[2.002, 1.715, 1.518], +[8.160, 8.943, 8.982], +[6.999, 5.827, 6.024], +[7.777, 6.634, 6.338], +[2.391, 2.285, 2.284], +[0.221, 0.182, 0.196], +[0.114, 0.072, 0.069], +[0.096, 0.063, 0.065], +[0.423, 0.382, 0.405], +[0.077, 0.022, 0.024], +[0.030, 0.022, 0.018], +[0.011, 0.004, 0.008] + ] + } +] diff --git a/website/benchmark/hardware/results/scaleway_epyc.json b/website/benchmark/hardware/results/scaleway_epyc.json new file mode 100644 index 00000000000..54cf6eaf459 --- /dev/null +++ b/website/benchmark/hardware/results/scaleway_epyc.json @@ -0,0 +1,54 @@ +[ + { + "system": "Scaleway GP1-XS", + "system_full": "Scaleway GP1-XS (AMD EPYC 7401P, 4 cores, 16 GiB RAM, NVMe)", + "time": "2021-11-17 00:00:00", + "kind": "server", + "result": + [ +[0.002, 0.002, 0.002], +[0.044, 0.026, 0.028], +[0.100, 0.076, 0.067], +[0.151, 0.101, 0.102], +[0.276, 0.218, 0.207], +[0.740, 0.693, 0.703], +[0.066, 0.054, 0.050], +[0.062, 0.035, 0.041], +[1.271, 1.124, 1.141], +[1.441, 1.279, 1.280], +[0.438, 0.382, 0.376], +[0.514, 0.485, 0.467], +[1.914, 1.664, 1.694], +[2.367, 2.277, 2.258], +[2.143, 2.066, 2.131], +[1.923, 1.826, 1.777], +[5.894, 5.653, 5.765], +[3.545, 3.464, 3.405], +[12.060, 12.893, 13.049], +[0.196, 0.121, 0.118], +[2.328, 1.841, 1.808], +[2.498, 2.100, 2.067], +[5.839, 5.094, 5.078], +[3.068, 2.255, 2.202], +[0.718, 0.611, 0.616], +[0.597, 0.531, 0.529], +[0.702, 0.615, 0.592], +[2.310, 1.991, 1.969], +[3.540, 3.222, 3.179], +[3.950, 3.977, 3.876], +[1.527, 1.319, 1.319], +[2.264, 1.950, 1.927], +[11.987, 11.644, 11.777], +[10.142, 9.150, 9.204], +[9.627, 9.298, 9.183], +[2.937, 2.812, 2.849], +[0.229, 0.226, 0.227], +[0.096, 0.097, 0.095], +[0.087, 0.074, 0.071], +[0.464, 0.447, 0.463], +[0.037, 0.027, 0.032], +[0.030, 0.046, 0.029], +[0.006, 0.017, 0.014] + ] + } +] diff --git a/website/blog/en/2021/clickhouse-october-moscow-meetup.md b/website/blog/en/2021/clickhouse-october-moscow-meetup.md new file mode 100644 index 00000000000..557c7f0f9c0 --- /dev/null +++ b/website/blog/en/2021/clickhouse-october-moscow-meetup.md @@ -0,0 +1,42 @@ +--- +title: 'ClickHouse Moscow Meetup October 19, 2021' +image: 
'https://blog-images.clickhouse.com/en/2021/clickhouse-october-moscow-meetup/featured.jpg'
+date: '2021-11-11'
+author: '[Rich Raposa](https://github.com/rfraposa)'
+tags: ['company', 'community']
+---
+
+ClickHouse organized an online Meetup on October 19, 2021, hosted by our very own co-founder and CTO, Alexey Milovidov. There were a lot of new features to discuss in the 21.10 version of ClickHouse, along with many more coming up on the roadmap.
+
+There were over 200 attendees in person for the Meetup and 3,853 viewers online, and we want to thank everyone who attended live. You can watch the recording of the Meetup on YouTube [here](https://www.youtube.com/watch?v=W6h3_xykd2Y).
+
+Alexey Milovidov, Chief Technology Officer, welcomed and updated the community on ClickHouse Inc.'s latest news. Maksim Kita, Sr. Software Engineer at ClickHouse, started with a discussion of the new User Defined Functions (UDFs) available in 21.10. UDFs can be defined as lambda expressions using the CREATE FUNCTION command. For example:
+
+```
+CREATE FUNCTION a_plus_b AS (a, b) -> a + b
+```
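+
+Once defined, the function can be called like any built-in function. A quick illustration (the result shown assumes the definition above):
+
+```
+SELECT a_plus_b(1, 2)   -- returns 3
+```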
+
+In addition to UDFs, there are two new table engines - Executable and ExecutablePool - that can stream records via stdin and stdout through custom scripts written in whatever language you prefer. For details, be sure to check out our [new training lesson on What's New in ClickHouse 21.10](https://clickhouse.com/learn/lessons/whatsnew-clickhouse-21.10/).
+
+You can now encrypt your data stored on S3, HDFS, external disks, or on a local disk. ClickHouse developers Vitaly Baranov and Artur Filatenkov discussed the details and benefits of encrypting your data at rest in ClickHouse. Vitaly presented the new full disk encryption feature and Artur presented column-level encryption.
+
+![Disk Encryption Performance](https://blog-images.clickhouse.com/en/2021/clickhouse-october-moscow-meetup/disk-encryption-performance.jpg)
+
+![Artur Filatenkov](https://blog-images.clickhouse.com/en/2021/clickhouse-october-moscow-meetup/arthur-filatenkov.jpg)
+
+Alexey then spent 40 minutes discussing some of the amazing new features on the ClickHouse roadmap, including:
+
+* ClickHouse Keeper: a new C++ coordination system for ClickHouse designed as an alternative to ZooKeeper
+* Support for working with semi-structured data, including JSON objects with arbitrary nested objects
+* Asynchronous insert mode - now you can insert data without batching!
+
+After the talk, Alexey took questions from users on:
+
+* How to parse User-Agent in ClickHouse
+* Whether it is true that ClickHouse developers have a ClickHouse tattoo
+
+![YAML Configuration](https://blog-images.clickhouse.com/en/2021/clickhouse-october-moscow-meetup/yaml-configuration.jpg)
+
+* If you are excited about ClickHouse, be sure to join us on [Telegram](https://t.me/clickhouse_en)
+* We also have a community Slack workspace; be sure to join [here](https://clickhousedb.slack.com/).
+* If you are new to ClickHouse and want to see it in action, check out our [Getting Started lesson](https://clickhouse.com/learn/lessons/gettingstarted/).
diff --git a/website/blog/en/2021/clickhouse-v21.11-released.md b/website/blog/en/2021/clickhouse-v21.11-released.md new file mode 100644 index 00000000000..a10d6ce85de --- /dev/null +++ b/website/blog/en/2021/clickhouse-v21.11-released.md @@ -0,0 +1,63 @@
+---
+title: 'ClickHouse v21.11 Released'
+image: 'https://blog-images.clickhouse.com/en/2021/clickhouse-v21-11/featured.jpg'
+date: '2021-11-11'
+author: '[Rich Raposa](https://github.com/rfraposa), [Alexey Milovidov](https://github.com/alexey-milovidov)'
+tags: ['company', 'community']
+---
+
+We're continuing our monthly release cadence and blog updates at [ClickHouse, Inc](https://clickhouse.com/blog/en/2021/clickhouse-inc/). The 21.11 release includes asynchronous inserts, an interactive mode for clickhouse-local, executable UDFs, predefined connections, and compression gains. Thank you to the 142 committers and their 4,337 commits for making this release possible.
+
+Let's highlight some of these exciting new capabilities in 21.11:
+
+## Async Inserts
+
+The new asynchronous INSERT mode accumulates inserted data and stores it in a single batch, using fewer disk resources (IOPS) and thereby supporting a high rate of INSERT queries. On the client it is enabled with the `async_insert` setting for `INSERT` queries with data inlined in the query or in a separate buffer (e.g. for `INSERT` queries via the HTTP protocol). If `wait_for_async_insert` is true (the default), the client waits until the data has been flushed to the table. On the server side it can be tuned with the settings `async_insert_threads`, `async_insert_max_data_size` and `async_insert_busy_timeout_ms`.
+
+**How does this help our ClickHouse Users?**
+
+A notable pain point for users was having to insert data in large batches, which is not always practical. Say you have a monitoring use case and want to ingest 1M records per second: you would normally send large batches of around 100k records, but with 1,000 clients shipping data it is hard to collect such batches before inserting into ClickHouse. Historically you might have used Kafka or Buffer tables to balance and batch the incoming data.
+
+Now, with the new async insert mode, you can issue a high rate of small inserts concurrently, and ClickHouse will automatically group them into batches and insert them into the table. Every client gets an acknowledgement that its data was inserted successfully.
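+
+For example, a client that cannot batch on its own might enable the mode per session. A minimal sketch (the `metrics` table and its columns are hypothetical):
+
+```
+SET async_insert = 1, wait_for_async_insert = 1;
+
+-- Each small insert is buffered server-side and flushed to the table in batches.
+INSERT INTO metrics VALUES (now(), 'cpu', 0.42);
+```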
+
+## Local Interactive Mode
+
+We have added an interactive mode for `clickhouse-local`, so you can simply run `clickhouse-local` to get a command-line ClickHouse interface without connecting to a server, and process data from files and external data sources.
+
+**How does this help our ClickHouse Users?**
+
+What if you have an ad-hoc use case where you want to run analytics on a local file with ClickHouse? Historically, you'd have to spin up an empty ClickHouse server and connect it to the external data source you were interested in querying, e.g. S3, HDFS, or URLs. Now with clickhouse-local you get the same full interactive experience as clickhouse-client, without any additional setup or data-ingestion overhead, to try out your idea or hypothesis. Hope you enjoy!
+
+## Executable UDFs
+
+Added support for executable (scriptable) user defined functions. These are UDFs that can be written in any programming language.
+
+**How does this help our ClickHouse Users?**
+
+We added UDFs in our 21.10 release. Similar to our October release, we're continuing to innovate around making it easy to plug external tools into ClickHouse as functions. You might be doing ML inference in a Python script, and now you can expose it as a function available in SQL. Or, what if you wanted to do a DNS lookup? You have a domain name in a ClickHouse table and want to convert it to an IP address with some function. Now you can just plug in an external script that processes the domain names and returns IP addresses.
+
+## Predefined Connections
+
+Allow predefined connections to external data sources. This avoids specifying credentials or addresses each time an external data source is used; connections can be referenced by name instead.
+
+**How does this help our ClickHouse Users?**
+
+Suppose you're trying to connect ClickHouse to another data source to load data, MySQL for example: how do you do that? Before this feature you would have to handle all the credentials for MySQL, use the MySQL table functions, and know the user and password permissions needed to access certain tables. Now the required parameters are predefined inside the ClickHouse configuration, and the user can just refer to the connection by name, e.g. for MongoDB, HDFS, S3, or MySQL; it's a one-time configuration going forward.
+
+## Compression
+
+Add support for compression and decompression for `INTO OUTFILE` and `FROM INFILE` (with autodetection, or with an additional optional parameter).
+
+**How does this help our ClickHouse Users?**
+
+Are you looking to import and export compressed data into and out of ClickHouse more easily? Before this feature you had to specify the compression of input and output data manually, and even for stream insertion you still had to manage the decompression yourself. Now you can just name the file, e.g. mytable.csv.gz, and the compression method is detected from the extension --- and, go!
+
+In the last month, we've added new free training modules, including What's New in 21.11. Take the lesson [here](https://clickhouse.com/learn/lessons/whatsnew-clickhouse-21.11/).
+
+## ClickHouse Release Notes
+
+Release 21.11
+
+Release Date: 2021-11-09
+
+Release Notes: [21.11](https://github.com/ClickHouse/ClickHouse/blob/master/CHANGELOG.md)
diff --git a/website/css/bootstrap.css b/website/css/bootstrap.css index b65cbbfed01..eb26dce70d7 100644 --- a/website/css/bootstrap.css +++ b/website/css/bootstrap.css @@ -3,4 +3,4 @@
* Copyright 2011-2019 The Bootstrap Authors
* Copyright 2011-2019 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) - */:root{--blue:#007bff;--indigo:#6610f2;--purple:#6f42c1;--pink:#e83e8c;--red:#dc3545;--orange:#fd7e14;--yellow:#ffc107;--green:#28a745;--teal:#20c997;--cyan:#17a2b8;--gray:#6c757d;--gray-dark:#343a40;--brand-primary:#fc0;--brand-secondary:#ff3939;--primary-accent-yellow:#fc0;--primary-accent-light-yellow:#fffaf0;--primary-accent-blue:#257af4;--primary-accent-light-blue:#e3f1fe;--secondary-accent-orange:#ff8c00;--secondary-accent-light-orange:#ffe4b5;--secondary-accent-red:#ff3939;--secondary-accent-light-red:#ffe4e1;--primary:#fc0;--secondary:#212529;--success:#28a745;--info:#17a2b8;--warning:#ffc107;--danger:#dc3545;--light:#f1f6f9;--dark:#495057;--primary-light:#fffaf0;--secondary-light:#fff;--tertiary:#257af4;--tertiary-light:#e3f1fe;--white:#fff;--black:#212529;--blue:#257af4;--light-blue:#e3f1fe;--yellow:#fc0;--light-yellow:#fffaf0;--orange:#ff8c00;--light-orange:#ffe4b5;--red:#ff3939;--light-red:#ffe4e1;--medium:#d6dbdf;--breakpoint-xxs:0;--breakpoint-xs:400px;--breakpoint-sm:616px;--breakpoint-md:768px;--breakpoint-lg:980px;--breakpoint-xl:1240px;--font-family-sans-serif:"Noto Sans",sans-serif;--font-family-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace}*,:after,:before{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:rgba(33,37,41,0)}article,aside,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}body{margin:0;font-family:Noto Sans,sans-serif;font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}[tabindex="-1"]:focus:not(:focus-visible){outline:0!important}hr{box-sizing:content-box;height:0;overflow:visible}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:16px}p{margin-top:0;margin-bottom:1rem}abbr[data-original-title],abbr[title]{text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted;cursor:help;border-bottom:0;-webkit-text-decoration-skip-ink:none;text-decoration-skip-ink:none}address{font-style:normal;line-height:inherit}address,dl,ol,ul{margin-bottom:1rem}dl,ol,ul{margin-top:0}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{text-decoration:none;background-color:transparent}a,a:hover{color:#ff8c00}a:hover{text-decoration:underline}a:not([href]),a:not([href]):hover{color:inherit;text-decoration:none}code,kbd,pre,samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:1em}pre{margin-top:0;margin-bottom:1rem;overflow:auto}figure{margin:0 0 1rem}img{border-style:none}img,svg{vertical-align:middle}svg{overflow:hidden}table{border-collapse:collapse}caption{padding-top:.75rem;padding-bottom:.75rem;color:#6c757d;text-align:left;caption-side:bottom}th{text-align:inherit}label{display:inline-block;margin-bottom:.5rem}button{border-radius:0}button:focus{outline:1px dotted;outline:5px auto 
-webkit-focus-ring-color}button,input,optgroup,select,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,input{overflow:visible}button,select{text-transform:none}select{word-wrap:normal}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled),button:not(:disabled){cursor:pointer}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{padding:0;border-style:none}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=date],input[type=datetime-local],input[type=month],input[type=time]{-webkit-appearance:listbox}textarea{overflow:auto;resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;max-width:100%;padding:0;margin-bottom:.5rem;font-size:1.5rem;line-height:inherit;color:inherit;white-space:normal}@media(max-width:1200px){legend{font-size:calc(1.275rem + .3vw)}}progress{vertical-align:baseline}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:none}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}summary{display:list-item;cursor:pointer}template{display:none}[hidden]{display:none!important}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{margin-bottom:16px;font-family:Hind Siliguri,sans-serif;font-weight:500;line-height:1.125}.h1,h1{font-size:2.5rem}@media(max-width:1200px){.h1,h1{font-size:calc(1.375rem + 1.5vw)}}.h2,h2{font-size:2rem}@media(max-width:1200px){.h2,h2{font-size:calc(1.325rem + .9vw)}}.h3,h3{font-size:1.75rem}@media(max-width:1200px){.h3,h3{font-size:calc(1.3rem + .6vw)}}.h4,h4{font-size:1.5rem}@media(max-width:1200px){.h4,h4{font-size:calc(1.275rem + .3vw)}}.h5,h5{font-size:1.125rem}.h6,h6{font-size:.875rem}.lead{font-size:1.375rem;font-weight:400}@media(max-width:1200px){.lead{font-size:calc(1.2625rem + .15vw)}}.display-1{font-size:4rem;font-weight:600;line-height:1.125}@media(max-width:1200px){.display-1{font-size:calc(1.525rem + 3.3vw)}}.display-2{font-size:2.5rem;font-weight:600;line-height:1.125}@media(max-width:1200px){.display-2{font-size:calc(1.375rem + 1.5vw)}}.display-3{font-size:2rem;font-weight:500;line-height:1.125}@media(max-width:1200px){.display-3{font-size:calc(1.325rem + .9vw)}}.display-4{font-size:1.75rem;font-weight:500;line-height:1.125}@media(max-width:1200px){.display-4{font-size:calc(1.3rem + .6vw)}}hr{margin-top:8px;margin-bottom:8px;border:0;border-top:1px solid rgba(33,37,41,.1)}.small,small{font-size:80%;font-weight:400}.mark,mark{padding:.2em;background-color:#fcf8e3}.list-inline,.list-unstyled{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{font-size:90%;text-transform:uppercase}.blockquote{margin-bottom:8px;font-size:1.25rem}.blockquote-footer{display:block;font-size:80%;color:#6c757d}.blockquote-footer:before{content:"— "}.img-fluid,.img-thumbnail{max-width:100%;height:auto}.img-thumbnail{padding:.25rem;background-color:#fff;border:1px solid #dee2e6;border-radius:8px}.figure{display:inline-block}.figure-img{margin-bottom:4px;line-height:1}.figure-caption{font-size:90%;color:#6c757d}code{font-size:87.5%;color:#e83e8c;word-wrap:break-word}a>code{color:inherit}kbd{padding:.2rem 
.4rem;font-size:87.5%;color:#fff;background-color:#495057;border-radius:8px}kbd kbd{padding:0;font-size:100%;font-weight:700}pre{display:block;font-size:87.5%;color:#495057}pre code{font-size:inherit;color:inherit;word-break:normal}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{width:100%;padding-right:20px;padding-left:20px;margin-right:auto;margin-left:auto}@media(min-width:400px){.container{max-width:576px}}@media(min-width:616px){.container{max-width:576px}}@media(min-width:768px){.container{max-width:958px}}@media(min-width:980px){.container{max-width:1008px}}@media(min-width:1240px){.container{max-width:1118px}}.container-fluid,.container-lg,.container-md,.container-sm,.container-xl,.container-xs{width:100%;padding-right:20px;padding-left:20px;margin-right:auto;margin-left:auto}@media(min-width:400px){.container,.container-xs{max-width:576px}}@media(min-width:616px){.container,.container-sm,.container-xs{max-width:576px}}@media(min-width:768px){.container,.container-md,.container-sm,.container-xs{max-width:958px}}@media(min-width:980px){.container,.container-lg,.container-md,.container-sm,.container-xs{max-width:1008px}}@media(min-width:1240px){.container,.container-lg,.container-md,.container-sm,.container-xl,.container-xs{max-width:1118px}}.row{display:flex;flex-wrap:wrap;margin-right:-20px;margin-left:-20px}.no-gutters{margin-right:0;margin-left:0}.no-gutters>.col,.no-gutters>[class*=col-]{padding-right:0;padding-left:0}.col,.col-1,.col-2,.col-3,.col-4,.col-5,.col-6,.col-7,.col-8,.col-9,.col-10,.col-11,.col-12,.col-auto,.col-lg,.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-auto,.col-md,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12,.col-md-auto,.col-sm,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-auto,.col-xl,.col-xl-1,.col-xl-2,.col-xl-3,.col-xl-4,.col-xl-5,.col-xl-6,.col-xl-7,.col-xl-8,.col-xl-9,.col-xl-10,.col-xl-11,.col-xl-12,.col-xl-auto,.col-xs,.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-auto{position:relative;width:100%;padding-right:20px;padding-left:20px}.col{flex-basis:0;flex-grow:1;max-width:100%}.row-cols-1>*{flex:0 0 100%;max-width:100%}.row-cols-2>*{flex:0 0 50%;max-width:50%}.row-cols-3>*{flex:0 0 33.3333333333%;max-width:33.3333333333%}.row-cols-4>*{flex:0 0 25%;max-width:25%}.row-cols-5>*{flex:0 0 20%;max-width:20%}.row-cols-6>*{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-auto{flex:0 0 auto;width:auto;max-width:100%}.col-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-3{flex:0 0 25%;max-width:25%}.col-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-6{flex:0 0 50%;max-width:50%}.col-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-9{flex:0 0 75%;max-width:75%}.col-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-12{flex:0 0 
100%;max-width:100%}.order-first{order:-1}.order-last{order:13}.order-0{order:0}.order-1{order:1}.order-2{order:2}.order-3{order:3}.order-4{order:4}.order-5{order:5}.order-6{order:6}.order-7{order:7}.order-8{order:8}.order-9{order:9}.order-10{order:10}.order-11{order:11}.order-12{order:12}.offset-1{margin-left:8.3333333333%}.offset-2{margin-left:16.6666666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.3333333333%}.offset-5{margin-left:41.6666666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.3333333333%}.offset-8{margin-left:66.6666666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.3333333333%}.offset-11{margin-left:91.6666666667%}@media(min-width:400px){.col-xs{flex-basis:0;flex-grow:1;max-width:100%}.row-cols-xs-1>*{flex:0 0 100%;max-width:100%}.row-cols-xs-2>*{flex:0 0 50%;max-width:50%}.row-cols-xs-3>*{flex:0 0 33.3333333333%;max-width:33.3333333333%}.row-cols-xs-4>*{flex:0 0 25%;max-width:25%}.row-cols-xs-5>*{flex:0 0 20%;max-width:20%}.row-cols-xs-6>*{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-xs-auto{flex:0 0 auto;width:auto;max-width:100%}.col-xs-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-xs-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-xs-3{flex:0 0 25%;max-width:25%}.col-xs-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-xs-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-xs-6{flex:0 0 50%;max-width:50%}.col-xs-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-xs-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-xs-9{flex:0 0 75%;max-width:75%}.col-xs-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-xs-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-xs-12{flex:0 0 100%;max-width:100%}.order-xs-first{order:-1}.order-xs-last{order:13}.order-xs-0{order:0}.order-xs-1{order:1}.order-xs-2{order:2}.order-xs-3{order:3}.order-xs-4{order:4}.order-xs-5{order:5}.order-xs-6{order:6}.order-xs-7{order:7}.order-xs-8{order:8}.order-xs-9{order:9}.order-xs-10{order:10}.order-xs-11{order:11}.order-xs-12{order:12}.offset-xs-0{margin-left:0}.offset-xs-1{margin-left:8.3333333333%}.offset-xs-2{margin-left:16.6666666667%}.offset-xs-3{margin-left:25%}.offset-xs-4{margin-left:33.3333333333%}.offset-xs-5{margin-left:41.6666666667%}.offset-xs-6{margin-left:50%}.offset-xs-7{margin-left:58.3333333333%}.offset-xs-8{margin-left:66.6666666667%}.offset-xs-9{margin-left:75%}.offset-xs-10{margin-left:83.3333333333%}.offset-xs-11{margin-left:91.6666666667%}}@media(min-width:616px){.col-sm{flex-basis:0;flex-grow:1;max-width:100%}.row-cols-sm-1>*{flex:0 0 100%;max-width:100%}.row-cols-sm-2>*{flex:0 0 50%;max-width:50%}.row-cols-sm-3>*{flex:0 0 33.3333333333%;max-width:33.3333333333%}.row-cols-sm-4>*{flex:0 0 25%;max-width:25%}.row-cols-sm-5>*{flex:0 0 20%;max-width:20%}.row-cols-sm-6>*{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-sm-auto{flex:0 0 auto;width:auto;max-width:100%}.col-sm-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-sm-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-sm-3{flex:0 0 25%;max-width:25%}.col-sm-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-sm-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-sm-6{flex:0 0 50%;max-width:50%}.col-sm-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-sm-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-sm-9{flex:0 0 75%;max-width:75%}.col-sm-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-sm-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-sm-12{flex:0 0 
100%;max-width:100%}.order-sm-first{order:-1}.order-sm-last{order:13}.order-sm-0{order:0}.order-sm-1{order:1}.order-sm-2{order:2}.order-sm-3{order:3}.order-sm-4{order:4}.order-sm-5{order:5}.order-sm-6{order:6}.order-sm-7{order:7}.order-sm-8{order:8}.order-sm-9{order:9}.order-sm-10{order:10}.order-sm-11{order:11}.order-sm-12{order:12}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.3333333333%}.offset-sm-2{margin-left:16.6666666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.3333333333%}.offset-sm-5{margin-left:41.6666666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.3333333333%}.offset-sm-8{margin-left:66.6666666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.3333333333%}.offset-sm-11{margin-left:91.6666666667%}}@media(min-width:768px){.col-md{flex-basis:0;flex-grow:1;max-width:100%}.row-cols-md-1>*{flex:0 0 100%;max-width:100%}.row-cols-md-2>*{flex:0 0 50%;max-width:50%}.row-cols-md-3>*{flex:0 0 33.3333333333%;max-width:33.3333333333%}.row-cols-md-4>*{flex:0 0 25%;max-width:25%}.row-cols-md-5>*{flex:0 0 20%;max-width:20%}.row-cols-md-6>*{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-md-auto{flex:0 0 auto;width:auto;max-width:100%}.col-md-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-md-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-md-3{flex:0 0 25%;max-width:25%}.col-md-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-md-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-md-6{flex:0 0 50%;max-width:50%}.col-md-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-md-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-md-9{flex:0 0 75%;max-width:75%}.col-md-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-md-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-md-12{flex:0 0 100%;max-width:100%}.order-md-first{order:-1}.order-md-last{order:13}.order-md-0{order:0}.order-md-1{order:1}.order-md-2{order:2}.order-md-3{order:3}.order-md-4{order:4}.order-md-5{order:5}.order-md-6{order:6}.order-md-7{order:7}.order-md-8{order:8}.order-md-9{order:9}.order-md-10{order:10}.order-md-11{order:11}.order-md-12{order:12}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.3333333333%}.offset-md-2{margin-left:16.6666666667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.3333333333%}.offset-md-5{margin-left:41.6666666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.3333333333%}.offset-md-8{margin-left:66.6666666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.3333333333%}.offset-md-11{margin-left:91.6666666667%}}@media(min-width:980px){.col-lg{flex-basis:0;flex-grow:1;max-width:100%}.row-cols-lg-1>*{flex:0 0 100%;max-width:100%}.row-cols-lg-2>*{flex:0 0 50%;max-width:50%}.row-cols-lg-3>*{flex:0 0 33.3333333333%;max-width:33.3333333333%}.row-cols-lg-4>*{flex:0 0 25%;max-width:25%}.row-cols-lg-5>*{flex:0 0 20%;max-width:20%}.row-cols-lg-6>*{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-lg-auto{flex:0 0 auto;width:auto;max-width:100%}.col-lg-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-lg-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-lg-3{flex:0 0 25%;max-width:25%}.col-lg-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-lg-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-lg-6{flex:0 0 50%;max-width:50%}.col-lg-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-lg-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-lg-9{flex:0 0 75%;max-width:75%}.col-lg-10{flex:0 0 
83.3333333333%;max-width:83.3333333333%}.col-lg-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-lg-12{flex:0 0 100%;max-width:100%}.order-lg-first{order:-1}.order-lg-last{order:13}.order-lg-0{order:0}.order-lg-1{order:1}.order-lg-2{order:2}.order-lg-3{order:3}.order-lg-4{order:4}.order-lg-5{order:5}.order-lg-6{order:6}.order-lg-7{order:7}.order-lg-8{order:8}.order-lg-9{order:9}.order-lg-10{order:10}.order-lg-11{order:11}.order-lg-12{order:12}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.3333333333%}.offset-lg-2{margin-left:16.6666666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.3333333333%}.offset-lg-5{margin-left:41.6666666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.3333333333%}.offset-lg-8{margin-left:66.6666666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.3333333333%}.offset-lg-11{margin-left:91.6666666667%}}@media(min-width:1240px){.col-xl{flex-basis:0;flex-grow:1;max-width:100%}.row-cols-xl-1>*{flex:0 0 100%;max-width:100%}.row-cols-xl-2>*{flex:0 0 50%;max-width:50%}.row-cols-xl-3>*{flex:0 0 33.3333333333%;max-width:33.3333333333%}.row-cols-xl-4>*{flex:0 0 25%;max-width:25%}.row-cols-xl-5>*{flex:0 0 20%;max-width:20%}.row-cols-xl-6>*{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-xl-auto{flex:0 0 auto;width:auto;max-width:100%}.col-xl-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-xl-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-xl-3{flex:0 0 25%;max-width:25%}.col-xl-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-xl-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-xl-6{flex:0 0 50%;max-width:50%}.col-xl-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-xl-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-xl-9{flex:0 0 75%;max-width:75%}.col-xl-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-xl-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-xl-12{flex:0 0 100%;max-width:100%}.order-xl-first{order:-1}.order-xl-last{order:13}.order-xl-0{order:0}.order-xl-1{order:1}.order-xl-2{order:2}.order-xl-3{order:3}.order-xl-4{order:4}.order-xl-5{order:5}.order-xl-6{order:6}.order-xl-7{order:7}.order-xl-8{order:8}.order-xl-9{order:9}.order-xl-10{order:10}.order-xl-11{order:11}.order-xl-12{order:12}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.3333333333%}.offset-xl-2{margin-left:16.6666666667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.3333333333%}.offset-xl-5{margin-left:41.6666666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.3333333333%}.offset-xl-8{margin-left:66.6666666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.3333333333%}.offset-xl-11{margin-left:91.6666666667%}}.table{width:100%;margin-bottom:8px;color:#212529}.table td,.table th{padding:.75rem;vertical-align:top;border-top:1px solid #d6dbdf}.table thead th{vertical-align:bottom;border-bottom:2px solid #d6dbdf}.table tbody+tbody{border-top:2px solid #d6dbdf}.table-sm td,.table-sm th{padding:.3rem}.table-bordered,.table-bordered td,.table-bordered th{border:1px solid #d6dbdf}.table-bordered thead td,.table-bordered thead th{border-bottom-width:2px}.table-borderless tbody+tbody,.table-borderless td,.table-borderless th,.table-borderless thead th{border:0}.table-striped tbody tr:nth-of-type(odd){background-color:rgba(33,37,41,.05)}.table-hover tbody tr:hover{color:#212529;background-color:rgba(33,37,41,.075)}.table-primary,.table-primary>td,.table-primary>th{background-color:#fff1b8}.table-primary tbody+tbody,.table-primary td,.table-primary 
th,.table-primary thead th{border-color:#ffe47a}.table-hover .table-primary:hover,.table-hover .table-primary:hover>td,.table-hover .table-primary:hover>th{background-color:#ffec9f}.table-secondary,.table-secondary>td,.table-secondary>th{background-color:#c1c2c3}.table-secondary tbody+tbody,.table-secondary td,.table-secondary th,.table-secondary thead th{border-color:#8c8e90}.table-hover .table-secondary:hover,.table-hover .table-secondary:hover>td,.table-hover .table-secondary:hover>th{background-color:#b4b5b6}.table-success,.table-success>td,.table-success>th{background-color:#c3e6cb}.table-success tbody+tbody,.table-success td,.table-success th,.table-success thead th{border-color:#8fd19e}.table-hover .table-success:hover,.table-hover .table-success:hover>td,.table-hover .table-success:hover>th{background-color:#b1dfbb}.table-info,.table-info>td,.table-info>th{background-color:#bee5eb}.table-info tbody+tbody,.table-info td,.table-info th,.table-info thead th{border-color:#86cfda}.table-hover .table-info:hover,.table-hover .table-info:hover>td,.table-hover .table-info:hover>th{background-color:#abdde5}.table-warning,.table-warning>td,.table-warning>th{background-color:#ffeeba}.table-warning tbody+tbody,.table-warning td,.table-warning th,.table-warning thead th{border-color:#ffdf7e}.table-hover .table-warning:hover,.table-hover .table-warning:hover>td,.table-hover .table-warning:hover>th{background-color:#ffe8a1}.table-danger,.table-danger>td,.table-danger>th{background-color:#f5c6cb}.table-danger tbody+tbody,.table-danger td,.table-danger th,.table-danger thead th{border-color:#ed969e}.table-hover .table-danger:hover,.table-hover .table-danger:hover>td,.table-hover .table-danger:hover>th{background-color:#f1b0b7}.table-light,.table-light>td,.table-light>th{background-color:#fbfcfd}.table-light tbody+tbody,.table-light td,.table-light th,.table-light thead th{border-color:#f8fafc}.table-hover .table-light:hover,.table-hover .table-light:hover>td,.table-hover .table-light:hover>th{background-color:#eaeff5}.table-dark,.table-dark>td,.table-dark>th{background-color:#ccced0}.table-dark tbody+tbody,.table-dark td,.table-dark th,.table-dark thead th{border-color:#a0a4a8}.table-hover .table-dark:hover,.table-hover .table-dark:hover>td,.table-hover .table-dark:hover>th{background-color:#bfc1c4}.table-primary-light,.table-primary-light>td,.table-primary-light>th{background-color:#fffefb}.table-primary-light tbody+tbody,.table-primary-light td,.table-primary-light th,.table-primary-light thead th{border-color:#fffcf7}.table-hover .table-primary-light:hover,.table-hover .table-primary-light:hover>td,.table-hover .table-primary-light:hover>th{background-color:#fff8e2}.table-secondary-light,.table-secondary-light>td,.table-secondary-light>th{background-color:#fff}.table-secondary-light tbody+tbody,.table-secondary-light td,.table-secondary-light th,.table-secondary-light thead th{border-color:#fff}.table-hover .table-secondary-light:hover,.table-hover .table-secondary-light:hover>td,.table-hover .table-secondary-light:hover>th{background-color:#f2f2f2}.table-tertiary,.table-tertiary>td,.table-tertiary>th{background-color:#c2dafc}.table-tertiary tbody+tbody,.table-tertiary td,.table-tertiary th,.table-tertiary thead th{border-color:#8ebaf9}.table-hover .table-tertiary:hover,.table-hover .table-tertiary:hover>td,.table-hover .table-tertiary:hover>th{background-color:#aacbfb}.table-tertiary-light,.table-tertiary-light>td,.table-tertiary-light>th{background-color:#f7fbff}.table-tertiary-light 
tbody+tbody,.table-tertiary-light td,.table-tertiary-light th,.table-tertiary-light thead th{border-color:#f0f8fe}.table-hover .table-tertiary-light:hover,.table-hover .table-tertiary-light:hover>td,.table-hover .table-tertiary-light:hover>th{background-color:#deeeff}.table-white,.table-white>td,.table-white>th{background-color:#fff}.table-white tbody+tbody,.table-white td,.table-white th,.table-white thead th{border-color:#fff}.table-hover .table-white:hover,.table-hover .table-white:hover>td,.table-hover .table-white:hover>th{background-color:#f2f2f2}.table-black,.table-black>td,.table-black>th{background-color:#c1c2c3}.table-black tbody+tbody,.table-black td,.table-black th,.table-black thead th{border-color:#8c8e90}.table-hover .table-black:hover,.table-hover .table-black:hover>td,.table-hover .table-black:hover>th{background-color:#b4b5b6}.table-blue,.table-blue>td,.table-blue>th{background-color:#c2dafc}.table-blue tbody+tbody,.table-blue td,.table-blue th,.table-blue thead th{border-color:#8ebaf9}.table-hover .table-blue:hover,.table-hover .table-blue:hover>td,.table-hover .table-blue:hover>th{background-color:#aacbfb}.table-light-blue,.table-light-blue>td,.table-light-blue>th{background-color:#f7fbff}.table-light-blue tbody+tbody,.table-light-blue td,.table-light-blue th,.table-light-blue thead th{border-color:#f0f8fe}.table-hover .table-light-blue:hover,.table-hover .table-light-blue:hover>td,.table-hover .table-light-blue:hover>th{background-color:#deeeff}.table-yellow,.table-yellow>td,.table-yellow>th{background-color:#fff1b8}.table-yellow tbody+tbody,.table-yellow td,.table-yellow th,.table-yellow thead th{border-color:#ffe47a}.table-hover .table-yellow:hover,.table-hover .table-yellow:hover>td,.table-hover .table-yellow:hover>th{background-color:#ffec9f}.table-light-yellow,.table-light-yellow>td,.table-light-yellow>th{background-color:#fffefb}.table-light-yellow tbody+tbody,.table-light-yellow td,.table-light-yellow th,.table-light-yellow thead th{border-color:#fffcf7}.table-hover .table-light-yellow:hover,.table-hover .table-light-yellow:hover>td,.table-hover .table-light-yellow:hover>th{background-color:#fff8e2}.table-orange,.table-orange>td,.table-orange>th{background-color:#ffdfb8}.table-orange tbody+tbody,.table-orange td,.table-orange th,.table-orange thead th{border-color:#ffc37a}.table-hover .table-orange:hover,.table-hover .table-orange:hover>td,.table-hover .table-orange:hover>th{background-color:#ffd49f}.table-light-orange,.table-light-orange>td,.table-light-orange>th{background-color:#fff7ea}.table-light-orange tbody+tbody,.table-light-orange td,.table-light-orange th,.table-light-orange thead th{border-color:#fff1d9}.table-hover .table-light-orange:hover,.table-hover .table-light-orange:hover>td,.table-hover .table-light-orange:hover>th{background-color:#ffedd1}.table-red,.table-red>td,.table-red>th{background-color:#ffc8c8}.table-red tbody+tbody,.table-red td,.table-red th,.table-red thead th{border-color:#ff9898}.table-hover .table-red:hover,.table-hover .table-red:hover>td,.table-hover .table-red:hover>th{background-color:#ffafaf}.table-light-red,.table-light-red>td,.table-light-red>th{background-color:#fff7f7}.table-light-red tbody+tbody,.table-light-red td,.table-light-red th,.table-light-red thead th{border-color:#fff1ef}.table-hover .table-light-red:hover,.table-hover .table-light-red:hover>td,.table-hover .table-light-red:hover>th{background-color:#ffdede}.table-medium,.table-medium>td,.table-medium>th{background-color:#f4f5f6}.table-medium 
tbody+tbody,.table-medium td,.table-medium th,.table-medium thead th{border-color:#eaecee}.table-hover .table-medium:hover,.table-hover .table-medium:hover>td,.table-hover .table-medium:hover>th{background-color:#e6e8eb}.table-active,.table-active>td,.table-active>th{background-color:rgba(33,37,41,.075)}.table-hover .table-active:hover,.table-hover .table-active:hover>td,.table-hover .table-active:hover>th{background-color:rgba(22,24,27,.075)}.table .thead-dark th{color:#fff;background-color:#343a40;border-color:#454d55}.table .thead-light th{color:#6c757d;background-color:#e9ecef;border-color:#d6dbdf}.table-dark{color:#fff;background-color:#343a40}.table-dark td,.table-dark th,.table-dark thead th{border-color:#454d55}.table-dark.table-bordered{border:0}.table-dark.table-striped tbody tr:nth-of-type(odd){background-color:hsla(0,0%,100%,.05)}.table-dark.table-hover tbody tr:hover{color:#fff;background-color:hsla(0,0%,100%,.075)}@media(max-width:399.98px){.table-responsive-xs{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-xs>.table-bordered{border:0}}@media(max-width:615.98px){.table-responsive-sm{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-sm>.table-bordered{border:0}}@media(max-width:767.98px){.table-responsive-md{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-md>.table-bordered{border:0}}@media(max-width:979.98px){.table-responsive-lg{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-lg>.table-bordered{border:0}}@media(max-width:1239.98px){.table-responsive-xl{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-xl>.table-bordered{border:0}}.table-responsive{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive>.table-bordered{border:0}.form-control{display:block;width:100%;height:calc(1.5em + .75rem + 2px);padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#6c757d;background-color:#fff;background-clip:padding-box;border:1px solid #ced4da;border-radius:8px;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion:reduce){.form-control{transition:none}}.form-control::-ms-expand{background-color:transparent;border:0}.form-control:-moz-focusring{color:transparent;text-shadow:0 0 0 #6c757d}.form-control:focus{color:#6c757d;background-color:#fff;border-color:#ffe680;outline:0;box-shadow:0 0 0 .2rem rgba(255,204,0,.25)}.form-control::-moz-placeholder{color:#6c757d;opacity:1}.form-control:-ms-input-placeholder{color:#6c757d;opacity:1}.form-control::placeholder{color:#6c757d;opacity:1}.form-control:disabled,.form-control[readonly]{background-color:#e9ecef;opacity:1}select.form-control:focus::-ms-value{color:#6c757d;background-color:#fff}.form-control-file,.form-control-range{display:block;width:100%}.col-form-label{padding-top:calc(.375rem + 1px);padding-bottom:calc(.375rem + 1px);margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(.5rem + 1px);padding-bottom:calc(.5rem + 1px);font-size:1.125rem;line-height:1.5}.col-form-label-sm{padding-top:calc(.25rem + 1px);padding-bottom:calc(.25rem + 1px);font-size:.875rem;line-height:1.5}.form-control-plaintext{display:block;width:100%;padding:.375rem 0;margin-bottom:0;font-size:1rem;line-height:1.5;color:#212529;background-color:transparent;border:solid transparent;border-width:1px 
0}.form-control-plaintext.form-control-lg,.form-control-plaintext.form-control-sm{padding-right:0;padding-left:0}.form-control-sm{height:calc(1.5em + .5rem + 2px);padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:8px}.form-control-lg{height:calc(1.5em + 1rem + 2px);padding:.5rem 1rem;font-size:1.125rem;line-height:1.5;border-radius:8px}select.form-control[multiple],select.form-control[size],textarea.form-control{height:auto}.form-group{margin-bottom:1rem}.form-text{display:block;margin-top:.25rem}.form-row{display:flex;flex-wrap:wrap;margin-right:-5px;margin-left:-5px}.form-row>.col,.form-row>[class*=col-]{padding-right:5px;padding-left:5px}.form-check{position:relative;display:block;padding-left:1.25rem}.form-check-input{position:absolute;margin-top:.3rem;margin-left:-1.25rem}.form-check-input:disabled~.form-check-label,.form-check-input[disabled]~.form-check-label{color:#6c757d}.form-check-label{margin-bottom:0}.form-check-inline{display:inline-flex;align-items:center;padding-left:0;margin-right:.75rem}.form-check-inline .form-check-input{position:static;margin-top:0;margin-right:.3125rem;margin-left:0}.valid-feedback{display:none;width:100%;margin-top:.25rem;font-size:80%;color:#28a745}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;line-height:1.5;color:#fff;background-color:rgba(40,167,69,.9);border-radius:8px}.is-valid~.valid-feedback,.is-valid~.valid-tooltip,.was-validated :valid~.valid-feedback,.was-validated :valid~.valid-tooltip{display:block}.form-control.is-valid,.was-validated .form-control:valid{border-color:#28a745;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='8' height='8'%3E%3Cpath fill='%2328a745' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3E%3C/svg%3E");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.form-control.is-valid:focus,.was-validated .form-control:valid:focus{border-color:#28a745;box-shadow:0 0 0 .2rem rgba(40,167,69,.25)}.was-validated textarea.form-control:valid,textarea.form-control.is-valid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.custom-select.is-valid,.was-validated .custom-select:valid{border-color:#28a745;padding-right:calc(.75em + 2.3125rem);background:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='4' height='5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right .75rem center/8px 10px,url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='8' height='8'%3E%3Cpath fill='%2328a745' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3E%3C/svg%3E") #fff no-repeat center right 1.75rem/calc(.75em + .375rem) calc(.75em + .375rem)}.custom-select.is-valid:focus,.was-validated .custom-select:valid:focus{border-color:#28a745;box-shadow:0 0 0 .2rem rgba(40,167,69,.25)}.form-check-input.is-valid~.form-check-label,.was-validated .form-check-input:valid~.form-check-label{color:#28a745}.form-check-input.is-valid~.valid-feedback,.form-check-input.is-valid~.valid-tooltip,.was-validated .form-check-input:valid~.valid-feedback,.was-validated 
.form-check-input:valid~.valid-tooltip{display:block}.custom-control-input.is-valid~.custom-control-label,.was-validated .custom-control-input:valid~.custom-control-label{color:#28a745}.custom-control-input.is-valid~.custom-control-label:before,.was-validated .custom-control-input:valid~.custom-control-label:before{border-color:#28a745}.custom-control-input.is-valid:checked~.custom-control-label:before,.was-validated .custom-control-input:valid:checked~.custom-control-label:before{border-color:#34ce57;background-color:#34ce57}.custom-control-input.is-valid:focus~.custom-control-label:before,.was-validated .custom-control-input:valid:focus~.custom-control-label:before{box-shadow:0 0 0 .2rem rgba(40,167,69,.25)}.custom-control-input.is-valid:focus:not(:checked)~.custom-control-label:before,.custom-file-input.is-valid~.custom-file-label,.was-validated .custom-control-input:valid:focus:not(:checked)~.custom-control-label:before,.was-validated .custom-file-input:valid~.custom-file-label{border-color:#28a745}.custom-file-input.is-valid:focus~.custom-file-label,.was-validated .custom-file-input:valid:focus~.custom-file-label{border-color:#28a745;box-shadow:0 0 0 .2rem rgba(40,167,69,.25)}.invalid-feedback{display:none;width:100%;margin-top:.25rem;font-size:80%;color:#dc3545}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;line-height:1.5;color:#fff;background-color:rgba(220,53,69,.9);border-radius:8px}.is-invalid~.invalid-feedback,.is-invalid~.invalid-tooltip,.was-validated :invalid~.invalid-feedback,.was-validated :invalid~.invalid-tooltip{display:block}.form-control.is-invalid,.was-validated .form-control:invalid{border-color:#dc3545;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' fill='none' stroke='%23dc3545'%3E%3Ccircle cx='6' cy='6' r='4.5'/%3E%3Cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3E%3Ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3E%3C/svg%3E");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.form-control.is-invalid:focus,.was-validated .form-control:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .2rem rgba(220,53,69,.25)}.was-validated textarea.form-control:invalid,textarea.form-control.is-invalid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.custom-select.is-invalid,.was-validated .custom-select:invalid{border-color:#dc3545;padding-right:calc(.75em + 2.3125rem);background:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='4' height='5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right .75rem center/8px 10px,url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' fill='none' stroke='%23dc3545'%3E%3Ccircle cx='6' cy='6' r='4.5'/%3E%3Cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3E%3Ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3E%3C/svg%3E") #fff no-repeat center right 1.75rem/calc(.75em + .375rem) calc(.75em + .375rem)}.custom-select.is-invalid:focus,.was-validated .custom-select:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .2rem rgba(220,53,69,.25)}.form-check-input.is-invalid~.form-check-label,.was-validated 
.form-check-input:invalid~.form-check-label{color:#dc3545}.form-check-input.is-invalid~.invalid-feedback,.form-check-input.is-invalid~.invalid-tooltip,.was-validated .form-check-input:invalid~.invalid-feedback,.was-validated .form-check-input:invalid~.invalid-tooltip{display:block}.custom-control-input.is-invalid~.custom-control-label,.was-validated .custom-control-input:invalid~.custom-control-label{color:#dc3545}.custom-control-input.is-invalid~.custom-control-label:before,.was-validated .custom-control-input:invalid~.custom-control-label:before{border-color:#dc3545}.custom-control-input.is-invalid:checked~.custom-control-label:before,.was-validated .custom-control-input:invalid:checked~.custom-control-label:before{border-color:#e4606d;background-color:#e4606d}.custom-control-input.is-invalid:focus~.custom-control-label:before,.was-validated .custom-control-input:invalid:focus~.custom-control-label:before{box-shadow:0 0 0 .2rem rgba(220,53,69,.25)}.custom-control-input.is-invalid:focus:not(:checked)~.custom-control-label:before,.custom-file-input.is-invalid~.custom-file-label,.was-validated .custom-control-input:invalid:focus:not(:checked)~.custom-control-label:before,.was-validated .custom-file-input:invalid~.custom-file-label{border-color:#dc3545}.custom-file-input.is-invalid:focus~.custom-file-label,.was-validated .custom-file-input:invalid:focus~.custom-file-label{border-color:#dc3545;box-shadow:0 0 0 .2rem rgba(220,53,69,.25)}.form-inline{display:flex;flex-flow:row wrap;align-items:center}.form-inline .form-check{width:100%}@media(min-width:616px){.form-inline label{justify-content:center}.form-inline .form-group,.form-inline label{display:flex;align-items:center;margin-bottom:0}.form-inline .form-group{flex:0 0 auto;flex-flow:row wrap}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-plaintext{display:inline-block}.form-inline .custom-select,.form-inline .input-group{width:auto}.form-inline .form-check{display:flex;align-items:center;justify-content:center;width:auto;padding-left:0}.form-inline .form-check-input{position:relative;flex-shrink:0;margin-top:0;margin-right:.25rem;margin-left:0}.form-inline .custom-control{align-items:center;justify-content:center}.form-inline .custom-control-label{margin-bottom:0}}.btn{display:inline-block;font-family:inherit;font-weight:700;color:#212529;text-align:center;vertical-align:middle;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-color:transparent;border:1px solid transparent;padding:12px 32px;font-size:.875rem;line-height:20px;border-radius:8px;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion:reduce){.btn{transition:none}}.btn:hover{color:#212529;text-decoration:none}.btn.focus,.btn:focus{outline:0;box-shadow:none}.btn.disabled,.btn:disabled{opacity:.65}a.btn.disabled,fieldset:disabled a.btn{pointer-events:none}.btn-primary{color:#495057;background-color:#fc0;border-color:#fc0}.btn-primary.focus,.btn-primary:focus,.btn-primary:hover{color:#495057;background-color:#d9ad00;border-color:#cca300}.btn-primary.focus,.btn-primary:focus{box-shadow:0 0 0 0 
rgba(228,185,13,.5)}.btn-primary.disabled,.btn-primary:disabled{color:#495057;background-color:#fc0;border-color:#fc0}.btn-primary:not(:disabled):not(.disabled).active,.btn-primary:not(:disabled):not(.disabled):active,.show>.btn-primary.dropdown-toggle{color:#495057;background-color:#cca300;border-color:#bf9900}.btn-primary:not(:disabled):not(.disabled).active:focus,.btn-primary:not(:disabled):not(.disabled):active:focus,.show>.btn-primary.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,185,13,.5)}.btn-secondary{color:#fff;background-color:#212529;border-color:#212529}.btn-secondary.focus,.btn-secondary:focus,.btn-secondary:hover{color:#fff;background-color:#101214;border-color:#0a0c0d}.btn-secondary.focus,.btn-secondary:focus{box-shadow:0 0 0 0 rgba(66,70,73,.5)}.btn-secondary.disabled,.btn-secondary:disabled{color:#fff;background-color:#212529;border-color:#212529}.btn-secondary:not(:disabled):not(.disabled).active,.btn-secondary:not(:disabled):not(.disabled):active,.show>.btn-secondary.dropdown-toggle{color:#fff;background-color:#0a0c0d;border-color:#050506}.btn-secondary:not(:disabled):not(.disabled).active:focus,.btn-secondary:not(:disabled):not(.disabled):active:focus,.show>.btn-secondary.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(66,70,73,.5)}.btn-success{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success.focus,.btn-success:focus,.btn-success:hover{color:#fff;background-color:#218838;border-color:#1e7e34}.btn-success.focus,.btn-success:focus{box-shadow:0 0 0 0 rgba(72,180,97,.5)}.btn-success.disabled,.btn-success:disabled{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success:not(:disabled):not(.disabled).active,.btn-success:not(:disabled):not(.disabled):active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#1e7e34;border-color:#1c7430}.btn-success:not(:disabled):not(.disabled).active:focus,.btn-success:not(:disabled):not(.disabled):active:focus,.show>.btn-success.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(72,180,97,.5)}.btn-info{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info.focus,.btn-info:focus,.btn-info:hover{color:#fff;background-color:#138496;border-color:#117a8b}.btn-info.focus,.btn-info:focus{box-shadow:0 0 0 0 rgba(58,176,195,.5)}.btn-info.disabled,.btn-info:disabled{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:not(:disabled):not(.disabled).active,.btn-info:not(:disabled):not(.disabled):active,.show>.btn-info.dropdown-toggle{color:#fff;background-color:#117a8b;border-color:#10707f}.btn-info:not(:disabled):not(.disabled).active:focus,.btn-info:not(:disabled):not(.disabled):active:focus,.show>.btn-info.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(58,176,195,.5)}.btn-warning{color:#495057;background-color:#ffc107;border-color:#ffc107}.btn-warning.focus,.btn-warning:focus,.btn-warning:hover{color:#495057;background-color:#e0a800;border-color:#d39e00}.btn-warning.focus,.btn-warning:focus{box-shadow:0 0 0 0 rgba(228,176,19,.5)}.btn-warning.disabled,.btn-warning:disabled{color:#495057;background-color:#ffc107;border-color:#ffc107}.btn-warning:not(:disabled):not(.disabled).active,.btn-warning:not(:disabled):not(.disabled):active,.show>.btn-warning.dropdown-toggle{color:#495057;background-color:#d39e00;border-color:#c69500}.btn-warning:not(:disabled):not(.disabled).active:focus,.btn-warning:not(:disabled):not(.disabled):active:focus,.show>.btn-warning.dropdown-toggle:focus{box-shadow:0 0 0 0 
rgba(228,176,19,.5)}.btn-danger{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger.focus,.btn-danger:focus,.btn-danger:hover{color:#fff;background-color:#c82333;border-color:#bd2130}.btn-danger.focus,.btn-danger:focus{box-shadow:0 0 0 0 rgba(225,83,97,.5)}.btn-danger.disabled,.btn-danger:disabled{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:not(:disabled):not(.disabled).active,.btn-danger:not(:disabled):not(.disabled):active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#bd2130;border-color:#b21f2d}.btn-danger:not(:disabled):not(.disabled).active:focus,.btn-danger:not(:disabled):not(.disabled):active:focus,.show>.btn-danger.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(225,83,97,.5)}.btn-light{color:#495057;background-color:#f1f6f9;border-color:#f1f6f9}.btn-light.focus,.btn-light:focus,.btn-light:hover{color:#495057;background-color:#d6e5ee;border-color:#cddfea}.btn-light.focus,.btn-light:focus{box-shadow:0 0 0 0 rgba(216,221,225,.5)}.btn-light.disabled,.btn-light:disabled{color:#495057;background-color:#f1f6f9;border-color:#f1f6f9}.btn-light:not(:disabled):not(.disabled).active,.btn-light:not(:disabled):not(.disabled):active,.show>.btn-light.dropdown-toggle{color:#495057;background-color:#cddfea;border-color:#c4d9e6}.btn-light:not(:disabled):not(.disabled).active:focus,.btn-light:not(:disabled):not(.disabled):active:focus,.show>.btn-light.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(216,221,225,.5)}.btn-dark{color:#fff;background-color:#495057;border-color:#495057}.btn-dark.focus,.btn-dark:focus,.btn-dark:hover{color:#fff;background-color:#383d42;border-color:#32373b}.btn-dark.focus,.btn-dark:focus{box-shadow:0 0 0 0 rgba(100,106,112,.5)}.btn-dark.disabled,.btn-dark:disabled{color:#fff;background-color:#495057;border-color:#495057}.btn-dark:not(:disabled):not(.disabled).active,.btn-dark:not(:disabled):not(.disabled):active,.show>.btn-dark.dropdown-toggle{color:#fff;background-color:#32373b;border-color:#2c3034}.btn-dark:not(:disabled):not(.disabled).active:focus,.btn-dark:not(:disabled):not(.disabled):active:focus,.show>.btn-dark.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(100,106,112,.5)}.btn-primary-light{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-primary-light.focus,.btn-primary-light:focus,.btn-primary-light:hover{color:#495057;background-color:#ffedca;border-color:#ffe9bd}.btn-primary-light.focus,.btn-primary-light:focus{box-shadow:0 0 0 0 rgba(228,225,217,.5)}.btn-primary-light.disabled,.btn-primary-light:disabled{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-primary-light:not(:disabled):not(.disabled).active,.btn-primary-light:not(:disabled):not(.disabled):active,.show>.btn-primary-light.dropdown-toggle{color:#495057;background-color:#ffe9bd;border-color:#ffe5b0}.btn-primary-light:not(:disabled):not(.disabled).active:focus,.btn-primary-light:not(:disabled):not(.disabled):active:focus,.show>.btn-primary-light.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,225,217,.5)}.btn-secondary-light{color:#495057;background-color:#fff;border-color:#fff}.btn-secondary-light.focus,.btn-secondary-light:focus,.btn-secondary-light:hover{color:#495057;background-color:#ececec;border-color:#e6e6e6}.btn-secondary-light.focus,.btn-secondary-light:focus{box-shadow:0 0 0 0 
rgba(228,229,230,.5)}.btn-secondary-light.disabled,.btn-secondary-light:disabled{color:#495057;background-color:#fff;border-color:#fff}.btn-secondary-light:not(:disabled):not(.disabled).active,.btn-secondary-light:not(:disabled):not(.disabled):active,.show>.btn-secondary-light.dropdown-toggle{color:#495057;background-color:#e6e6e6;border-color:#dfdfdf}.btn-secondary-light:not(:disabled):not(.disabled).active:focus,.btn-secondary-light:not(:disabled):not(.disabled):active:focus,.show>.btn-secondary-light.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,229,230,.5)}.btn-tertiary{color:#fff;background-color:#257af4;border-color:#257af4}.btn-tertiary.focus,.btn-tertiary:focus,.btn-tertiary:hover{color:#fff;background-color:#0c66e7;border-color:#0b60db}.btn-tertiary.focus,.btn-tertiary:focus{box-shadow:0 0 0 0 rgba(70,142,246,.5)}.btn-tertiary.disabled,.btn-tertiary:disabled{color:#fff;background-color:#257af4;border-color:#257af4}.btn-tertiary:not(:disabled):not(.disabled).active,.btn-tertiary:not(:disabled):not(.disabled):active,.show>.btn-tertiary.dropdown-toggle{color:#fff;background-color:#0b60db;border-color:#0a5bcf}.btn-tertiary:not(:disabled):not(.disabled).active:focus,.btn-tertiary:not(:disabled):not(.disabled):active:focus,.show>.btn-tertiary.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(70,142,246,.5)}.btn-tertiary-light{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-tertiary-light.focus,.btn-tertiary-light:focus,.btn-tertiary-light:hover{color:#495057;background-color:#bedffd;border-color:#b2d8fc}.btn-tertiary-light.focus,.btn-tertiary-light:focus{box-shadow:0 0 0 0 rgba(204,217,229,.5)}.btn-tertiary-light.disabled,.btn-tertiary-light:disabled{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-tertiary-light:not(:disabled):not(.disabled).active,.btn-tertiary-light:not(:disabled):not(.disabled):active,.show>.btn-tertiary-light.dropdown-toggle{color:#495057;background-color:#b2d8fc;border-color:#a5d2fc}.btn-tertiary-light:not(:disabled):not(.disabled).active:focus,.btn-tertiary-light:not(:disabled):not(.disabled):active:focus,.show>.btn-tertiary-light.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(204,217,229,.5)}.btn-white{color:#495057;background-color:#fff;border-color:#fff}.btn-white.focus,.btn-white:focus,.btn-white:hover{color:#495057;background-color:#ececec;border-color:#e6e6e6}.btn-white.focus,.btn-white:focus{box-shadow:0 0 0 0 rgba(228,229,230,.5)}.btn-white.disabled,.btn-white:disabled{color:#495057;background-color:#fff;border-color:#fff}.btn-white:not(:disabled):not(.disabled).active,.btn-white:not(:disabled):not(.disabled):active,.show>.btn-white.dropdown-toggle{color:#495057;background-color:#e6e6e6;border-color:#dfdfdf}.btn-white:not(:disabled):not(.disabled).active:focus,.btn-white:not(:disabled):not(.disabled):active:focus,.show>.btn-white.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,229,230,.5)}.btn-black{color:#fff;background-color:#212529;border-color:#212529}.btn-black.focus,.btn-black:focus,.btn-black:hover{color:#fff;background-color:#101214;border-color:#0a0c0d}.btn-black.focus,.btn-black:focus{box-shadow:0 0 0 0 
rgba(66,70,73,.5)}.btn-black.disabled,.btn-black:disabled{color:#fff;background-color:#212529;border-color:#212529}.btn-black:not(:disabled):not(.disabled).active,.btn-black:not(:disabled):not(.disabled):active,.show>.btn-black.dropdown-toggle{color:#fff;background-color:#0a0c0d;border-color:#050506}.btn-black:not(:disabled):not(.disabled).active:focus,.btn-black:not(:disabled):not(.disabled):active:focus,.show>.btn-black.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(66,70,73,.5)}.btn-blue{color:#fff;background-color:#257af4;border-color:#257af4}.btn-blue.focus,.btn-blue:focus,.btn-blue:hover{color:#fff;background-color:#0c66e7;border-color:#0b60db}.btn-blue.focus,.btn-blue:focus{box-shadow:0 0 0 0 rgba(70,142,246,.5)}.btn-blue.disabled,.btn-blue:disabled{color:#fff;background-color:#257af4;border-color:#257af4}.btn-blue:not(:disabled):not(.disabled).active,.btn-blue:not(:disabled):not(.disabled):active,.show>.btn-blue.dropdown-toggle{color:#fff;background-color:#0b60db;border-color:#0a5bcf}.btn-blue:not(:disabled):not(.disabled).active:focus,.btn-blue:not(:disabled):not(.disabled):active:focus,.show>.btn-blue.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(70,142,246,.5)}.btn-light-blue{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-light-blue.focus,.btn-light-blue:focus,.btn-light-blue:hover{color:#495057;background-color:#bedffd;border-color:#b2d8fc}.btn-light-blue.focus,.btn-light-blue:focus{box-shadow:0 0 0 0 rgba(204,217,229,.5)}.btn-light-blue.disabled,.btn-light-blue:disabled{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-light-blue:not(:disabled):not(.disabled).active,.btn-light-blue:not(:disabled):not(.disabled):active,.show>.btn-light-blue.dropdown-toggle{color:#495057;background-color:#b2d8fc;border-color:#a5d2fc}.btn-light-blue:not(:disabled):not(.disabled).active:focus,.btn-light-blue:not(:disabled):not(.disabled):active:focus,.show>.btn-light-blue.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(204,217,229,.5)}.btn-yellow{color:#495057;background-color:#fc0;border-color:#fc0}.btn-yellow.focus,.btn-yellow:focus,.btn-yellow:hover{color:#495057;background-color:#d9ad00;border-color:#cca300}.btn-yellow.focus,.btn-yellow:focus{box-shadow:0 0 0 0 rgba(228,185,13,.5)}.btn-yellow.disabled,.btn-yellow:disabled{color:#495057;background-color:#fc0;border-color:#fc0}.btn-yellow:not(:disabled):not(.disabled).active,.btn-yellow:not(:disabled):not(.disabled):active,.show>.btn-yellow.dropdown-toggle{color:#495057;background-color:#cca300;border-color:#bf9900}.btn-yellow:not(:disabled):not(.disabled).active:focus,.btn-yellow:not(:disabled):not(.disabled):active:focus,.show>.btn-yellow.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,185,13,.5)}.btn-light-yellow{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-light-yellow.focus,.btn-light-yellow:focus,.btn-light-yellow:hover{color:#495057;background-color:#ffedca;border-color:#ffe9bd}.btn-light-yellow.focus,.btn-light-yellow:focus{box-shadow:0 0 0 0 rgba(228,225,217,.5)}.btn-light-yellow.disabled,.btn-light-yellow:disabled{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-light-yellow:not(:disabled):not(.disabled).active,.btn-light-yellow:not(:disabled):not(.disabled):active,.show>.btn-light-yellow.dropdown-toggle{color:#495057;background-color:#ffe9bd;border-color:#ffe5b0}.btn-light-yellow:not(:disabled):not(.disabled).active:focus,.btn-light-yellow:not(:disabled):not(.disabled):active:focus,.show>.btn-light-yellow.dropdown-toggle:focus{box-shadow:0 0 0 0 
rgba(228,225,217,.5)}.btn-orange{color:#495057;background-color:#ff8c00;border-color:#ff8c00}.btn-orange.focus,.btn-orange:focus,.btn-orange:hover{color:#fff;background-color:#d97700;border-color:#cc7000}.btn-orange.focus,.btn-orange:focus{box-shadow:0 0 0 0 rgba(228,131,13,.5)}.btn-orange.disabled,.btn-orange:disabled{color:#495057;background-color:#ff8c00;border-color:#ff8c00}.btn-orange:not(:disabled):not(.disabled).active,.btn-orange:not(:disabled):not(.disabled):active,.show>.btn-orange.dropdown-toggle{color:#fff;background-color:#cc7000;border-color:#bf6900}.btn-orange:not(:disabled):not(.disabled).active:focus,.btn-orange:not(:disabled):not(.disabled):active:focus,.show>.btn-orange.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,131,13,.5)}.btn-light-orange{color:#495057;background-color:#ffe4b5;border-color:#ffe4b5}.btn-light-orange.focus,.btn-light-orange:focus,.btn-light-orange:hover{color:#495057;background-color:#ffd68f;border-color:#ffd182}.btn-light-orange.focus,.btn-light-orange:focus{box-shadow:0 0 0 0 rgba(228,206,167,.5)}.btn-light-orange.disabled,.btn-light-orange:disabled{color:#495057;background-color:#ffe4b5;border-color:#ffe4b5}.btn-light-orange:not(:disabled):not(.disabled).active,.btn-light-orange:not(:disabled):not(.disabled):active,.show>.btn-light-orange.dropdown-toggle{color:#495057;background-color:#ffd182;border-color:#ffcd75}.btn-light-orange:not(:disabled):not(.disabled).active:focus,.btn-light-orange:not(:disabled):not(.disabled):active:focus,.show>.btn-light-orange.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,206,167,.5)}.btn-red{color:#fff;background-color:#ff3939;border-color:#ff3939}.btn-red.focus,.btn-red:focus,.btn-red:hover{color:#fff;background-color:#ff1313;border-color:#ff0606}.btn-red.focus,.btn-red:focus{box-shadow:0 0 0 0 rgba(255,87,87,.5)}.btn-red.disabled,.btn-red:disabled{color:#fff;background-color:#ff3939;border-color:#ff3939}.btn-red:not(:disabled):not(.disabled).active,.btn-red:not(:disabled):not(.disabled):active,.show>.btn-red.dropdown-toggle{color:#fff;background-color:#ff0606;border-color:#f80000}.btn-red:not(:disabled):not(.disabled).active:focus,.btn-red:not(:disabled):not(.disabled):active:focus,.show>.btn-red.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,87,87,.5)}.btn-light-red{color:#495057;background-color:#ffe4e1;border-color:#ffe4e1}.btn-light-red.focus,.btn-light-red:focus,.btn-light-red:hover{color:#495057;background-color:#ffc2bb;border-color:#ffb6ae}.btn-light-red.focus,.btn-light-red:focus{box-shadow:0 0 0 0 rgba(228,206,204,.5)}.btn-light-red.disabled,.btn-light-red:disabled{color:#495057;background-color:#ffe4e1;border-color:#ffe4e1}.btn-light-red:not(:disabled):not(.disabled).active,.btn-light-red:not(:disabled):not(.disabled):active,.show>.btn-light-red.dropdown-toggle{color:#495057;background-color:#ffb6ae;border-color:#ffaba1}.btn-light-red:not(:disabled):not(.disabled).active:focus,.btn-light-red:not(:disabled):not(.disabled):active:focus,.show>.btn-light-red.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,206,204,.5)}.btn-medium{color:#495057;background-color:#d6dbdf;border-color:#d6dbdf}.btn-medium.focus,.btn-medium:focus,.btn-medium:hover{color:#495057;background-color:#c1c8ce;border-color:#b9c2c9}.btn-medium.focus,.btn-medium:focus{box-shadow:0 0 0 0 
rgba(193,198,203,.5)}.btn-medium.disabled,.btn-medium:disabled{color:#495057;background-color:#d6dbdf;border-color:#d6dbdf}.btn-medium:not(:disabled):not(.disabled).active,.btn-medium:not(:disabled):not(.disabled):active,.show>.btn-medium.dropdown-toggle{color:#495057;background-color:#b9c2c9;border-color:#b2bcc3}.btn-medium:not(:disabled):not(.disabled).active:focus,.btn-medium:not(:disabled):not(.disabled):active:focus,.show>.btn-medium.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(193,198,203,.5)}.btn-outline-primary{color:#fc0;border-color:#fc0}.btn-outline-primary:hover{color:#495057;background-color:#fc0;border-color:#fc0}.btn-outline-primary.focus,.btn-outline-primary:focus{box-shadow:0 0 0 0 rgba(255,204,0,.5)}.btn-outline-primary.disabled,.btn-outline-primary:disabled{color:#fc0;background-color:transparent}.btn-outline-primary:not(:disabled):not(.disabled).active,.btn-outline-primary:not(:disabled):not(.disabled):active,.show>.btn-outline-primary.dropdown-toggle{color:#495057;background-color:#fc0;border-color:#fc0}.btn-outline-primary:not(:disabled):not(.disabled).active:focus,.btn-outline-primary:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-primary.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,204,0,.5)}.btn-outline-secondary{color:#212529;border-color:#212529}.btn-outline-secondary:hover{color:#fff;background-color:#212529;border-color:#212529}.btn-outline-secondary.focus,.btn-outline-secondary:focus{box-shadow:0 0 0 0 rgba(33,37,41,.5)}.btn-outline-secondary.disabled,.btn-outline-secondary:disabled{color:#212529;background-color:transparent}.btn-outline-secondary:not(:disabled):not(.disabled).active,.btn-outline-secondary:not(:disabled):not(.disabled):active,.show>.btn-outline-secondary.dropdown-toggle{color:#fff;background-color:#212529;border-color:#212529}.btn-outline-secondary:not(:disabled):not(.disabled).active:focus,.btn-outline-secondary:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-secondary.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(33,37,41,.5)}.btn-outline-success{color:#28a745;border-color:#28a745}.btn-outline-success:hover{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success.focus,.btn-outline-success:focus{box-shadow:0 0 0 0 rgba(40,167,69,.5)}.btn-outline-success.disabled,.btn-outline-success:disabled{color:#28a745;background-color:transparent}.btn-outline-success:not(:disabled):not(.disabled).active,.btn-outline-success:not(:disabled):not(.disabled):active,.show>.btn-outline-success.dropdown-toggle{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success:not(:disabled):not(.disabled).active:focus,.btn-outline-success:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-success.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(40,167,69,.5)}.btn-outline-info{color:#17a2b8;border-color:#17a2b8}.btn-outline-info:hover{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info.focus,.btn-outline-info:focus{box-shadow:0 0 0 0 rgba(23,162,184,.5)}.btn-outline-info.disabled,.btn-outline-info:disabled{color:#17a2b8;background-color:transparent}.btn-outline-info:not(:disabled):not(.disabled).active,.btn-outline-info:not(:disabled):not(.disabled):active,.show>.btn-outline-info.dropdown-toggle{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info:not(:disabled):not(.disabled).active:focus,.btn-outline-info:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-info.dropdown-toggle:focus{box-shadow:0 0 0 0 
rgba(23,162,184,.5)}.btn-outline-warning{color:#ffc107;border-color:#ffc107}.btn-outline-warning:hover{color:#495057;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning.focus,.btn-outline-warning:focus{box-shadow:0 0 0 0 rgba(255,193,7,.5)}.btn-outline-warning.disabled,.btn-outline-warning:disabled{color:#ffc107;background-color:transparent}.btn-outline-warning:not(:disabled):not(.disabled).active,.btn-outline-warning:not(:disabled):not(.disabled):active,.show>.btn-outline-warning.dropdown-toggle{color:#495057;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning:not(:disabled):not(.disabled).active:focus,.btn-outline-warning:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-warning.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,193,7,.5)}.btn-outline-danger{color:#dc3545;border-color:#dc3545}.btn-outline-danger:hover{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger.focus,.btn-outline-danger:focus{box-shadow:0 0 0 0 rgba(220,53,69,.5)}.btn-outline-danger.disabled,.btn-outline-danger:disabled{color:#dc3545;background-color:transparent}.btn-outline-danger:not(:disabled):not(.disabled).active,.btn-outline-danger:not(:disabled):not(.disabled):active,.show>.btn-outline-danger.dropdown-toggle{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger:not(:disabled):not(.disabled).active:focus,.btn-outline-danger:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-danger.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(220,53,69,.5)}.btn-outline-light{color:#f1f6f9;border-color:#f1f6f9}.btn-outline-light:hover{color:#495057;background-color:#f1f6f9;border-color:#f1f6f9}.btn-outline-light.focus,.btn-outline-light:focus{box-shadow:0 0 0 0 rgba(241,246,249,.5)}.btn-outline-light.disabled,.btn-outline-light:disabled{color:#f1f6f9;background-color:transparent}.btn-outline-light:not(:disabled):not(.disabled).active,.btn-outline-light:not(:disabled):not(.disabled):active,.show>.btn-outline-light.dropdown-toggle{color:#495057;background-color:#f1f6f9;border-color:#f1f6f9}.btn-outline-light:not(:disabled):not(.disabled).active:focus,.btn-outline-light:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-light.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(241,246,249,.5)}.btn-outline-dark{color:#495057;border-color:#495057}.btn-outline-dark:hover{color:#fff;background-color:#495057;border-color:#495057}.btn-outline-dark.focus,.btn-outline-dark:focus{box-shadow:0 0 0 0 rgba(73,80,87,.5)}.btn-outline-dark.disabled,.btn-outline-dark:disabled{color:#495057;background-color:transparent}.btn-outline-dark:not(:disabled):not(.disabled).active,.btn-outline-dark:not(:disabled):not(.disabled):active,.show>.btn-outline-dark.dropdown-toggle{color:#fff;background-color:#495057;border-color:#495057}.btn-outline-dark:not(:disabled):not(.disabled).active:focus,.btn-outline-dark:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-dark.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(73,80,87,.5)}.btn-outline-primary-light{color:#fffaf0;border-color:#fffaf0}.btn-outline-primary-light:hover{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-outline-primary-light.focus,.btn-outline-primary-light:focus{box-shadow:0 0 0 0 
rgba(255,250,240,.5)}.btn-outline-primary-light.disabled,.btn-outline-primary-light:disabled{color:#fffaf0;background-color:transparent}.btn-outline-primary-light:not(:disabled):not(.disabled).active,.btn-outline-primary-light:not(:disabled):not(.disabled):active,.show>.btn-outline-primary-light.dropdown-toggle{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-outline-primary-light:not(:disabled):not(.disabled).active:focus,.btn-outline-primary-light:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-primary-light.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,250,240,.5)}.btn-outline-secondary-light{color:#fff;border-color:#fff}.btn-outline-secondary-light:hover{color:#495057;background-color:#fff;border-color:#fff}.btn-outline-secondary-light.focus,.btn-outline-secondary-light:focus{box-shadow:0 0 0 0 hsla(0,0%,100%,.5)}.btn-outline-secondary-light.disabled,.btn-outline-secondary-light:disabled{color:#fff;background-color:transparent}.btn-outline-secondary-light:not(:disabled):not(.disabled).active,.btn-outline-secondary-light:not(:disabled):not(.disabled):active,.show>.btn-outline-secondary-light.dropdown-toggle{color:#495057;background-color:#fff;border-color:#fff}.btn-outline-secondary-light:not(:disabled):not(.disabled).active:focus,.btn-outline-secondary-light:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-secondary-light.dropdown-toggle:focus{box-shadow:0 0 0 0 hsla(0,0%,100%,.5)}.btn-outline-tertiary{color:#257af4;border-color:#257af4}.btn-outline-tertiary:hover{color:#fff;background-color:#257af4;border-color:#257af4}.btn-outline-tertiary.focus,.btn-outline-tertiary:focus{box-shadow:0 0 0 0 rgba(37,122,244,.5)}.btn-outline-tertiary.disabled,.btn-outline-tertiary:disabled{color:#257af4;background-color:transparent}.btn-outline-tertiary:not(:disabled):not(.disabled).active,.btn-outline-tertiary:not(:disabled):not(.disabled):active,.show>.btn-outline-tertiary.dropdown-toggle{color:#fff;background-color:#257af4;border-color:#257af4}.btn-outline-tertiary:not(:disabled):not(.disabled).active:focus,.btn-outline-tertiary:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-tertiary.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(37,122,244,.5)}.btn-outline-tertiary-light{color:#e3f1fe;border-color:#e3f1fe}.btn-outline-tertiary-light:hover{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-outline-tertiary-light.focus,.btn-outline-tertiary-light:focus{box-shadow:0 0 0 0 rgba(227,241,254,.5)}.btn-outline-tertiary-light.disabled,.btn-outline-tertiary-light:disabled{color:#e3f1fe;background-color:transparent}.btn-outline-tertiary-light:not(:disabled):not(.disabled).active,.btn-outline-tertiary-light:not(:disabled):not(.disabled):active,.show>.btn-outline-tertiary-light.dropdown-toggle{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-outline-tertiary-light:not(:disabled):not(.disabled).active:focus,.btn-outline-tertiary-light:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-tertiary-light.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(227,241,254,.5)}.btn-outline-white{color:#fff;border-color:#fff}.btn-outline-white:hover{color:#495057;background-color:#fff;border-color:#fff}.btn-outline-white.focus,.btn-outline-white:focus{box-shadow:0 0 0 0 
hsla(0,0%,100%,.5)}.btn-outline-white.disabled,.btn-outline-white:disabled{color:#fff;background-color:transparent}.btn-outline-white:not(:disabled):not(.disabled).active,.btn-outline-white:not(:disabled):not(.disabled):active,.show>.btn-outline-white.dropdown-toggle{color:#495057;background-color:#fff;border-color:#fff}.btn-outline-white:not(:disabled):not(.disabled).active:focus,.btn-outline-white:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-white.dropdown-toggle:focus{box-shadow:0 0 0 0 hsla(0,0%,100%,.5)}.btn-outline-black{color:#212529;border-color:#212529}.btn-outline-black:hover{color:#fff;background-color:#212529;border-color:#212529}.btn-outline-black.focus,.btn-outline-black:focus{box-shadow:0 0 0 0 rgba(33,37,41,.5)}.btn-outline-black.disabled,.btn-outline-black:disabled{color:#212529;background-color:transparent}.btn-outline-black:not(:disabled):not(.disabled).active,.btn-outline-black:not(:disabled):not(.disabled):active,.show>.btn-outline-black.dropdown-toggle{color:#fff;background-color:#212529;border-color:#212529}.btn-outline-black:not(:disabled):not(.disabled).active:focus,.btn-outline-black:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-black.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(33,37,41,.5)}.btn-outline-blue{color:#257af4;border-color:#257af4}.btn-outline-blue:hover{color:#fff;background-color:#257af4;border-color:#257af4}.btn-outline-blue.focus,.btn-outline-blue:focus{box-shadow:0 0 0 0 rgba(37,122,244,.5)}.btn-outline-blue.disabled,.btn-outline-blue:disabled{color:#257af4;background-color:transparent}.btn-outline-blue:not(:disabled):not(.disabled).active,.btn-outline-blue:not(:disabled):not(.disabled):active,.show>.btn-outline-blue.dropdown-toggle{color:#fff;background-color:#257af4;border-color:#257af4}.btn-outline-blue:not(:disabled):not(.disabled).active:focus,.btn-outline-blue:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-blue.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(37,122,244,.5)}.btn-outline-light-blue{color:#e3f1fe;border-color:#e3f1fe}.btn-outline-light-blue:hover{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-outline-light-blue.focus,.btn-outline-light-blue:focus{box-shadow:0 0 0 0 rgba(227,241,254,.5)}.btn-outline-light-blue.disabled,.btn-outline-light-blue:disabled{color:#e3f1fe;background-color:transparent}.btn-outline-light-blue:not(:disabled):not(.disabled).active,.btn-outline-light-blue:not(:disabled):not(.disabled):active,.show>.btn-outline-light-blue.dropdown-toggle{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-outline-light-blue:not(:disabled):not(.disabled).active:focus,.btn-outline-light-blue:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-light-blue.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(227,241,254,.5)}.btn-outline-yellow{color:#fc0;border-color:#fc0}.btn-outline-yellow:hover{color:#495057;background-color:#fc0;border-color:#fc0}.btn-outline-yellow.focus,.btn-outline-yellow:focus{box-shadow:0 0 0 0 rgba(255,204,0,.5)}.btn-outline-yellow.disabled,.btn-outline-yellow:disabled{color:#fc0;background-color:transparent}.btn-outline-yellow:not(:disabled):not(.disabled).active,.btn-outline-yellow:not(:disabled):not(.disabled):active,.show>.btn-outline-yellow.dropdown-toggle{color:#495057;background-color:#fc0;border-color:#fc0}.btn-outline-yellow:not(:disabled):not(.disabled).active:focus,.btn-outline-yellow:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-yellow.dropdown-toggle:focus{box-shadow:0 0 0 0 
rgba(255,204,0,.5)}.btn-outline-light-yellow{color:#fffaf0;border-color:#fffaf0}.btn-outline-light-yellow:hover{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-outline-light-yellow.focus,.btn-outline-light-yellow:focus{box-shadow:0 0 0 0 rgba(255,250,240,.5)}.btn-outline-light-yellow.disabled,.btn-outline-light-yellow:disabled{color:#fffaf0;background-color:transparent}.btn-outline-light-yellow:not(:disabled):not(.disabled).active,.btn-outline-light-yellow:not(:disabled):not(.disabled):active,.show>.btn-outline-light-yellow.dropdown-toggle{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-outline-light-yellow:not(:disabled):not(.disabled).active:focus,.btn-outline-light-yellow:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-light-yellow.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,250,240,.5)}.btn-outline-orange{color:#ff8c00;border-color:#ff8c00}.btn-outline-orange:hover{color:#495057;background-color:#ff8c00;border-color:#ff8c00}.btn-outline-orange.focus,.btn-outline-orange:focus{box-shadow:0 0 0 0 rgba(255,140,0,.5)}.btn-outline-orange.disabled,.btn-outline-orange:disabled{color:#ff8c00;background-color:transparent}.btn-outline-orange:not(:disabled):not(.disabled).active,.btn-outline-orange:not(:disabled):not(.disabled):active,.show>.btn-outline-orange.dropdown-toggle{color:#495057;background-color:#ff8c00;border-color:#ff8c00}.btn-outline-orange:not(:disabled):not(.disabled).active:focus,.btn-outline-orange:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-orange.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,140,0,.5)}.btn-outline-light-orange{color:#ffe4b5;border-color:#ffe4b5}.btn-outline-light-orange:hover{color:#495057;background-color:#ffe4b5;border-color:#ffe4b5}.btn-outline-light-orange.focus,.btn-outline-light-orange:focus{box-shadow:0 0 0 0 rgba(255,228,181,.5)}.btn-outline-light-orange.disabled,.btn-outline-light-orange:disabled{color:#ffe4b5;background-color:transparent}.btn-outline-light-orange:not(:disabled):not(.disabled).active,.btn-outline-light-orange:not(:disabled):not(.disabled):active,.show>.btn-outline-light-orange.dropdown-toggle{color:#495057;background-color:#ffe4b5;border-color:#ffe4b5}.btn-outline-light-orange:not(:disabled):not(.disabled).active:focus,.btn-outline-light-orange:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-light-orange.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,228,181,.5)}.btn-outline-red{color:#ff3939;border-color:#ff3939}.btn-outline-red:hover{color:#fff;background-color:#ff3939;border-color:#ff3939}.btn-outline-red.focus,.btn-outline-red:focus{box-shadow:0 0 0 0 rgba(255,57,57,.5)}.btn-outline-red.disabled,.btn-outline-red:disabled{color:#ff3939;background-color:transparent}.btn-outline-red:not(:disabled):not(.disabled).active,.btn-outline-red:not(:disabled):not(.disabled):active,.show>.btn-outline-red.dropdown-toggle{color:#fff;background-color:#ff3939;border-color:#ff3939}.btn-outline-red:not(:disabled):not(.disabled).active:focus,.btn-outline-red:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-red.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,57,57,.5)}.btn-outline-light-red{color:#ffe4e1;border-color:#ffe4e1}.btn-outline-light-red:hover{color:#495057;background-color:#ffe4e1;border-color:#ffe4e1}.btn-outline-light-red.focus,.btn-outline-light-red:focus{box-shadow:0 0 0 0 
rgba(255,228,225,.5)}.btn-outline-light-red.disabled,.btn-outline-light-red:disabled{color:#ffe4e1;background-color:transparent}.btn-outline-light-red:not(:disabled):not(.disabled).active,.btn-outline-light-red:not(:disabled):not(.disabled):active,.show>.btn-outline-light-red.dropdown-toggle{color:#495057;background-color:#ffe4e1;border-color:#ffe4e1}.btn-outline-light-red:not(:disabled):not(.disabled).active:focus,.btn-outline-light-red:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-light-red.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,228,225,.5)}.btn-outline-medium{color:#d6dbdf;border-color:#d6dbdf}.btn-outline-medium:hover{color:#495057;background-color:#d6dbdf;border-color:#d6dbdf}.btn-outline-medium.focus,.btn-outline-medium:focus{box-shadow:0 0 0 0 rgba(214,219,223,.5)}.btn-outline-medium.disabled,.btn-outline-medium:disabled{color:#d6dbdf;background-color:transparent}.btn-outline-medium:not(:disabled):not(.disabled).active,.btn-outline-medium:not(:disabled):not(.disabled):active,.show>.btn-outline-medium.dropdown-toggle{color:#495057;background-color:#d6dbdf;border-color:#d6dbdf}.btn-outline-medium:not(:disabled):not(.disabled).active:focus,.btn-outline-medium:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-medium.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(214,219,223,.5)}.btn-link{font-weight:400;color:#ff8c00;text-decoration:none}.btn-link:hover{color:#ff8c00;text-decoration:underline}.btn-link.focus,.btn-link:focus{text-decoration:underline;box-shadow:none}.btn-link.disabled,.btn-link:disabled{color:#d6dbdf;pointer-events:none}.btn-group-lg>.btn,.btn-lg{padding:16px 32px;font-size:1.125rem;line-height:26px;border-radius:8px}.btn-group-sm>.btn,.btn-sm{padding:12px 32px;font-size:.875rem;line-height:20px;border-radius:8px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:24px}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{transition:opacity .15s linear}@media(prefers-reduced-motion:reduce){.fade{transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{position:relative;height:0;overflow:hidden;transition:height .35s ease}@media(prefers-reduced-motion:reduce){.collapsing{transition:none}}.dropdown,.dropleft,.dropright,.dropup{position:relative}.dropdown-toggle{white-space:nowrap}.dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid;border-right:.3em solid transparent;border-bottom:0;border-left:.3em solid transparent}.dropdown-toggle:empty:after{margin-left:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:10rem;padding:.5rem 0;margin:.125rem 0 0;font-size:1rem;color:#212529;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid rgba(33,37,41,.15);border-radius:8px}.dropdown-menu-left{right:auto;left:0}.dropdown-menu-right{right:0;left:auto}@media(min-width:400px){.dropdown-menu-xs-left{right:auto;left:0}.dropdown-menu-xs-right{right:0;left:auto}}@media(min-width:616px){.dropdown-menu-sm-left{right:auto;left:0}.dropdown-menu-sm-right{right:0;left:auto}}@media(min-width:768px){.dropdown-menu-md-left{right:auto;left:0}.dropdown-menu-md-right{right:0;left:auto}}@media(min-width:980px){.dropdown-menu-lg-left{right:auto;left:0}.dropdown-menu-lg-right{right:0;left:auto}}@media(min-width:1240px){.dropdown-menu-xl-left{right:auto;left:0}.dropdown-menu-xl-right{right:0;left:auto}}.dropup 
.dropdown-menu{top:auto;bottom:100%;margin-top:0;margin-bottom:.125rem}.dropup .dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:0;border-right:.3em solid transparent;border-bottom:.3em solid;border-left:.3em solid transparent}.dropup .dropdown-toggle:empty:after{margin-left:0}.dropright .dropdown-menu{top:0;right:auto;left:100%;margin-top:0;margin-left:.125rem}.dropright .dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:0;border-bottom:.3em solid transparent;border-left:.3em solid}.dropright .dropdown-toggle:empty:after{margin-left:0}.dropright .dropdown-toggle:after{vertical-align:0}.dropleft .dropdown-menu{top:0;right:100%;left:auto;margin-top:0;margin-right:.125rem}.dropleft .dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";display:none}.dropleft .dropdown-toggle:before{display:inline-block;margin-right:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:.3em solid;border-bottom:.3em solid transparent}.dropleft .dropdown-toggle:empty:after{margin-left:0}.dropleft .dropdown-toggle:before{vertical-align:0}.dropdown-menu[x-placement^=bottom],.dropdown-menu[x-placement^=left],.dropdown-menu[x-placement^=right],.dropdown-menu[x-placement^=top]{right:auto;bottom:auto}.dropdown-divider{height:0;margin:4px 0;overflow:hidden;border-top:1px solid #e9ecef}.dropdown-item{display:block;width:100%;padding:.25rem 1.5rem;clear:both;font-weight:400;color:#495057;text-align:inherit;white-space:nowrap;background-color:transparent;border:0}.dropdown-item:focus,.dropdown-item:hover{color:#3d4349;text-decoration:none;background-color:#f1f6f9}.dropdown-item.active,.dropdown-item:active{color:#fff;text-decoration:none;background-color:#fc0}.dropdown-item.disabled,.dropdown-item:disabled{color:#6c757d;pointer-events:none;background-color:transparent}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:.5rem 1.5rem;margin-bottom:0;font-size:.875rem;color:#6c757d;white-space:nowrap}.dropdown-item-text{display:block;padding:.25rem 1.5rem;color:#495057}.btn-group,.btn-group-vertical{position:relative;display:inline-flex;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;flex:1 1 auto}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:1}.btn-toolbar{display:flex;flex-wrap:wrap;justify-content:flex-start}.btn-toolbar .input-group{width:auto}.btn-group>.btn-group:not(:first-child),.btn-group>.btn:not(:first-child){margin-left:-1px}.btn-group>.btn-group:not(:last-child)>.btn,.btn-group>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:not(:first-child)>.btn,.btn-group>.btn:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-right:24px;padding-left:24px}.dropdown-toggle-split:after,.dropright .dropdown-toggle-split:after,.dropup .dropdown-toggle-split:after{margin-left:0}.dropleft 
.dropdown-toggle-split:before{margin-right:0}.btn-group-lg>.btn+.dropdown-toggle-split,.btn-group-sm>.btn+.dropdown-toggle-split,.btn-lg+.dropdown-toggle-split,.btn-sm+.dropdown-toggle-split{padding-right:24px;padding-left:24px}.btn-group-vertical{flex-direction:column;align-items:flex-start;justify-content:center}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{width:100%}.btn-group-vertical>.btn-group:not(:first-child),.btn-group-vertical>.btn:not(:first-child){margin-top:-1px}.btn-group-vertical>.btn-group:not(:last-child)>.btn,.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle){border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child)>.btn,.btn-group-vertical>.btn:not(:first-child){border-top-left-radius:0;border-top-right-radius:0}.btn-group-toggle>.btn,.btn-group-toggle>.btn-group>.btn{margin-bottom:0}.btn-group-toggle>.btn-group>.btn input[type=checkbox],.btn-group-toggle>.btn-group>.btn input[type=radio],.btn-group-toggle>.btn input[type=checkbox],.btn-group-toggle>.btn input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:flex;flex-wrap:wrap;align-items:stretch;width:100%}.input-group>.custom-file,.input-group>.custom-select,.input-group>.form-control,.input-group>.form-control-plaintext{position:relative;flex:1 1 0%;min-width:0;margin-bottom:0}.input-group>.custom-file+.custom-file,.input-group>.custom-file+.custom-select,.input-group>.custom-file+.form-control,.input-group>.custom-select+.custom-file,.input-group>.custom-select+.custom-select,.input-group>.custom-select+.form-control,.input-group>.form-control+.custom-file,.input-group>.form-control+.custom-select,.input-group>.form-control+.form-control,.input-group>.form-control-plaintext+.custom-file,.input-group>.form-control-plaintext+.custom-select,.input-group>.form-control-plaintext+.form-control{margin-left:-1px}.input-group>.custom-file .custom-file-input:focus~.custom-file-label,.input-group>.custom-select:focus,.input-group>.form-control:focus{z-index:3}.input-group>.custom-file .custom-file-input:focus{z-index:4}.input-group>.custom-select:not(:last-child),.input-group>.form-control:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.custom-select:not(:first-child),.input-group>.form-control:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.input-group>.custom-file{display:flex;align-items:center}.input-group>.custom-file:not(:last-child) .custom-file-label,.input-group>.custom-file:not(:last-child) .custom-file-label:after{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.custom-file:not(:first-child) .custom-file-label{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-append,.input-group-prepend{display:flex}.input-group-append .btn,.input-group-prepend .btn{position:relative;z-index:2}.input-group-append .btn:focus,.input-group-prepend .btn:focus{z-index:3}.input-group-append .btn+.btn,.input-group-append .btn+.input-group-text,.input-group-append .input-group-text+.btn,.input-group-append .input-group-text+.input-group-text,.input-group-prepend .btn+.btn,.input-group-prepend .btn+.input-group-text,.input-group-prepend .input-group-text+.btn,.input-group-prepend .input-group-text+.input-group-text{margin-left:-1px}.input-group-prepend{margin-right:-1px}.input-group-append{margin-left:-1px}.input-group-text{display:flex;align-items:center;padding:.375rem 
.75rem;margin-bottom:0;font-size:1rem;font-weight:400;line-height:1.5;color:#6c757d;text-align:center;white-space:nowrap;background-color:#e9ecef;border:1px solid #ced4da;border-radius:8px}.input-group-text input[type=checkbox],.input-group-text input[type=radio]{margin-top:0}.input-group-lg>.custom-select,.input-group-lg>.form-control:not(textarea){height:calc(1.5em + 1rem + 2px)}.input-group-lg>.custom-select,.input-group-lg>.form-control,.input-group-lg>.input-group-append>.btn,.input-group-lg>.input-group-append>.input-group-text,.input-group-lg>.input-group-prepend>.btn,.input-group-lg>.input-group-prepend>.input-group-text{padding:.5rem 1rem;font-size:1.125rem;line-height:1.5;border-radius:8px}.input-group-sm>.custom-select,.input-group-sm>.form-control:not(textarea){height:calc(1.5em + .5rem + 2px)}.input-group-sm>.custom-select,.input-group-sm>.form-control,.input-group-sm>.input-group-append>.btn,.input-group-sm>.input-group-append>.input-group-text,.input-group-sm>.input-group-prepend>.btn,.input-group-sm>.input-group-prepend>.input-group-text{padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:8px}.input-group-lg>.custom-select,.input-group-sm>.custom-select{padding-right:1.75rem}.input-group>.input-group-append:last-child>.btn:not(:last-child):not(.dropdown-toggle),.input-group>.input-group-append:last-child>.input-group-text:not(:last-child),.input-group>.input-group-append:not(:last-child)>.btn,.input-group>.input-group-append:not(:last-child)>.input-group-text,.input-group>.input-group-prepend>.btn,.input-group>.input-group-prepend>.input-group-text{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.input-group-append>.btn,.input-group>.input-group-append>.input-group-text,.input-group>.input-group-prepend:first-child>.btn:not(:first-child),.input-group>.input-group-prepend:first-child>.input-group-text:not(:first-child),.input-group>.input-group-prepend:not(:first-child)>.btn,.input-group>.input-group-prepend:not(:first-child)>.input-group-text{border-top-left-radius:0;border-bottom-left-radius:0}.custom-control{position:relative;display:block;min-height:1.5rem;padding-left:1.5rem}.custom-control-inline{display:inline-flex;margin-right:1rem}.custom-control-input{position:absolute;left:0;z-index:-1;width:1rem;height:1.25rem;opacity:0}.custom-control-input:checked~.custom-control-label:before{color:#fff;border-color:#fc0;background-color:#fc0}.custom-control-input:focus~.custom-control-label:before{box-shadow:0 0 0 .2rem rgba(255,204,0,.25)}.custom-control-input:focus:not(:checked)~.custom-control-label:before{border-color:#ffe680}.custom-control-input:not(:disabled):active~.custom-control-label:before{color:#fff;background-color:#fff0b3;border-color:#fff0b3}.custom-control-input:disabled~.custom-control-label,.custom-control-input[disabled]~.custom-control-label{color:#6c757d}.custom-control-input:disabled~.custom-control-label:before,.custom-control-input[disabled]~.custom-control-label:before{background-color:#e9ecef}.custom-control-label{position:relative;margin-bottom:0;vertical-align:top}.custom-control-label:before{pointer-events:none;background-color:#fff;border:1px solid #d6dbdf}.custom-control-label:after,.custom-control-label:before{position:absolute;top:.25rem;left:-1.5rem;display:block;width:1rem;height:1rem;content:""}.custom-control-label:after{background:no-repeat 50%/50% 50%}.custom-checkbox .custom-control-label:before{border-radius:8px}.custom-checkbox 
.custom-control-input:checked~.custom-control-label:after{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='8' height='8'%3E%3Cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26l2.974 2.99L8 2.193z'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:indeterminate~.custom-control-label:before{border-color:#fc0;background-color:#fc0}.custom-checkbox .custom-control-input:indeterminate~.custom-control-label:after{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='4' height='4'%3E%3Cpath stroke='%23fff' d='M0 2h4'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:disabled:checked~.custom-control-label:before{background-color:rgba(255,204,0,.5)}.custom-checkbox .custom-control-input:disabled:indeterminate~.custom-control-label:before{background-color:rgba(255,204,0,.5)}.custom-radio .custom-control-label:before{border-radius:50%}.custom-radio .custom-control-input:checked~.custom-control-label:after{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='%23fff'/%3E%3C/svg%3E")}.custom-radio .custom-control-input:disabled:checked~.custom-control-label:before{background-color:rgba(255,204,0,.5)}.custom-switch{padding-left:2.25rem}.custom-switch .custom-control-label:before{left:-2.25rem;width:1.75rem;pointer-events:all;border-radius:.5rem}.custom-switch .custom-control-label:after{top:calc(.25rem + 2px);left:calc(-2.25rem + 2px);width:calc(1rem - 4px);height:calc(1rem - 4px);background-color:#d6dbdf;border-radius:.5rem;transition:transform .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion:reduce){.custom-switch .custom-control-label:after{transition:none}}.custom-switch .custom-control-input:checked~.custom-control-label:after{background-color:#fff;transform:translateX(.75rem)}.custom-switch .custom-control-input:disabled:checked~.custom-control-label:before{background-color:rgba(255,204,0,.5)}.custom-select{display:inline-block;width:100%;height:calc(1.5em + .75rem + 2px);padding:.375rem 1.75rem .375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#6c757d;vertical-align:middle;background:#fff url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='4' height='5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right .75rem center/8px 10px;border:1px solid #ced4da;border-radius:8px;-webkit-appearance:none;-moz-appearance:none;appearance:none}.custom-select:focus{border-color:#ffe680;outline:0;box-shadow:0 0 0 .2rem rgba(255,204,0,.25)}.custom-select:focus::-ms-value{color:#6c757d;background-color:#fff}.custom-select[multiple],.custom-select[size]:not([size="1"]){height:auto;padding-right:.75rem;background-image:none}.custom-select:disabled{color:#6c757d;background-color:#e9ecef}.custom-select::-ms-expand{display:none}.custom-select:-moz-focusring{color:transparent;text-shadow:0 0 0 #6c757d}.custom-select-sm{height:calc(1.5em + .5rem + 2px);padding-top:.25rem;padding-bottom:.25rem;padding-left:.5rem;font-size:.875rem}.custom-select-lg{height:calc(1.5em + 1rem + 2px);padding-top:.5rem;padding-bottom:.5rem;padding-left:1rem;font-size:1.125rem}.custom-file{display:inline-block;margin-bottom:0}.custom-file,.custom-file-input{position:relative;width:100%;height:calc(1.5em + .75rem + 
2px)}.custom-file-input{z-index:2;margin:0;opacity:0}.custom-file-input:focus~.custom-file-label{border-color:#ffe680;box-shadow:0 0 0 .2rem rgba(255,204,0,.25)}.custom-file-input:disabled~.custom-file-label,.custom-file-input[disabled]~.custom-file-label{background-color:#e9ecef}.custom-file-input:lang(en)~.custom-file-label:after{content:"Browse"}.custom-file-input~.custom-file-label[data-browse]:after{content:attr(data-browse)}.custom-file-label{left:0;z-index:1;height:calc(1.5em + .75rem + 2px);font-weight:400;background-color:#fff;border:1px solid #ced4da;border-radius:8px}.custom-file-label,.custom-file-label:after{position:absolute;top:0;right:0;padding:.375rem .75rem;line-height:1.5;color:#6c757d}.custom-file-label:after{bottom:0;z-index:3;display:block;height:calc(1.5em + .75rem);content:"Browse";background-color:#e9ecef;border-left:inherit;border-radius:0 8px 8px 0}.custom-range{width:100%;height:1.4rem;padding:0;background-color:transparent;-webkit-appearance:none;-moz-appearance:none;appearance:none}.custom-range:focus{outline:none}.custom-range:focus::-webkit-slider-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(255,204,0,.25)}.custom-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(255,204,0,.25)}.custom-range:focus::-ms-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(255,204,0,.25)}.custom-range::-moz-focus-outer{border:0}.custom-range::-webkit-slider-thumb{width:1rem;height:1rem;margin-top:-.25rem;background-color:#fc0;border:0;border-radius:1rem;-webkit-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;-webkit-appearance:none;appearance:none}@media(prefers-reduced-motion:reduce){.custom-range::-webkit-slider-thumb{-webkit-transition:none;transition:none}}.custom-range::-webkit-slider-thumb:active{background-color:#fff0b3}.custom-range::-webkit-slider-runnable-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.custom-range::-moz-range-thumb{width:1rem;height:1rem;background-color:#fc0;border:0;border-radius:1rem;-moz-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;-moz-appearance:none;appearance:none}@media(prefers-reduced-motion:reduce){.custom-range::-moz-range-thumb{-moz-transition:none;transition:none}}.custom-range::-moz-range-thumb:active{background-color:#fff0b3}.custom-range::-moz-range-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.custom-range::-ms-thumb{width:1rem;height:1rem;margin-top:0;margin-right:.2rem;margin-left:.2rem;background-color:#fc0;border:0;border-radius:1rem;-ms-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s 
ease-in-out;appearance:none}@media(prefers-reduced-motion:reduce){.custom-range::-ms-thumb{-ms-transition:none;transition:none}}.custom-range::-ms-thumb:active{background-color:#fff0b3}.custom-range::-ms-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:transparent;border-color:transparent;border-width:.5rem}.custom-range::-ms-fill-lower,.custom-range::-ms-fill-upper{background-color:#dee2e6;border-radius:1rem}.custom-range::-ms-fill-upper{margin-right:15px}.custom-range:disabled::-webkit-slider-thumb{background-color:#d6dbdf}.custom-range:disabled::-webkit-slider-runnable-track{cursor:default}.custom-range:disabled::-moz-range-thumb{background-color:#d6dbdf}.custom-range:disabled::-moz-range-track{cursor:default}.custom-range:disabled::-ms-thumb{background-color:#d6dbdf}.custom-control-label:before,.custom-file-label,.custom-select{transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion:reduce){.custom-control-label:before,.custom-file-label,.custom-select{transition:none}}.nav{display:flex;flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:0}.nav-link:focus,.nav-link:hover{text-decoration:none}.nav-link.disabled{color:#d6dbdf;pointer-events:none;cursor:default}.nav-tabs{border-bottom:1px solid #6c757d}.nav-tabs .nav-item{margin-bottom:-1px}.nav-tabs .nav-link{border:1px solid transparent;border-top-left-radius:8px;border-top-right-radius:8px}.nav-tabs .nav-link:focus,.nav-tabs .nav-link:hover{border-color:transparent}.nav-tabs .nav-link.disabled{color:#d6dbdf;background-color:transparent;border-color:transparent}.nav-tabs .nav-item.show .nav-link,.nav-tabs .nav-link.active{color:#257af4;background-color:#fff;border-color:#6c757d}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.nav-pills .nav-link{border-radius:8px}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:#fff;background-color:#fc0}.nav-fill .nav-item{flex:1 1 auto;text-align:center}.nav-justified .nav-item{flex-basis:0;flex-grow:1;text-align:center}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{position:relative;padding:24px 0}.navbar,.navbar .container,.navbar .container-fluid,.navbar .container-lg,.navbar .container-md,.navbar .container-sm,.navbar .container-xl,.navbar .container-xs{display:flex;flex-wrap:wrap;align-items:center;justify-content:space-between}.navbar-brand{display:inline-block;padding-top:0;padding-bottom:0;margin-right:0;font-size:1.125rem;line-height:inherit;white-space:nowrap}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-nav{display:flex;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link{padding-right:0;padding-left:0}.navbar-nav .dropdown-menu{position:static;float:none}.navbar-text{display:inline-block;padding-top:0;padding-bottom:0}.navbar-collapse{flex-basis:100%;flex-grow:1;align-items:center}.navbar-toggler{padding:.25rem .75rem;font-size:1.125rem;line-height:1;background-color:transparent;border:1px solid transparent;border-radius:8px}.navbar-toggler:focus,.navbar-toggler:hover{text-decoration:none}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;content:"";background:no-repeat 50%;background-size:100%
100%}@media(max-width:399.98px){.navbar-expand-xs>.container,.navbar-expand-xs>.container-fluid,.navbar-expand-xs>.container-lg,.navbar-expand-xs>.container-md,.navbar-expand-xs>.container-sm,.navbar-expand-xs>.container-xl,.navbar-expand-xs>.container-xs{padding-right:0;padding-left:0}}@media(min-width:400px){.navbar-expand-xs{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-xs .navbar-nav{flex-direction:row}.navbar-expand-xs .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xs .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xs>.container,.navbar-expand-xs>.container-fluid,.navbar-expand-xs>.container-lg,.navbar-expand-xs>.container-md,.navbar-expand-xs>.container-sm,.navbar-expand-xs>.container-xl,.navbar-expand-xs>.container-xs{flex-wrap:nowrap}.navbar-expand-xs .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-xs .navbar-toggler{display:none}}@media(max-width:615.98px){.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid,.navbar-expand-sm>.container-lg,.navbar-expand-sm>.container-md,.navbar-expand-sm>.container-sm,.navbar-expand-sm>.container-xl,.navbar-expand-sm>.container-xs{padding-right:0;padding-left:0}}@media(min-width:616px){.navbar-expand-sm{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-sm .navbar-nav{flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid,.navbar-expand-sm>.container-lg,.navbar-expand-sm>.container-md,.navbar-expand-sm>.container-sm,.navbar-expand-sm>.container-xl,.navbar-expand-sm>.container-xs{flex-wrap:nowrap}.navbar-expand-sm .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-sm .navbar-toggler{display:none}}@media(max-width:767.98px){.navbar-expand-md>.container,.navbar-expand-md>.container-fluid,.navbar-expand-md>.container-lg,.navbar-expand-md>.container-md,.navbar-expand-md>.container-sm,.navbar-expand-md>.container-xl,.navbar-expand-md>.container-xs{padding-right:0;padding-left:0}}@media(min-width:768px){.navbar-expand-md{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-md .navbar-nav{flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-md>.container,.navbar-expand-md>.container-fluid,.navbar-expand-md>.container-lg,.navbar-expand-md>.container-md,.navbar-expand-md>.container-sm,.navbar-expand-md>.container-xl,.navbar-expand-md>.container-xs{flex-wrap:nowrap}.navbar-expand-md .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-md .navbar-toggler{display:none}}@media(max-width:979.98px){.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid,.navbar-expand-lg>.container-lg,.navbar-expand-lg>.container-md,.navbar-expand-lg>.container-sm,.navbar-expand-lg>.container-xl,.navbar-expand-lg>.container-xs{padding-right:0;padding-left:0}}@media(min-width:980px){.navbar-expand-lg{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-lg .navbar-nav{flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav 
.nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid,.navbar-expand-lg>.container-lg,.navbar-expand-lg>.container-md,.navbar-expand-lg>.container-sm,.navbar-expand-lg>.container-xl,.navbar-expand-lg>.container-xs{flex-wrap:nowrap}.navbar-expand-lg .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-lg .navbar-toggler{display:none}}@media(max-width:1239.98px){.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid,.navbar-expand-xl>.container-lg,.navbar-expand-xl>.container-md,.navbar-expand-xl>.container-sm,.navbar-expand-xl>.container-xl,.navbar-expand-xl>.container-xs{padding-right:0;padding-left:0}}@media(min-width:1240px){.navbar-expand-xl{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-xl .navbar-nav{flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid,.navbar-expand-xl>.container-lg,.navbar-expand-xl>.container-md,.navbar-expand-xl>.container-sm,.navbar-expand-xl>.container-xl,.navbar-expand-xl>.container-xs{flex-wrap:nowrap}.navbar-expand-xl .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-xl .navbar-toggler{display:none}}.navbar-expand{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand>.container,.navbar-expand>.container-fluid,.navbar-expand>.container-lg,.navbar-expand>.container-md,.navbar-expand>.container-sm,.navbar-expand>.container-xl,.navbar-expand>.container-xs{padding-right:0;padding-left:0}.navbar-expand .navbar-nav{flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand>.container,.navbar-expand>.container-fluid,.navbar-expand>.container-lg,.navbar-expand>.container-md,.navbar-expand>.container-sm,.navbar-expand>.container-xl,.navbar-expand>.container-xs{flex-wrap:nowrap}.navbar-expand .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand .navbar-toggler{display:none}.navbar-light .navbar-brand,.navbar-light .navbar-brand:focus,.navbar-light .navbar-brand:hover{color:rgba(33,37,41,.9)}.navbar-light .navbar-nav .nav-link{color:rgba(33,37,41,.5)}.navbar-light .navbar-nav .nav-link:focus,.navbar-light .navbar-nav .nav-link:hover{color:rgba(33,37,41,.7)}.navbar-light .navbar-nav .nav-link.disabled{color:rgba(33,37,41,.3)}.navbar-light .navbar-nav .active>.nav-link,.navbar-light .navbar-nav .nav-link.active,.navbar-light .navbar-nav .nav-link.show,.navbar-light .navbar-nav .show>.nav-link{color:rgba(33,37,41,.9)}.navbar-light .navbar-toggler{color:rgba(33,37,41,.5);border-color:rgba(33,37,41,.1)}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='30' height='30'%3E%3Cpath stroke='rgba(33, 37, 41, 0.5)' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-light .navbar-text{color:rgba(33,37,41,.5)}.navbar-light .navbar-text a,.navbar-light .navbar-text a:focus,.navbar-light .navbar-text a:hover{color:rgba(33,37,41,.9)}.navbar-dark .navbar-brand,.navbar-dark .navbar-brand:focus,.navbar-dark .navbar-brand:hover{color:#fff}.navbar-dark .navbar-nav .nav-link{color:hsla(0,0%,100%,.5)}.navbar-dark .navbar-nav .nav-link:focus,.navbar-dark .navbar-nav .nav-link:hover{color:hsla(0,0%,100%,.75)}.navbar-dark 
.navbar-nav .nav-link.disabled{color:hsla(0,0%,100%,.25)}.navbar-dark .navbar-nav .active>.nav-link,.navbar-dark .navbar-nav .nav-link.active,.navbar-dark .navbar-nav .nav-link.show,.navbar-dark .navbar-nav .show>.nav-link{color:#fff}.navbar-dark .navbar-toggler{color:hsla(0,0%,100%,.5);border-color:hsla(0,0%,100%,.1)}.navbar-dark .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='30' height='30'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-dark .navbar-text{color:hsla(0,0%,100%,.5)}.navbar-dark .navbar-text a,.navbar-dark .navbar-text a:focus,.navbar-dark .navbar-text a:hover{color:#fff}.card{position:relative;display:flex;flex-direction:column;min-width:0;word-wrap:break-word;background-color:#fff;background-clip:border-box;border:1px solid #d6dbdf;border-radius:8px}.card>hr{margin-right:0;margin-left:0}.card>.list-group:first-child .list-group-item:first-child{border-top-left-radius:8px;border-top-right-radius:8px}.card>.list-group:last-child .list-group-item:last-child{border-bottom-right-radius:8px;border-bottom-left-radius:8px}.card-body{flex:1 1 auto;min-height:1px;padding:24px}.card-title{margin-bottom:24px}.card-subtitle{margin-top:-12px}.card-subtitle,.card-text:last-child{margin-bottom:0}.card-link:hover{text-decoration:none}.card-link+.card-link{margin-left:24px}.card-header{padding:24px;margin-bottom:0;background-color:#f1f6f9;border-bottom:1px solid #d6dbdf}.card-header:first-child{border-radius:calc(8px - 1px) calc(8px - 1px) 0 0}.card-header+.list-group .list-group-item:first-child{border-top:0}.card-footer{padding:24px;background-color:#f1f6f9;border-top:1px solid #d6dbdf}.card-footer:last-child{border-radius:0 0 calc(8px - 1px) calc(8px - 1px)}.card-header-tabs{margin-bottom:-24px;border-bottom:0}.card-header-pills,.card-header-tabs{margin-right:-12px;margin-left:-12px}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:24px}.card-img,.card-img-bottom,.card-img-top{flex-shrink:0;width:100%}.card-img,.card-img-top{border-top-left-radius:calc(8px - 1px);border-top-right-radius:calc(8px - 1px)}.card-img,.card-img-bottom{border-bottom-right-radius:calc(8px - 1px);border-bottom-left-radius:calc(8px - 1px)}.card-deck .card{margin-bottom:20px}@media(min-width:616px){.card-deck{display:flex;flex-flow:row wrap;margin-right:-20px;margin-left:-20px}.card-deck .card{flex:1 0 0%;margin-right:20px;margin-bottom:0;margin-left:20px}}.card-group>.card{margin-bottom:20px}@media(min-width:616px){.card-group{display:flex;flex-flow:row wrap}.card-group>.card{flex:1 0 0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}.card-group>.card:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:not(:last-child) .card-header,.card-group>.card:not(:last-child) .card-img-top{border-top-right-radius:0}.card-group>.card:not(:last-child) .card-footer,.card-group>.card:not(:last-child) .card-img-bottom{border-bottom-right-radius:0}.card-group>.card:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:not(:first-child) .card-header,.card-group>.card:not(:first-child) .card-img-top{border-top-left-radius:0}.card-group>.card:not(:first-child) .card-footer,.card-group>.card:not(:first-child) .card-img-bottom{border-bottom-left-radius:0}}.card-columns
.card{margin-bottom:40px}@media(min-width:616px){.card-columns{-moz-column-count:3;column-count:3;-moz-column-gap:40px;column-gap:40px;orphans:1;widows:1}.card-columns .card{display:inline-block;width:100%}}.accordion>.card{overflow:hidden}.accordion>.card:not(:last-of-type){border-bottom:0;border-bottom-right-radius:0;border-bottom-left-radius:0}.accordion>.card:not(:first-of-type){border-top-left-radius:0;border-top-right-radius:0}.accordion>.card>.card-header{border-radius:0;margin-bottom:-1px}.breadcrumb{display:flex;flex-wrap:wrap;padding:.75rem 1rem;margin-bottom:1rem;list-style:none;background-color:#e9ecef;border-radius:8px}.breadcrumb-item+.breadcrumb-item{padding-left:.5rem}.breadcrumb-item+.breadcrumb-item:before{display:inline-block;padding-right:.5rem;color:#6c757d;content:"/"}.breadcrumb-item+.breadcrumb-item:hover:before{text-decoration:underline;text-decoration:none}.breadcrumb-item.active{color:#6c757d}.pagination{display:flex;padding-left:0;list-style:none;border-radius:8px}.page-link{position:relative;display:block;padding:.5rem .75rem;margin-left:-1px;line-height:1.25;color:#ff8c00;background-color:#fff;border:1px solid #dee2e6}.page-link:hover{z-index:2;color:#ff8c00;text-decoration:none;background-color:#e9ecef;border-color:#dee2e6}.page-link:focus{z-index:3;outline:0;box-shadow:0 0 0 .2rem rgba(255,204,0,.25)}.page-item:first-child .page-link{margin-left:0;border-top-left-radius:8px;border-bottom-left-radius:8px}.page-item:last-child .page-link{border-top-right-radius:8px;border-bottom-right-radius:8px}.page-item.active .page-link{z-index:3;color:#fff;background-color:#fc0;border-color:#fc0}.page-item.disabled .page-link{color:#6c757d;pointer-events:none;cursor:auto;background-color:#fff;border-color:#dee2e6}.pagination-lg .page-link{padding:.75rem 1.5rem;font-size:1.125rem;line-height:1.5}.pagination-lg .page-item:first-child .page-link{border-top-left-radius:8px;border-bottom-left-radius:8px}.pagination-lg .page-item:last-child .page-link{border-top-right-radius:8px;border-bottom-right-radius:8px}.pagination-sm .page-link{padding:.25rem .5rem;font-size:.875rem;line-height:1.5}.pagination-sm .page-item:first-child .page-link{border-top-left-radius:8px;border-bottom-left-radius:8px}.pagination-sm .page-item:last-child .page-link{border-top-right-radius:8px;border-bottom-right-radius:8px}.badge{display:inline-block;padding:.25em .4em;font-size:75%;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:8px;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion:reduce){.badge{transition:none}}a.badge:focus,a.badge:hover{text-decoration:none}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.badge-pill{padding-right:.6em;padding-left:.6em;border-radius:10rem}.badge-primary{color:#495057;background-color:#fc0}a.badge-primary:focus,a.badge-primary:hover{color:#495057;background-color:#cca300}a.badge-primary.focus,a.badge-primary:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,204,0,.5)}.badge-secondary{color:#fff;background-color:#212529}a.badge-secondary:focus,a.badge-secondary:hover{color:#fff;background-color:#0a0c0d}a.badge-secondary.focus,a.badge-secondary:focus{outline:0;box-shadow:0 0 0 .2rem 
rgba(33,37,41,.5)}.badge-success{color:#fff;background-color:#28a745}a.badge-success:focus,a.badge-success:hover{color:#fff;background-color:#1e7e34}a.badge-success.focus,a.badge-success:focus{outline:0;box-shadow:0 0 0 .2rem rgba(40,167,69,.5)}.badge-info{color:#fff;background-color:#17a2b8}a.badge-info:focus,a.badge-info:hover{color:#fff;background-color:#117a8b}a.badge-info.focus,a.badge-info:focus{outline:0;box-shadow:0 0 0 .2rem rgba(23,162,184,.5)}.badge-warning{color:#495057;background-color:#ffc107}a.badge-warning:focus,a.badge-warning:hover{color:#495057;background-color:#d39e00}a.badge-warning.focus,a.badge-warning:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,193,7,.5)}.badge-danger{color:#fff;background-color:#dc3545}a.badge-danger:focus,a.badge-danger:hover{color:#fff;background-color:#bd2130}a.badge-danger.focus,a.badge-danger:focus{outline:0;box-shadow:0 0 0 .2rem rgba(220,53,69,.5)}.badge-light{color:#495057;background-color:#f1f6f9}a.badge-light:focus,a.badge-light:hover{color:#495057;background-color:#cddfea}a.badge-light.focus,a.badge-light:focus{outline:0;box-shadow:0 0 0 .2rem rgba(241,246,249,.5)}.badge-dark{color:#fff;background-color:#495057}a.badge-dark:focus,a.badge-dark:hover{color:#fff;background-color:#32373b}a.badge-dark.focus,a.badge-dark:focus{outline:0;box-shadow:0 0 0 .2rem rgba(73,80,87,.5)}.badge-primary-light{color:#495057;background-color:#fffaf0}a.badge-primary-light:focus,a.badge-primary-light:hover{color:#495057;background-color:#ffe9bd}a.badge-primary-light.focus,a.badge-primary-light:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,250,240,.5)}.badge-secondary-light{color:#495057;background-color:#fff}a.badge-secondary-light:focus,a.badge-secondary-light:hover{color:#495057;background-color:#e6e6e6}a.badge-secondary-light.focus,a.badge-secondary-light:focus{outline:0;box-shadow:0 0 0 .2rem hsla(0,0%,100%,.5)}.badge-tertiary{color:#fff;background-color:#257af4}a.badge-tertiary:focus,a.badge-tertiary:hover{color:#fff;background-color:#0b60db}a.badge-tertiary.focus,a.badge-tertiary:focus{outline:0;box-shadow:0 0 0 .2rem rgba(37,122,244,.5)}.badge-tertiary-light{color:#495057;background-color:#e3f1fe}a.badge-tertiary-light:focus,a.badge-tertiary-light:hover{color:#495057;background-color:#b2d8fc}a.badge-tertiary-light.focus,a.badge-tertiary-light:focus{outline:0;box-shadow:0 0 0 .2rem rgba(227,241,254,.5)}.badge-white{color:#495057;background-color:#fff}a.badge-white:focus,a.badge-white:hover{color:#495057;background-color:#e6e6e6}a.badge-white.focus,a.badge-white:focus{outline:0;box-shadow:0 0 0 .2rem hsla(0,0%,100%,.5)}.badge-black{color:#fff;background-color:#212529}a.badge-black:focus,a.badge-black:hover{color:#fff;background-color:#0a0c0d}a.badge-black.focus,a.badge-black:focus{outline:0;box-shadow:0 0 0 .2rem rgba(33,37,41,.5)}.badge-blue{color:#fff;background-color:#257af4}a.badge-blue:focus,a.badge-blue:hover{color:#fff;background-color:#0b60db}a.badge-blue.focus,a.badge-blue:focus{outline:0;box-shadow:0 0 0 .2rem rgba(37,122,244,.5)}.badge-light-blue{color:#495057;background-color:#e3f1fe}a.badge-light-blue:focus,a.badge-light-blue:hover{color:#495057;background-color:#b2d8fc}a.badge-light-blue.focus,a.badge-light-blue:focus{outline:0;box-shadow:0 0 0 .2rem rgba(227,241,254,.5)}.badge-yellow{color:#495057;background-color:#fc0}a.badge-yellow:focus,a.badge-yellow:hover{color:#495057;background-color:#cca300}a.badge-yellow.focus,a.badge-yellow:focus{outline:0;box-shadow:0 0 0 .2rem 
rgba(255,204,0,.5)}.badge-light-yellow{color:#495057;background-color:#fffaf0}a.badge-light-yellow:focus,a.badge-light-yellow:hover{color:#495057;background-color:#ffe9bd}a.badge-light-yellow.focus,a.badge-light-yellow:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,250,240,.5)}.badge-orange{color:#495057;background-color:#ff8c00}a.badge-orange:focus,a.badge-orange:hover{color:#495057;background-color:#cc7000}a.badge-orange.focus,a.badge-orange:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,140,0,.5)}.badge-light-orange{color:#495057;background-color:#ffe4b5}a.badge-light-orange:focus,a.badge-light-orange:hover{color:#495057;background-color:#ffd182}a.badge-light-orange.focus,a.badge-light-orange:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,228,181,.5)}.badge-red{color:#fff;background-color:#ff3939}a.badge-red:focus,a.badge-red:hover{color:#fff;background-color:#ff0606}a.badge-red.focus,a.badge-red:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,57,57,.5)}.badge-light-red{color:#495057;background-color:#ffe4e1}a.badge-light-red:focus,a.badge-light-red:hover{color:#495057;background-color:#ffb6ae}a.badge-light-red.focus,a.badge-light-red:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,228,225,.5)}.badge-medium{color:#495057;background-color:#d6dbdf}a.badge-medium:focus,a.badge-medium:hover{color:#495057;background-color:#b9c2c9}a.badge-medium.focus,a.badge-medium:focus{outline:0;box-shadow:0 0 0 .2rem rgba(214,219,223,.5)}.jumbotron{padding:2rem 1rem;margin-bottom:2rem;background-color:#e9ecef;border-radius:8px}@media(min-width:616px){.jumbotron{padding:4rem 2rem}}.jumbotron-fluid{padding-right:0;padding-left:0;border-radius:0}.alert{position:relative;padding:.75rem 1.25rem;margin-bottom:1rem;border:1px solid transparent;border-radius:8px}.alert-heading{color:inherit}.alert-link{font-weight:700}.alert-dismissible{padding-right:4rem}.alert-dismissible .close{position:absolute;top:0;right:0;padding:.75rem 1.25rem;color:inherit}.alert-primary{color:#947c14;background-color:#fff5cc;border-color:#fff1b8}.alert-primary hr{border-top-color:#ffec9f}.alert-primary .alert-link{color:#67560e}.alert-secondary{color:#212529;background-color:#d3d3d4;border-color:#c1c2c3}.alert-secondary hr{border-top-color:#b4b5b6}.alert-secondary .alert-link{color:#0a0c0d}.alert-success{color:#256938;background-color:#d4edda;border-color:#c3e6cb}.alert-success hr{border-top-color:#b1dfbb}.alert-success .alert-link{color:#184324}.alert-info{color:#1c6673;background-color:#d1ecf1;border-color:#bee5eb}.alert-info hr{border-top-color:#abdde5}.alert-info .alert-link{color:#12424a}.alert-warning{color:#947617;background-color:#fff3cd;border-color:#ffeeba}.alert-warning hr{border-top-color:#ffe8a1}.alert-warning .alert-link{color:#685310}.alert-danger{color:#822d38;background-color:#f8d7da;border-color:#f5c6cb}.alert-danger hr{border-top-color:#f1b0b7}.alert-danger .alert-link{color:#5c2028}.alert-light{color:#8d9295;background-color:#fcfdfe;border-color:#fbfcfd}.alert-light hr{border-top-color:#eaeff5}.alert-light .alert-link{color:#73797c}.alert-dark{color:#363b41;background-color:#dbdcdd;border-color:#ccced0}.alert-dark hr{border-top-color:#bfc1c4}.alert-dark .alert-link{color:#1f2225}.alert-primary-light{color:#949490;background-color:#fffefc;border-color:#fffefb}.alert-primary-light hr{border-top-color:#fff8e2}.alert-primary-light .alert-link{color:#7b7b76}.alert-secondary-light{color:#949698;background-color:#fff;border-color:#fff}.alert-secondary-light hr{border-top-color:#f2f2f2}.alert-secondary-light 
.alert-link{color:#7a7d7f}.alert-tertiary{color:#235193;background-color:#d3e4fd;border-color:#c2dafc}.alert-tertiary hr{border-top-color:#aacbfb}.alert-tertiary .alert-link{color:#193a6a}.alert-tertiary-light{color:#868f98;background-color:#f9fcff;border-color:#f7fbff}.alert-tertiary-light hr{border-top-color:#deeeff}.alert-tertiary-light .alert-link{color:#6c767f}.alert-white{color:#949698;background-color:#fff;border-color:#fff}.alert-white hr{border-top-color:#f2f2f2}.alert-white .alert-link{color:#7a7d7f}.alert-black{color:#212529;background-color:#d3d3d4;border-color:#c1c2c3}.alert-black hr{border-top-color:#b4b5b6}.alert-black .alert-link{color:#0a0c0d}.alert-blue{color:#235193;background-color:#d3e4fd;border-color:#c2dafc}.alert-blue hr{border-top-color:#aacbfb}.alert-blue .alert-link{color:#193a6a}.alert-light-blue{color:#868f98;background-color:#f9fcff;border-color:#f7fbff}.alert-light-blue hr{border-top-color:#deeeff}.alert-light-blue .alert-link{color:#6c767f}.alert-yellow{color:#947c14;background-color:#fff5cc;border-color:#fff1b8}.alert-yellow hr{border-top-color:#ffec9f}.alert-yellow .alert-link{color:#67560e}.alert-light-yellow{color:#949490;background-color:#fffefc;border-color:#fffefb}.alert-light-yellow hr{border-top-color:#fff8e2}.alert-light-yellow .alert-link{color:#7b7b76}.alert-orange{color:#945b14;background-color:#ffe8cc;border-color:#ffdfb8}.alert-orange hr{border-top-color:#ffd49f}.alert-orange .alert-link{color:#673f0e}.alert-light-orange{color:#948872;background-color:#fffaf0;border-color:#fff7ea}.alert-light-orange hr{border-top-color:#ffedd1}.alert-light-orange .alert-link{color:#786e5b}.alert-red{color:#942f31;background-color:#ffd7d7;border-color:#ffc8c8}.alert-red hr{border-top-color:#ffafaf}.alert-red .alert-link{color:#6d2324}.alert-light-red{color:#948889;background-color:#fffaf9;border-color:#fff7f7}.alert-light-red hr{border-top-color:#ffdede}.alert-light-red .alert-link{color:#7b6e6f}.alert-medium{color:#7f8488;background-color:#f7f8f9;border-color:#f4f5f6}.alert-medium hr{border-top-color:#e6e8eb}.alert-medium .alert-link{color:#666a6e}@-webkit-keyframes progress-bar-stripes{0%{background-position:1rem 0}to{background-position:0 0}}@keyframes progress-bar-stripes{0%{background-position:1rem 0}to{background-position:0 0}}.progress{height:1rem;font-size:.75rem;background-color:#e9ecef;border-radius:8px}.progress,.progress-bar{display:flex;overflow:hidden}.progress-bar{flex-direction:column;justify-content:center;color:#fff;text-align:center;white-space:nowrap;background-color:#fc0;transition:width .6s ease}@media(prefers-reduced-motion:reduce){.progress-bar{transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg,hsla(0,0%,100%,.15) 25%,transparent 0,transparent 50%,hsla(0,0%,100%,.15) 0,hsla(0,0%,100%,.15) 75%,transparent 0,transparent);background-size:1rem 1rem}.progress-bar-animated{-webkit-animation:progress-bar-stripes 1s linear infinite;animation:progress-bar-stripes 1s linear 
infinite}@media(prefers-reduced-motion:reduce){.progress-bar-animated{-webkit-animation:none;animation:none}}.media{display:flex;align-items:flex-start}.media-body{flex:1}.list-group{display:flex;flex-direction:column;padding-left:0;margin-bottom:0}.list-group-item-action{width:100%;color:#6c757d;text-align:inherit}.list-group-item-action:focus,.list-group-item-action:hover{z-index:1;color:#6c757d;text-decoration:none;background-color:#f1f6f9}.list-group-item-action:active{color:#212529;background-color:#e9ecef}.list-group-item{position:relative;display:block;padding:.75rem 1.25rem;background-color:#fff;border:1px solid rgba(33,37,41,.125)}.list-group-item:first-child{border-top-left-radius:8px;border-top-right-radius:8px}.list-group-item:last-child{border-bottom-right-radius:8px;border-bottom-left-radius:8px}.list-group-item.disabled,.list-group-item:disabled{color:#6c757d;pointer-events:none;background-color:#fff}.list-group-item.active{z-index:2;color:#fff;background-color:#fc0;border-color:#fc0}.list-group-item+.list-group-item{border-top-width:0}.list-group-item+.list-group-item.active{margin-top:-1px;border-top-width:1px}.list-group-horizontal{flex-direction:row}.list-group-horizontal .list-group-item:first-child{border-bottom-left-radius:8px;border-top-right-radius:0}.list-group-horizontal .list-group-item:last-child{border-top-right-radius:8px;border-bottom-left-radius:0}.list-group-horizontal .list-group-item.active{margin-top:0}.list-group-horizontal .list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal .list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}@media(min-width:400px){.list-group-horizontal-xs{flex-direction:row}.list-group-horizontal-xs .list-group-item:first-child{border-bottom-left-radius:8px;border-top-right-radius:0}.list-group-horizontal-xs .list-group-item:last-child{border-top-right-radius:8px;border-bottom-left-radius:0}.list-group-horizontal-xs .list-group-item.active{margin-top:0}.list-group-horizontal-xs .list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-xs .list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media(min-width:616px){.list-group-horizontal-sm{flex-direction:row}.list-group-horizontal-sm .list-group-item:first-child{border-bottom-left-radius:8px;border-top-right-radius:0}.list-group-horizontal-sm .list-group-item:last-child{border-top-right-radius:8px;border-bottom-left-radius:0}.list-group-horizontal-sm .list-group-item.active{margin-top:0}.list-group-horizontal-sm .list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-sm .list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media(min-width:768px){.list-group-horizontal-md{flex-direction:row}.list-group-horizontal-md .list-group-item:first-child{border-bottom-left-radius:8px;border-top-right-radius:0}.list-group-horizontal-md .list-group-item:last-child{border-top-right-radius:8px;border-bottom-left-radius:0}.list-group-horizontal-md .list-group-item.active{margin-top:0}.list-group-horizontal-md .list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-md .list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media(min-width:980px){.list-group-horizontal-lg{flex-direction:row}.list-group-horizontal-lg .list-group-item:first-child{border-bottom-left-radius:8px;border-top-right-radius:0}.list-group-horizontal-lg 
.list-group-item:last-child{border-top-right-radius:8px;border-bottom-left-radius:0}.list-group-horizontal-lg .list-group-item.active{margin-top:0}.list-group-horizontal-lg .list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-lg .list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media(min-width:1240px){.list-group-horizontal-xl{flex-direction:row}.list-group-horizontal-xl .list-group-item:first-child{border-bottom-left-radius:8px;border-top-right-radius:0}.list-group-horizontal-xl .list-group-item:last-child{border-top-right-radius:8px;border-bottom-left-radius:0}.list-group-horizontal-xl .list-group-item.active{margin-top:0}.list-group-horizontal-xl .list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-xl .list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}.list-group-flush .list-group-item{border-right-width:0;border-left-width:0;border-radius:0}.list-group-flush .list-group-item:first-child{border-top-width:0}.list-group-flush:last-child .list-group-item:last-child{border-bottom-width:0}.list-group-item-primary{color:#947c14;background-color:#fff1b8}.list-group-item-primary.list-group-item-action:focus,.list-group-item-primary.list-group-item-action:hover{color:#947c14;background-color:#ffec9f}.list-group-item-primary.list-group-item-action.active{color:#fff;background-color:#947c14;border-color:#947c14}.list-group-item-secondary{color:#212529;background-color:#c1c2c3}.list-group-item-secondary.list-group-item-action:focus,.list-group-item-secondary.list-group-item-action:hover{color:#212529;background-color:#b4b5b6}.list-group-item-secondary.list-group-item-action.active{color:#fff;background-color:#212529;border-color:#212529}.list-group-item-success{color:#256938;background-color:#c3e6cb}.list-group-item-success.list-group-item-action:focus,.list-group-item-success.list-group-item-action:hover{color:#256938;background-color:#b1dfbb}.list-group-item-success.list-group-item-action.active{color:#fff;background-color:#256938;border-color:#256938}.list-group-item-info{color:#1c6673;background-color:#bee5eb}.list-group-item-info.list-group-item-action:focus,.list-group-item-info.list-group-item-action:hover{color:#1c6673;background-color:#abdde5}.list-group-item-info.list-group-item-action.active{color:#fff;background-color:#1c6673;border-color:#1c6673}.list-group-item-warning{color:#947617;background-color:#ffeeba}.list-group-item-warning.list-group-item-action:focus,.list-group-item-warning.list-group-item-action:hover{color:#947617;background-color:#ffe8a1}.list-group-item-warning.list-group-item-action.active{color:#fff;background-color:#947617;border-color:#947617}.list-group-item-danger{color:#822d38;background-color:#f5c6cb}.list-group-item-danger.list-group-item-action:focus,.list-group-item-danger.list-group-item-action:hover{color:#822d38;background-color:#f1b0b7}.list-group-item-danger.list-group-item-action.active{color:#fff;background-color:#822d38;border-color:#822d38}.list-group-item-light{color:#8d9295;background-color:#fbfcfd}.list-group-item-light.list-group-item-action:focus,.list-group-item-light.list-group-item-action:hover{color:#8d9295;background-color:#eaeff5}.list-group-item-light.list-group-item-action.active{color:#fff;background-color:#8d9295;border-color:#8d9295}.list-group-item-dark{color:#363b41;background-color:#ccced0}
.list-group-item-dark.list-group-item-action:focus,.list-group-item-dark.list-group-item-action:hover{color:#363b41;background-color:#bfc1c4}.list-group-item-dark.list-group-item-action.active{color:#fff;background-color:#363b41;border-color:#363b41}.list-group-item-primary-light{color:#949490;background-color:#fffefb}.list-group-item-primary-light.list-group-item-action:focus,.list-group-item-primary-light.list-group-item-action:hover{color:#949490;background-color:#fff8e2}.list-group-item-primary-light.list-group-item-action.active{color:#fff;background-color:#949490;border-color:#949490}.list-group-item-secondary-light{color:#949698;background-color:#fff}.list-group-item-secondary-light.list-group-item-action:focus,.list-group-item-secondary-light.list-group-item-action:hover{color:#949698;background-color:#f2f2f2}.list-group-item-secondary-light.list-group-item-action.active{color:#fff;background-color:#949698;border-color:#949698}.list-group-item-tertiary{color:#235193;background-color:#c2dafc}.list-group-item-tertiary.list-group-item-action:focus,.list-group-item-tertiary.list-group-item-action:hover{color:#235193;background-color:#aacbfb}.list-group-item-tertiary.list-group-item-action.active{color:#fff;background-color:#235193;border-color:#235193}.list-group-item-tertiary-light{color:#868f98;background-color:#f7fbff}.list-group-item-tertiary-light.list-group-item-action:focus,.list-group-item-tertiary-light.list-group-item-action:hover{color:#868f98;background-color:#deeeff}.list-group-item-tertiary-light.list-group-item-action.active{color:#fff;background-color:#868f98;border-color:#868f98}.list-group-item-white{color:#949698;background-color:#fff}.list-group-item-white.list-group-item-action:focus,.list-group-item-white.list-group-item-action:hover{color:#949698;background-color:#f2f2f2}.list-group-item-white.list-group-item-action.active{color:#fff;background-color:#949698;border-color:#949698}.list-group-item-black{color:#212529;background-color:#c1c2c3}.list-group-item-black.list-group-item-action:focus,.list-group-item-black.list-group-item-action:hover{color:#212529;background-color:#b4b5b6}.list-group-item-black.list-group-item-action.active{color:#fff;background-color:#212529;border-color:#212529}.list-group-item-blue{color:#235193;background-color:#c2dafc}.list-group-item-blue.list-group-item-action:focus,.list-group-item-blue.list-group-item-action:hover{color:#235193;background-color:#aacbfb}.list-group-item-blue.list-group-item-action.active{color:#fff;background-color:#235193;border-color:#235193}.list-group-item-light-blue{color:#868f98;background-color:#f7fbff}.list-group-item-light-blue.list-group-item-action:focus,.list-group-item-light-blue.list-group-item-action:hover{color:#868f98;background-color:#deeeff}.list-group-item-light-blue.list-group-item-action.active{color:#fff;background-color:#868f98;border-color:#868f98}.list-group-item-yellow{color:#947c14;background-color:#fff1b8}.list-group-item-yellow.list-group-item-action:focus,.list-group-item-yellow.list-group-item-action:hover{color:#947c14;background-color:#ffec9f}.list-group-item-yellow.list-group-item-action.active{color:#fff;background-color:#947c14;border-color:#947c14}.list-group-item-light-yellow{color:#949490;background-color:#fffefb}.list-group-item-light-yellow.list-group-item-action:focus,.list-group-item-light-yellow.list-group-item-action:hover{color:#949490;background-color:#fff8e2}.list-group-item-light-yellow.list-group-item-action.active{color:#fff;background-color:#949490;border-color:#949490}.list-group-item-orange{color:#945b14;background-color:#ffdfb8}
.list-group-item-orange.list-group-item-action:focus,.list-group-item-orange.list-group-item-action:hover{color:#945b14;background-color:#ffd49f}.list-group-item-orange.list-group-item-action.active{color:#fff;background-color:#945b14;border-color:#945b14}.list-group-item-light-orange{color:#948872;background-color:#fff7ea}.list-group-item-light-orange.list-group-item-action:focus,.list-group-item-light-orange.list-group-item-action:hover{color:#948872;background-color:#ffedd1}.list-group-item-light-orange.list-group-item-action.active{color:#fff;background-color:#948872;border-color:#948872}.list-group-item-red{color:#942f31;background-color:#ffc8c8}.list-group-item-red.list-group-item-action:focus,.list-group-item-red.list-group-item-action:hover{color:#942f31;background-color:#ffafaf}.list-group-item-red.list-group-item-action.active{color:#fff;background-color:#942f31;border-color:#942f31}.list-group-item-light-red{color:#948889;background-color:#fff7f7}.list-group-item-light-red.list-group-item-action:focus,.list-group-item-light-red.list-group-item-action:hover{color:#948889;background-color:#ffdede}.list-group-item-light-red.list-group-item-action.active{color:#fff;background-color:#948889;border-color:#948889}.list-group-item-medium{color:#7f8488;background-color:#f4f5f6}.list-group-item-medium.list-group-item-action:focus,.list-group-item-medium.list-group-item-action:hover{color:#7f8488;background-color:#e6e8eb}.list-group-item-medium.list-group-item-action.active{color:#fff;background-color:#7f8488;border-color:#7f8488}.close{float:right;font-size:1.5rem;font-weight:700;line-height:1;color:#212529;text-shadow:0 1px 0 #fff;opacity:.5}@media(max-width:1200px){.close{font-size:calc(1.275rem + .3vw)}}.close:hover{color:#212529;text-decoration:none}.close:not(:disabled):not(.disabled):focus,.close:not(:disabled):not(.disabled):hover{opacity:.75}button.close{padding:0;background-color:transparent;border:0;-webkit-appearance:none;-moz-appearance:none;appearance:none}a.close.disabled{pointer-events:none}.toast{max-width:350px;overflow:hidden;font-size:.875rem;background-color:hsla(0,0%,100%,.85);background-clip:padding-box;border:1px solid rgba(0,0,0,.1);box-shadow:0 .25rem .75rem rgba(33,37,41,.1);-webkit-backdrop-filter:blur(10px);backdrop-filter:blur(10px);opacity:0;border-radius:.25rem}.toast:not(:last-child){margin-bottom:.75rem}.toast.showing{opacity:1}.toast.show{display:block;opacity:1}.toast.hide{display:none}.toast-header{display:flex;align-items:center;padding:.25rem .75rem;color:#6c757d;background-color:hsla(0,0%,100%,.85);background-clip:padding-box;border-bottom:1px solid rgba(0,0,0,.05)}.toast-body{padding:.75rem}.modal-open{overflow:hidden}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal{position:fixed;top:0;left:0;z-index:1050;display:none;width:100%;height:100%;overflow:hidden;outline:0}.modal-dialog{position:relative;width:auto;margin:.5rem;pointer-events:none}.modal.fade .modal-dialog{transition:transform .3s ease-out;transform:translateY(-50px)}@media(prefers-reduced-motion:reduce){.modal.fade .modal-dialog{transition:none}}.modal.show .modal-dialog{transform:none}.modal.modal-static .modal-dialog{transform:scale(1.02)}.modal-dialog-scrollable{display:flex;max-height:calc(100% - 1rem)}.modal-dialog-scrollable .modal-content{max-height:calc(100vh - 1rem);overflow:hidden}.modal-dialog-scrollable .modal-footer,.modal-dialog-scrollable .modal-header{flex-shrink:0}.modal-dialog-scrollable
.modal-body{overflow-y:auto}.modal-dialog-centered{display:flex;align-items:center;min-height:calc(100% - 1rem)}.modal-dialog-centered:before{display:block;height:calc(100vh - 1rem);content:""}.modal-dialog-centered.modal-dialog-scrollable{flex-direction:column;justify-content:center;height:100%}.modal-dialog-centered.modal-dialog-scrollable .modal-content{max-height:none}.modal-dialog-centered.modal-dialog-scrollable:before{content:none}.modal-content{position:relative;display:flex;flex-direction:column;width:100%;pointer-events:auto;background-color:#fff;background-clip:padding-box;border:1px solid rgba(33,37,41,.2);border-radius:8px;outline:0}.modal-backdrop{position:fixed;top:0;left:0;z-index:1040;width:100vw;height:100vh;background-color:#212529}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:.5}.modal-header{display:flex;align-items:flex-start;justify-content:space-between;padding:1rem;border-bottom:1px solid #d6dbdf;border-top-left-radius:7px;border-top-right-radius:7px}.modal-header .close{padding:1rem;margin:-1rem -1rem -1rem auto}.modal-title{margin-bottom:0;line-height:1.5}.modal-body{position:relative;flex:1 1 auto;padding:1rem}.modal-footer{display:flex;flex-wrap:wrap;align-items:center;justify-content:flex-end;padding:.75rem;border-top:1px solid #d6dbdf;border-bottom-right-radius:7px;border-bottom-left-radius:7px}.modal-footer>*{margin:.25rem}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media(min-width:616px){.modal-dialog{max-width:500px;margin:1.75rem auto}.modal-dialog-scrollable{max-height:calc(100% - 3.5rem)}.modal-dialog-scrollable .modal-content{max-height:calc(100vh - 3.5rem)}.modal-dialog-centered{min-height:calc(100% - 3.5rem)}.modal-dialog-centered:before{height:calc(100vh - 3.5rem)}.modal-sm{max-width:300px}}@media(min-width:980px){.modal-lg,.modal-xl{max-width:800px}}@media(min-width:1240px){.modal-xl{max-width:1140px}}.tooltip{position:absolute;z-index:1070;display:block;margin:0;font-family:Noto Sans,sans-serif;font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;opacity:0}.tooltip.show{opacity:.9}.tooltip .arrow{position:absolute;display:block;width:.8rem;height:.4rem}.tooltip .arrow:before{position:absolute;content:"";border-color:transparent;border-style:solid}.bs-tooltip-auto[x-placement^=top],.bs-tooltip-top{padding:.4rem 0}.bs-tooltip-auto[x-placement^=top] .arrow,.bs-tooltip-top .arrow{bottom:0}.bs-tooltip-auto[x-placement^=top] .arrow:before,.bs-tooltip-top .arrow:before{top:0;border-width:.4rem .4rem 0;border-top-color:#212529}.bs-tooltip-auto[x-placement^=right],.bs-tooltip-right{padding:0 .4rem}.bs-tooltip-auto[x-placement^=right] .arrow,.bs-tooltip-right .arrow{left:0;width:.4rem;height:.8rem}.bs-tooltip-auto[x-placement^=right] .arrow:before,.bs-tooltip-right .arrow:before{right:0;border-width:.4rem .4rem .4rem 0;border-right-color:#212529}.bs-tooltip-auto[x-placement^=bottom],.bs-tooltip-bottom{padding:.4rem 0}.bs-tooltip-auto[x-placement^=bottom] .arrow,.bs-tooltip-bottom .arrow{top:0}.bs-tooltip-auto[x-placement^=bottom] .arrow:before,.bs-tooltip-bottom .arrow:before{bottom:0;border-width:0 .4rem .4rem;border-bottom-color:#212529}.bs-tooltip-auto[x-placement^=left],.bs-tooltip-left{padding:0 .4rem}.bs-tooltip-auto[x-placement^=left] .arrow,.bs-tooltip-left 
.arrow{right:0;width:.4rem;height:.8rem}.bs-tooltip-auto[x-placement^=left] .arrow:before,.bs-tooltip-left .arrow:before{left:0;border-width:.4rem 0 .4rem .4rem;border-left-color:#212529}.tooltip-inner{max-width:200px;padding:.25rem .5rem;color:#fff;text-align:center;background-color:#212529;border-radius:8px}.popover{top:0;left:0;z-index:1060;max-width:276px;font-family:Noto Sans,sans-serif;font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;background-color:#fff;background-clip:padding-box;border:1px solid rgba(33,37,41,.2);border-radius:8px}.popover,.popover .arrow{position:absolute;display:block}.popover .arrow{width:1rem;height:.5rem;margin:0 8px}.popover .arrow:after,.popover .arrow:before{position:absolute;display:block;content:"";border-color:transparent;border-style:solid}.bs-popover-auto[x-placement^=top],.bs-popover-top{margin-bottom:.5rem}.bs-popover-auto[x-placement^=top]>.arrow,.bs-popover-top>.arrow{bottom:calc(-.5rem - 1px)}.bs-popover-auto[x-placement^=top]>.arrow:before,.bs-popover-top>.arrow:before{bottom:0;border-width:.5rem .5rem 0;border-top-color:rgba(33,37,41,.25)}.bs-popover-auto[x-placement^=top]>.arrow:after,.bs-popover-top>.arrow:after{bottom:1px;border-width:.5rem .5rem 0;border-top-color:#fff}.bs-popover-auto[x-placement^=right],.bs-popover-right{margin-left:.5rem}.bs-popover-auto[x-placement^=right]>.arrow,.bs-popover-right>.arrow{left:calc(-.5rem - 1px);width:.5rem;height:1rem;margin:8px 0}.bs-popover-auto[x-placement^=right]>.arrow:before,.bs-popover-right>.arrow:before{left:0;border-width:.5rem .5rem .5rem 0;border-right-color:rgba(33,37,41,.25)}.bs-popover-auto[x-placement^=right]>.arrow:after,.bs-popover-right>.arrow:after{left:1px;border-width:.5rem .5rem .5rem 0;border-right-color:#fff}.bs-popover-auto[x-placement^=bottom],.bs-popover-bottom{margin-top:.5rem}.bs-popover-auto[x-placement^=bottom]>.arrow,.bs-popover-bottom>.arrow{top:calc(-.5rem - 1px)}.bs-popover-auto[x-placement^=bottom]>.arrow:before,.bs-popover-bottom>.arrow:before{top:0;border-width:0 .5rem .5rem;border-bottom-color:rgba(33,37,41,.25)}.bs-popover-auto[x-placement^=bottom]>.arrow:after,.bs-popover-bottom>.arrow:after{top:1px;border-width:0 .5rem .5rem;border-bottom-color:#fff}.bs-popover-auto[x-placement^=bottom] .popover-header:before,.bs-popover-bottom .popover-header:before{position:absolute;top:0;left:50%;display:block;width:1rem;margin-left:-.5rem;content:"";border-bottom:1px solid #f7f7f7}.bs-popover-auto[x-placement^=left],.bs-popover-left{margin-right:.5rem}.bs-popover-auto[x-placement^=left]>.arrow,.bs-popover-left>.arrow{right:calc(-.5rem - 1px);width:.5rem;height:1rem;margin:8px 0}.bs-popover-auto[x-placement^=left]>.arrow:before,.bs-popover-left>.arrow:before{right:0;border-width:.5rem 0 .5rem .5rem;border-left-color:rgba(33,37,41,.25)}.bs-popover-auto[x-placement^=left]>.arrow:after,.bs-popover-left>.arrow:after{right:1px;border-width:.5rem 0 .5rem .5rem;border-left-color:#fff}.popover-header{padding:.5rem .75rem;margin-bottom:0;font-size:1rem;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-top-left-radius:7px;border-top-right-radius:7px}.popover-header:empty{display:none}.popover-body{padding:.5rem 
.75rem;color:#212529}.carousel{position:relative}.carousel.pointer-event{touch-action:pan-y}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner:after{display:block;clear:both;content:""}.carousel-item{position:relative;display:none;float:left;width:100%;margin-right:-100%;-webkit-backface-visibility:hidden;backface-visibility:hidden;transition:transform .6s ease-in-out}@media(prefers-reduced-motion:reduce){.carousel-item{transition:none}}.carousel-item-next,.carousel-item-prev,.carousel-item.active{display:block}.active.carousel-item-right,.carousel-item-next:not(.carousel-item-left){transform:translateX(100%)}.active.carousel-item-left,.carousel-item-prev:not(.carousel-item-right){transform:translateX(-100%)}.carousel-fade .carousel-item{opacity:0;transition-property:opacity;transform:none}.carousel-fade .carousel-item-next.carousel-item-left,.carousel-fade .carousel-item-prev.carousel-item-right,.carousel-fade .carousel-item.active{z-index:1;opacity:1}.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-right{z-index:0;opacity:0;transition:opacity 0s .6s}@media(prefers-reduced-motion:reduce){.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-right{transition:none}}.carousel-control-next,.carousel-control-prev{position:absolute;top:0;bottom:0;z-index:1;display:flex;align-items:center;justify-content:center;width:15%;color:#fff;text-align:center;opacity:.5;transition:opacity .15s ease}@media(prefers-reduced-motion:reduce){.carousel-control-next,.carousel-control-prev{transition:none}}.carousel-control-next:focus,.carousel-control-next:hover,.carousel-control-prev:focus,.carousel-control-prev:hover{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-next-icon,.carousel-control-prev-icon{display:inline-block;width:20px;height:20px;background:no-repeat 50%/100% 100%}.carousel-control-prev-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' width='8' height='8'%3E%3Cpath d='M5.25 0l-4 4 4 4 1.5-1.5L4.25 4l2.5-2.5L5.25 0z'/%3E%3C/svg%3E")}.carousel-control-next-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' width='8' height='8'%3E%3Cpath d='M2.75 0l-1.5 1.5L3.75 4l-2.5 2.5L2.75 8l4-4-4-4z'/%3E%3C/svg%3E")}.carousel-indicators{position:absolute;right:0;bottom:0;left:0;z-index:15;display:flex;justify-content:center;padding-left:0;margin-right:15%;margin-left:15%;list-style:none}.carousel-indicators li{box-sizing:content-box;flex:0 1 auto;width:30px;height:3px;margin-right:3px;margin-left:3px;text-indent:-999px;cursor:pointer;background-color:#fff;background-clip:padding-box;border-top:10px solid transparent;border-bottom:10px solid transparent;opacity:.5;transition:opacity .6s ease}@media(prefers-reduced-motion:reduce){.carousel-indicators li{transition:none}}.carousel-indicators .active{opacity:1}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center}@-webkit-keyframes spinner-border{to{transform:rotate(1turn)}}@keyframes spinner-border{to{transform:rotate(1turn)}}.spinner-border{display:inline-block;width:2rem;height:2rem;vertical-align:text-bottom;border:.25em solid;border-right:.25em solid transparent;border-radius:50%;-webkit-animation:spinner-border .75s linear infinite;animation:spinner-border .75s linear 
infinite}.spinner-border-sm{width:1rem;height:1rem;border-width:.2em}@-webkit-keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1}}@keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1}}.spinner-grow{display:inline-block;width:2rem;height:2rem;vertical-align:text-bottom;background-color:currentColor;border-radius:50%;opacity:0;-webkit-animation:spinner-grow .75s linear infinite;animation:spinner-grow .75s linear infinite}.spinner-grow-sm{width:1rem;height:1rem}.align-baseline{vertical-align:baseline!important}.align-top{vertical-align:top!important}.align-middle{vertical-align:middle!important}.align-bottom{vertical-align:bottom!important}.align-text-bottom{vertical-align:text-bottom!important}.align-text-top{vertical-align:text-top!important}.bg-primary{background-color:#fc0!important}a.bg-primary:focus,a.bg-primary:hover,button.bg-primary:focus,button.bg-primary:hover{background-color:#cca300!important}.bg-secondary{background-color:#212529!important}a.bg-secondary:focus,a.bg-secondary:hover,button.bg-secondary:focus,button.bg-secondary:hover{background-color:#0a0c0d!important}.bg-success{background-color:#28a745!important}a.bg-success:focus,a.bg-success:hover,button.bg-success:focus,button.bg-success:hover{background-color:#1e7e34!important}.bg-info{background-color:#17a2b8!important}a.bg-info:focus,a.bg-info:hover,button.bg-info:focus,button.bg-info:hover{background-color:#117a8b!important}.bg-warning{background-color:#ffc107!important}a.bg-warning:focus,a.bg-warning:hover,button.bg-warning:focus,button.bg-warning:hover{background-color:#d39e00!important}.bg-danger{background-color:#dc3545!important}a.bg-danger:focus,a.bg-danger:hover,button.bg-danger:focus,button.bg-danger:hover{background-color:#bd2130!important}.bg-light{background-color:#f1f6f9!important}a.bg-light:focus,a.bg-light:hover,button.bg-light:focus,button.bg-light:hover{background-color:#cddfea!important}.bg-dark{background-color:#495057!important}a.bg-dark:focus,a.bg-dark:hover,button.bg-dark:focus,button.bg-dark:hover{background-color:#32373b!important}.bg-primary-light{background-color:#fffaf0!important}a.bg-primary-light:focus,a.bg-primary-light:hover,button.bg-primary-light:focus,button.bg-primary-light:hover{background-color:#ffe9bd!important}.bg-secondary-light{background-color:#fff!important}a.bg-secondary-light:focus,a.bg-secondary-light:hover,button.bg-secondary-light:focus,button.bg-secondary-light:hover{background-color:#e6e6e6!important}.bg-tertiary{background-color:#257af4!important}a.bg-tertiary:focus,a.bg-tertiary:hover,button.bg-tertiary:focus,button.bg-tertiary:hover{background-color:#0b60db!important}.bg-tertiary-light{background-color:#e3f1fe!important}a.bg-tertiary-light:focus,a.bg-tertiary-light:hover,button.bg-tertiary-light:focus,button.bg-tertiary-light:hover{background-color:#b2d8fc!important}a.bg-white:focus,a.bg-white:hover,button.bg-white:focus,button.bg-white:hover{background-color:#e6e6e6!important}.bg-black{background-color:#212529!important}a.bg-black:focus,a.bg-black:hover,button.bg-black:focus,button.bg-black:hover{background-color:#0a0c0d!important}.bg-blue{background-color:#257af4!important}a.bg-blue:focus,a.bg-blue:hover,button.bg-blue:focus,button.bg-blue:hover{background-color:#0b60db!important}.bg-light-blue{background-color:#e3f1fe!important}a.bg-light-blue:focus,a.bg-light-blue:hover,button.bg-light-blue:focus,button.bg-light-blue:hover{background-color:#b2d8fc!important}.bg-yellow{background-color:#fc0!important}
a.bg-yellow:focus,a.bg-yellow:hover,button.bg-yellow:focus,button.bg-yellow:hover{background-color:#cca300!important}.bg-light-yellow{background-color:#fffaf0!important}a.bg-light-yellow:focus,a.bg-light-yellow:hover,button.bg-light-yellow:focus,button.bg-light-yellow:hover{background-color:#ffe9bd!important}.bg-orange{background-color:#ff8c00!important}a.bg-orange:focus,a.bg-orange:hover,button.bg-orange:focus,button.bg-orange:hover{background-color:#cc7000!important}.bg-light-orange{background-color:#ffe4b5!important}a.bg-light-orange:focus,a.bg-light-orange:hover,button.bg-light-orange:focus,button.bg-light-orange:hover{background-color:#ffd182!important}.bg-red{background-color:#ff3939!important}a.bg-red:focus,a.bg-red:hover,button.bg-red:focus,button.bg-red:hover{background-color:#ff0606!important}.bg-light-red{background-color:#ffe4e1!important}a.bg-light-red:focus,a.bg-light-red:hover,button.bg-light-red:focus,button.bg-light-red:hover{background-color:#ffb6ae!important}.bg-medium{background-color:#d6dbdf!important}a.bg-medium:focus,a.bg-medium:hover,button.bg-medium:focus,button.bg-medium:hover{background-color:#b9c2c9!important}.bg-white{background-color:#fff!important}.bg-transparent{background-color:transparent!important}.border{border:1px solid #d6dbdf!important}.border-top{border-top:1px solid #d6dbdf!important}.border-right{border-right:1px solid #d6dbdf!important}.border-bottom{border-bottom:1px solid #d6dbdf!important}.border-left{border-left:1px solid #d6dbdf!important}.border-0{border:0!important}.border-top-0{border-top:0!important}.border-right-0{border-right:0!important}.border-bottom-0{border-bottom:0!important}.border-left-0{border-left:0!important}.border-primary{border-color:#fc0!important}.border-secondary{border-color:#212529!important}.border-success{border-color:#28a745!important}.border-info{border-color:#17a2b8!important}.border-warning{border-color:#ffc107!important}.border-danger{border-color:#dc3545!important}.border-light{border-color:#f1f6f9!important}.border-dark{border-color:#495057!important}.border-primary-light{border-color:#fffaf0!important}.border-secondary-light{border-color:#fff!important}.border-tertiary{border-color:#257af4!important}.border-tertiary-light{border-color:#e3f1fe!important}.border-black{border-color:#212529!important}.border-blue{border-color:#257af4!important}.border-light-blue{border-color:#e3f1fe!important}.border-yellow{border-color:#fc0!important}.border-light-yellow{border-color:#fffaf0!important}.border-orange{border-color:#ff8c00!important}.border-light-orange{border-color:#ffe4b5!important}.border-red{border-color:#ff3939!important}.border-light-red{border-color:#ffe4e1!important}.border-medium{border-color:#d6dbdf!important}.border-white{border-color:#fff!important}.rounded,.rounded-sm{border-radius:8px!important}.rounded-top{border-top-left-radius:8px!important}.rounded-right,.rounded-top{border-top-right-radius:8px!important}.rounded-bottom,.rounded-right{border-bottom-right-radius:8px!important}.rounded-bottom,.rounded-left{border-bottom-left-radius:8px!important}.rounded-left{border-top-left-radius:8px!important}.rounded-lg{border-radius:8px!important}.rounded-circle{border-radius:50%!important}.rounded-pill{border-radius:50rem!important}.rounded-0{border-radius:0!important}.clearfix:after{display:block;clear:both;content:""}.d-none{display:none!important}.d-inline{display:inline!important}.d-inline-block{display:inline-block!important}.d-block{display:block!important}.d-table{display:table!important}.d-table-row{display:table-row!important}
.d-table-cell{display:table-cell!important}.d-flex{display:flex!important}.d-inline-flex{display:inline-flex!important}@media(min-width:400px){.d-xs-none{display:none!important}.d-xs-inline{display:inline!important}.d-xs-inline-block{display:inline-block!important}.d-xs-block{display:block!important}.d-xs-table{display:table!important}.d-xs-table-row{display:table-row!important}.d-xs-table-cell{display:table-cell!important}.d-xs-flex{display:flex!important}.d-xs-inline-flex{display:inline-flex!important}}@media(min-width:616px){.d-sm-none{display:none!important}.d-sm-inline{display:inline!important}.d-sm-inline-block{display:inline-block!important}.d-sm-block{display:block!important}.d-sm-table{display:table!important}.d-sm-table-row{display:table-row!important}.d-sm-table-cell{display:table-cell!important}.d-sm-flex{display:flex!important}.d-sm-inline-flex{display:inline-flex!important}}@media(min-width:768px){.d-md-none{display:none!important}.d-md-inline{display:inline!important}.d-md-inline-block{display:inline-block!important}.d-md-block{display:block!important}.d-md-table{display:table!important}.d-md-table-row{display:table-row!important}.d-md-table-cell{display:table-cell!important}.d-md-flex{display:flex!important}.d-md-inline-flex{display:inline-flex!important}}@media(min-width:980px){.d-lg-none{display:none!important}.d-lg-inline{display:inline!important}.d-lg-inline-block{display:inline-block!important}.d-lg-block{display:block!important}.d-lg-table{display:table!important}.d-lg-table-row{display:table-row!important}.d-lg-table-cell{display:table-cell!important}.d-lg-flex{display:flex!important}.d-lg-inline-flex{display:inline-flex!important}}@media(min-width:1240px){.d-xl-none{display:none!important}.d-xl-inline{display:inline!important}.d-xl-inline-block{display:inline-block!important}.d-xl-block{display:block!important}.d-xl-table{display:table!important}.d-xl-table-row{display:table-row!important}.d-xl-table-cell{display:table-cell!important}.d-xl-flex{display:flex!important}.d-xl-inline-flex{display:inline-flex!important}}@media print{.d-print-none{display:none!important}.d-print-inline{display:inline!important}.d-print-inline-block{display:inline-block!important}.d-print-block{display:block!important}.d-print-table{display:table!important}.d-print-table-row{display:table-row!important}.d-print-table-cell{display:table-cell!important}.d-print-flex{display:flex!important}.d-print-inline-flex{display:inline-flex!important}}.embed-responsive{position:relative;display:block;width:100%;padding:0;overflow:hidden}.embed-responsive:before{display:block;content:""}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-21by9:before{padding-top:42.8571428571%}.embed-responsive-16by9:before{padding-top:56.25%}.embed-responsive-4by3:before{padding-top:75%}.embed-responsive-1by1:before{padding-top:100%}.flex-row{flex-direction:row!important}.flex-column{flex-direction:column!important}.flex-row-reverse{flex-direction:row-reverse!important}.flex-column-reverse{flex-direction:column-reverse!important}.flex-wrap{flex-wrap:wrap!important}.flex-nowrap{flex-wrap:nowrap!important}.flex-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-fill{flex:1 1
auto!important}.flex-grow-0{flex-grow:0!important}.flex-grow-1{flex-grow:1!important}.flex-shrink-0{flex-shrink:0!important}.flex-shrink-1{flex-shrink:1!important}.justify-content-start{justify-content:flex-start!important}.justify-content-end{justify-content:flex-end!important}.justify-content-center{justify-content:center!important}.justify-content-between{justify-content:space-between!important}.justify-content-around{justify-content:space-around!important}.align-items-start{align-items:flex-start!important}.align-items-end{align-items:flex-end!important}.align-items-center{align-items:center!important}.align-items-baseline{align-items:baseline!important}.align-items-stretch{align-items:stretch!important}.align-content-start{align-content:flex-start!important}.align-content-end{align-content:flex-end!important}.align-content-center{align-content:center!important}.align-content-between{align-content:space-between!important}.align-content-around{align-content:space-around!important}.align-content-stretch{align-content:stretch!important}.align-self-auto{align-self:auto!important}.align-self-start{align-self:flex-start!important}.align-self-end{align-self:flex-end!important}.align-self-center{align-self:center!important}.align-self-baseline{align-self:baseline!important}.align-self-stretch{align-self:stretch!important}@media(min-width:400px){.flex-xs-row{flex-direction:row!important}.flex-xs-column{flex-direction:column!important}.flex-xs-row-reverse{flex-direction:row-reverse!important}.flex-xs-column-reverse{flex-direction:column-reverse!important}.flex-xs-wrap{flex-wrap:wrap!important}.flex-xs-nowrap{flex-wrap:nowrap!important}.flex-xs-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-xs-fill{flex:1 1 auto!important}.flex-xs-grow-0{flex-grow:0!important}.flex-xs-grow-1{flex-grow:1!important}.flex-xs-shrink-0{flex-shrink:0!important}.flex-xs-shrink-1{flex-shrink:1!important}.justify-content-xs-start{justify-content:flex-start!important}.justify-content-xs-end{justify-content:flex-end!important}.justify-content-xs-center{justify-content:center!important}.justify-content-xs-between{justify-content:space-between!important}.justify-content-xs-around{justify-content:space-around!important}.align-items-xs-start{align-items:flex-start!important}.align-items-xs-end{align-items:flex-end!important}.align-items-xs-center{align-items:center!important}.align-items-xs-baseline{align-items:baseline!important}.align-items-xs-stretch{align-items:stretch!important}.align-content-xs-start{align-content:flex-start!important}.align-content-xs-end{align-content:flex-end!important}.align-content-xs-center{align-content:center!important}.align-content-xs-between{align-content:space-between!important}.align-content-xs-around{align-content:space-around!important}.align-content-xs-stretch{align-content:stretch!important}.align-self-xs-auto{align-self:auto!important}.align-self-xs-start{align-self:flex-start!important}.align-self-xs-end{align-self:flex-end!important}.align-self-xs-center{align-self:center!important}.align-self-xs-baseline{align-self:baseline!important}.align-self-xs-stretch{align-self:stretch!important}}@media(min-width:616px){.flex-sm-row{flex-direction:row!important}.flex-sm-column{flex-direction:column!important}.flex-sm-row-reverse{flex-direction:row-reverse!important}.flex-sm-column-reverse{flex-direction:column-reverse!important}.flex-sm-wrap{flex-wrap:wrap!important}.flex-sm-nowrap{flex-wrap:nowrap!important}.flex-sm-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-sm-fill{flex:1 1 
auto!important}.flex-sm-grow-0{flex-grow:0!important}.flex-sm-grow-1{flex-grow:1!important}.flex-sm-shrink-0{flex-shrink:0!important}.flex-sm-shrink-1{flex-shrink:1!important}.justify-content-sm-start{justify-content:flex-start!important}.justify-content-sm-end{justify-content:flex-end!important}.justify-content-sm-center{justify-content:center!important}.justify-content-sm-between{justify-content:space-between!important}.justify-content-sm-around{justify-content:space-around!important}.align-items-sm-start{align-items:flex-start!important}.align-items-sm-end{align-items:flex-end!important}.align-items-sm-center{align-items:center!important}.align-items-sm-baseline{align-items:baseline!important}.align-items-sm-stretch{align-items:stretch!important}.align-content-sm-start{align-content:flex-start!important}.align-content-sm-end{align-content:flex-end!important}.align-content-sm-center{align-content:center!important}.align-content-sm-between{align-content:space-between!important}.align-content-sm-around{align-content:space-around!important}.align-content-sm-stretch{align-content:stretch!important}.align-self-sm-auto{align-self:auto!important}.align-self-sm-start{align-self:flex-start!important}.align-self-sm-end{align-self:flex-end!important}.align-self-sm-center{align-self:center!important}.align-self-sm-baseline{align-self:baseline!important}.align-self-sm-stretch{align-self:stretch!important}}@media(min-width:768px){.flex-md-row{flex-direction:row!important}.flex-md-column{flex-direction:column!important}.flex-md-row-reverse{flex-direction:row-reverse!important}.flex-md-column-reverse{flex-direction:column-reverse!important}.flex-md-wrap{flex-wrap:wrap!important}.flex-md-nowrap{flex-wrap:nowrap!important}.flex-md-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-md-fill{flex:1 1 
auto!important}.flex-md-grow-0{flex-grow:0!important}.flex-md-grow-1{flex-grow:1!important}.flex-md-shrink-0{flex-shrink:0!important}.flex-md-shrink-1{flex-shrink:1!important}.justify-content-md-start{justify-content:flex-start!important}.justify-content-md-end{justify-content:flex-end!important}.justify-content-md-center{justify-content:center!important}.justify-content-md-between{justify-content:space-between!important}.justify-content-md-around{justify-content:space-around!important}.align-items-md-start{align-items:flex-start!important}.align-items-md-end{align-items:flex-end!important}.align-items-md-center{align-items:center!important}.align-items-md-baseline{align-items:baseline!important}.align-items-md-stretch{align-items:stretch!important}.align-content-md-start{align-content:flex-start!important}.align-content-md-end{align-content:flex-end!important}.align-content-md-center{align-content:center!important}.align-content-md-between{align-content:space-between!important}.align-content-md-around{align-content:space-around!important}.align-content-md-stretch{align-content:stretch!important}.align-self-md-auto{align-self:auto!important}.align-self-md-start{align-self:flex-start!important}.align-self-md-end{align-self:flex-end!important}.align-self-md-center{align-self:center!important}.align-self-md-baseline{align-self:baseline!important}.align-self-md-stretch{align-self:stretch!important}}@media(min-width:980px){.flex-lg-row{flex-direction:row!important}.flex-lg-column{flex-direction:column!important}.flex-lg-row-reverse{flex-direction:row-reverse!important}.flex-lg-column-reverse{flex-direction:column-reverse!important}.flex-lg-wrap{flex-wrap:wrap!important}.flex-lg-nowrap{flex-wrap:nowrap!important}.flex-lg-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-lg-fill{flex:1 1 
auto!important}.flex-lg-grow-0{flex-grow:0!important}.flex-lg-grow-1{flex-grow:1!important}.flex-lg-shrink-0{flex-shrink:0!important}.flex-lg-shrink-1{flex-shrink:1!important}.justify-content-lg-start{justify-content:flex-start!important}.justify-content-lg-end{justify-content:flex-end!important}.justify-content-lg-center{justify-content:center!important}.justify-content-lg-between{justify-content:space-between!important}.justify-content-lg-around{justify-content:space-around!important}.align-items-lg-start{align-items:flex-start!important}.align-items-lg-end{align-items:flex-end!important}.align-items-lg-center{align-items:center!important}.align-items-lg-baseline{align-items:baseline!important}.align-items-lg-stretch{align-items:stretch!important}.align-content-lg-start{align-content:flex-start!important}.align-content-lg-end{align-content:flex-end!important}.align-content-lg-center{align-content:center!important}.align-content-lg-between{align-content:space-between!important}.align-content-lg-around{align-content:space-around!important}.align-content-lg-stretch{align-content:stretch!important}.align-self-lg-auto{align-self:auto!important}.align-self-lg-start{align-self:flex-start!important}.align-self-lg-end{align-self:flex-end!important}.align-self-lg-center{align-self:center!important}.align-self-lg-baseline{align-self:baseline!important}.align-self-lg-stretch{align-self:stretch!important}}@media(min-width:1240px){.flex-xl-row{flex-direction:row!important}.flex-xl-column{flex-direction:column!important}.flex-xl-row-reverse{flex-direction:row-reverse!important}.flex-xl-column-reverse{flex-direction:column-reverse!important}.flex-xl-wrap{flex-wrap:wrap!important}.flex-xl-nowrap{flex-wrap:nowrap!important}.flex-xl-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-xl-fill{flex:1 1 
auto!important}.flex-xl-grow-0{flex-grow:0!important}.flex-xl-grow-1{flex-grow:1!important}.flex-xl-shrink-0{flex-shrink:0!important}.flex-xl-shrink-1{flex-shrink:1!important}.justify-content-xl-start{justify-content:flex-start!important}.justify-content-xl-end{justify-content:flex-end!important}.justify-content-xl-center{justify-content:center!important}.justify-content-xl-between{justify-content:space-between!important}.justify-content-xl-around{justify-content:space-around!important}.align-items-xl-start{align-items:flex-start!important}.align-items-xl-end{align-items:flex-end!important}.align-items-xl-center{align-items:center!important}.align-items-xl-baseline{align-items:baseline!important}.align-items-xl-stretch{align-items:stretch!important}.align-content-xl-start{align-content:flex-start!important}.align-content-xl-end{align-content:flex-end!important}.align-content-xl-center{align-content:center!important}.align-content-xl-between{align-content:space-between!important}.align-content-xl-around{align-content:space-around!important}.align-content-xl-stretch{align-content:stretch!important}.align-self-xl-auto{align-self:auto!important}.align-self-xl-start{align-self:flex-start!important}.align-self-xl-end{align-self:flex-end!important}.align-self-xl-center{align-self:center!important}.align-self-xl-baseline{align-self:baseline!important}.align-self-xl-stretch{align-self:stretch!important}}.float-left{float:left!important}.float-right{float:right!important}.float-none{float:none!important}@media(min-width:400px){.float-xs-left{float:left!important}.float-xs-right{float:right!important}.float-xs-none{float:none!important}}@media(min-width:616px){.float-sm-left{float:left!important}.float-sm-right{float:right!important}.float-sm-none{float:none!important}}@media(min-width:768px){.float-md-left{float:left!important}.float-md-right{float:right!important}.float-md-none{float:none!important}}@media(min-width:980px){.float-lg-left{float:left!important}.float-lg-right{float:right!important}.float-lg-none{float:none!important}}@media(min-width:1240px){.float-xl-left{float:left!important}.float-xl-right{float:right!important}.float-xl-none{float:none!important}}.overflow-auto{overflow:auto!important}.overflow-hidden{overflow:hidden!important}.position-static{position:static!important}.position-relative{position:relative!important}.position-absolute{position:absolute!important}.position-fixed{position:fixed!important}.position-sticky{position:sticky!important}.fixed-top{top:0}.fixed-bottom,.fixed-top{position:fixed;right:0;left:0;z-index:1030}.fixed-bottom{bottom:0}@supports(position:sticky){.sticky-top{position:sticky;top:0;z-index:1020}}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;overflow:visible;clip:auto;white-space:normal}.shadow-sm{box-shadow:0 2px 14px rgba(108,117,125,.2)!important}.shadow{box-shadow:0 8px 20px rgba(108,117,125,.2)!important}.shadow-lg{box-shadow:0 12px 32px 
rgba(108,117,125,.2)!important}.shadow-none{box-shadow:none!important}.w-25{width:25%!important}.w-50{width:50%!important}.w-75{width:75%!important}.w-100{width:100%!important}.w-auto{width:auto!important}.h-25{height:25%!important}.h-50{height:50%!important}.h-75{height:75%!important}.h-100{height:100%!important}.h-auto{height:auto!important}.mw-100{max-width:100%!important}.mh-100{max-height:100%!important}.min-vw-100{min-width:100vw!important}.min-vh-100{min-height:100vh!important}.vw-100{width:100vw!important}.vh-100{height:100vh!important}.stretched-link:after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;pointer-events:auto;content:"";background-color:transparent}.m-0{margin:0!important}.mt-0,.my-0{margin-top:0!important}.mr-0,.mx-0{margin-right:0!important}.mb-0,.my-0{margin-bottom:0!important}.ml-0,.mx-0{margin-left:0!important}.m-1{margin:8px!important}.mt-1,.my-1{margin-top:8px!important}.mr-1,.mx-1{margin-right:8px!important}.mb-1,.my-1{margin-bottom:8px!important}.ml-1,.mx-1{margin-left:8px!important}.m-2{margin:16px!important}.mt-2,.my-2{margin-top:16px!important}.mr-2,.mx-2{margin-right:16px!important}.mb-2,.my-2{margin-bottom:16px!important}.ml-2,.mx-2{margin-left:16px!important}.m-3{margin:24px!important}.mt-3,.my-3{margin-top:24px!important}.mr-3,.mx-3{margin-right:24px!important}.mb-3,.my-3{margin-bottom:24px!important}.ml-3,.mx-3{margin-left:24px!important}.m-4{margin:32px!important}.mt-4,.my-4{margin-top:32px!important}.mr-4,.mx-4{margin-right:32px!important}.mb-4,.my-4{margin-bottom:32px!important}.ml-4,.mx-4{margin-left:32px!important}.m-5{margin:40px!important}.mt-5,.my-5{margin-top:40px!important}.mr-5,.mx-5{margin-right:40px!important}.mb-5,.my-5{margin-bottom:40px!important}.ml-5,.mx-5{margin-left:40px!important}.m-6{margin:48px!important}.mt-6,.my-6{margin-top:48px!important}.mr-6,.mx-6{margin-right:48px!important}.mb-6,.my-6{margin-bottom:48px!important}.ml-6,.mx-6{margin-left:48px!important}.m-7{margin:56px!important}.mt-7,.my-7{margin-top:56px!important}.mr-7,.mx-7{margin-right:56px!important}.mb-7,.my-7{margin-bottom:56px!important}.ml-7,.mx-7{margin-left:56px!important}.m-8{margin:64px!important}.mt-8,.my-8{margin-top:64px!important}.mr-8,.mx-8{margin-right:64px!important}.mb-8,.my-8{margin-bottom:64px!important}.ml-8,.mx-8{margin-left:64px!important}.m-9{margin:72px!important}.mt-9,.my-9{margin-top:72px!important}.mr-9,.mx-9{margin-right:72px!important}.mb-9,.my-9{margin-bottom:72px!important}.ml-9,.mx-9{margin-left:72px!important}.m-10{margin:80px!important}.mt-10,.my-10{margin-top:80px!important}.mr-10,.mx-10{margin-right:80px!important}.mb-10,.my-10{margin-bottom:80px!important}.ml-10,.mx-10{margin-left:80px!important}.m-12{margin:96px!important}.mt-12,.my-12{margin-top:96px!important}.mr-12,.mx-12{margin-right:96px!important}.mb-12,.my-12{margin-bottom:96px!important}.ml-12,.mx-12{margin-left:96px!important}.m-15{margin:120px!important}.mt-15,.my-15{margin-top:120px!important}.mr-15,.mx-15{margin-right:120px!important}.mb-15,.my-15{margin-bottom:120px!important}.ml-15,.mx-15{margin-left:120px!important}.p-0{padding:0!important}.pt-0,.py-0{padding-top:0!important}.pr-0,.px-0{padding-right:0!important}.pb-0,.py-0{padding-bottom:0!important}.pl-0,.px-0{padding-left:0!important}.p-1{padding:8px!important}.pt-1,.py-1{padding-top:8px!important}.pr-1,.px-1{padding-right:8px!important}.pb-1,.py-1{padding-bottom:8px!important}.pl-1,.px-1{padding-left:8px!important}.p-2{padding:16px!important}.pt-2,.py-2{padding-top:16px!important}
.pr-2,.px-2{padding-right:16px!important}.pb-2,.py-2{padding-bottom:16px!important}.pl-2,.px-2{padding-left:16px!important}.p-3{padding:24px!important}.pt-3,.py-3{padding-top:24px!important}.pr-3,.px-3{padding-right:24px!important}.pb-3,.py-3{padding-bottom:24px!important}.pl-3,.px-3{padding-left:24px!important}.p-4{padding:32px!important}.pt-4,.py-4{padding-top:32px!important}.pr-4,.px-4{padding-right:32px!important}.pb-4,.py-4{padding-bottom:32px!important}.pl-4,.px-4{padding-left:32px!important}.p-5{padding:40px!important}.pt-5,.py-5{padding-top:40px!important}.pr-5,.px-5{padding-right:40px!important}.pb-5,.py-5{padding-bottom:40px!important}.pl-5,.px-5{padding-left:40px!important}.p-6{padding:48px!important}.pt-6,.py-6{padding-top:48px!important}.pr-6,.px-6{padding-right:48px!important}.pb-6,.py-6{padding-bottom:48px!important}.pl-6,.px-6{padding-left:48px!important}.p-7{padding:56px!important}.pt-7,.py-7{padding-top:56px!important}.pr-7,.px-7{padding-right:56px!important}.pb-7,.py-7{padding-bottom:56px!important}.pl-7,.px-7{padding-left:56px!important}.p-8{padding:64px!important}.pt-8,.py-8{padding-top:64px!important}.pr-8,.px-8{padding-right:64px!important}.pb-8,.py-8{padding-bottom:64px!important}.pl-8,.px-8{padding-left:64px!important}.p-9{padding:72px!important}.pt-9,.py-9{padding-top:72px!important}.pr-9,.px-9{padding-right:72px!important}.pb-9,.py-9{padding-bottom:72px!important}.pl-9,.px-9{padding-left:72px!important}.p-10{padding:80px!important}.pt-10,.py-10{padding-top:80px!important}.pr-10,.px-10{padding-right:80px!important}.pb-10,.py-10{padding-bottom:80px!important}.pl-10,.px-10{padding-left:80px!important}.p-12{padding:96px!important}.pt-12,.py-12{padding-top:96px!important}.pr-12,.px-12{padding-right:96px!important}.pb-12,.py-12{padding-bottom:96px!important}.pl-12,.px-12{padding-left:96px!important}.p-15{padding:120px!important}.pt-15,.py-15{padding-top:120px!important}.pr-15,.px-15{padding-right:120px!important}.pb-15,.py-15{padding-bottom:120px!important}.pl-15,.px-15{padding-left:120px!important}.m-n1{margin:-8px!important}.mt-n1,.my-n1{margin-top:-8px!important}.mr-n1,.mx-n1{margin-right:-8px!important}.mb-n1,.my-n1{margin-bottom:-8px!important}.ml-n1,.mx-n1{margin-left:-8px!important}.m-n2{margin:-16px!important}.mt-n2,.my-n2{margin-top:-16px!important}.mr-n2,.mx-n2{margin-right:-16px!important}.mb-n2,.my-n2{margin-bottom:-16px!important}.ml-n2,.mx-n2{margin-left:-16px!important}.m-n3{margin:-24px!important}.mt-n3,.my-n3{margin-top:-24px!important}.mr-n3,.mx-n3{margin-right:-24px!important}.mb-n3,.my-n3{margin-bottom:-24px!important}.ml-n3,.mx-n3{margin-left:-24px!important}.m-n4{margin:-32px!important}.mt-n4,.my-n4{margin-top:-32px!important}.mr-n4,.mx-n4{margin-right:-32px!important}.mb-n4,.my-n4{margin-bottom:-32px!important}.ml-n4,.mx-n4{margin-left:-32px!important}.m-n5{margin:-40px!important}.mt-n5,.my-n5{margin-top:-40px!important}.mr-n5,.mx-n5{margin-right:-40px!important}.mb-n5,.my-n5{margin-bottom:-40px!important}.ml-n5,.mx-n5{margin-left:-40px!important}.m-n6{margin:-48px!important}.mt-n6,.my-n6{margin-top:-48px!important}.mr-n6,.mx-n6{margin-right:-48px!important}.mb-n6,.my-n6{margin-bottom:-48px!important}.ml-n6,.mx-n6{margin-left:-48px!important}.m-n7{margin:-56px!important}.mt-n7,.my-n7{margin-top:-56px!important}.mr-n7,.mx-n7{margin-right:-56px!important}.mb-n7,.my-n7{margin-bottom:-56px!important}.ml-n7,.mx-n7{margin-left:-56px!important}.m-n8{margin:-64px!important}.mt-n8,.my-n8{margin-top:-64px!important}.mr-n8,.mx-n8{margin-right:-64px!important}.mb-n8,.my-n8{margin
-bottom:-64px!important}.ml-n8,.mx-n8{margin-left:-64px!important}.m-n9{margin:-72px!important}.mt-n9,.my-n9{margin-top:-72px!important}.mr-n9,.mx-n9{margin-right:-72px!important}.mb-n9,.my-n9{margin-bottom:-72px!important}.ml-n9,.mx-n9{margin-left:-72px!important}.m-n10{margin:-80px!important}.mt-n10,.my-n10{margin-top:-80px!important}.mr-n10,.mx-n10{margin-right:-80px!important}.mb-n10,.my-n10{margin-bottom:-80px!important}.ml-n10,.mx-n10{margin-left:-80px!important}.m-n12{margin:-96px!important}.mt-n12,.my-n12{margin-top:-96px!important}.mr-n12,.mx-n12{margin-right:-96px!important}.mb-n12,.my-n12{margin-bottom:-96px!important}.ml-n12,.mx-n12{margin-left:-96px!important}.m-n15{margin:-120px!important}.mt-n15,.my-n15{margin-top:-120px!important}.mr-n15,.mx-n15{margin-right:-120px!important}.mb-n15,.my-n15{margin-bottom:-120px!important}.ml-n15,.mx-n15{margin-left:-120px!important}.m-auto{margin:auto!important}.mt-auto,.my-auto{margin-top:auto!important}.mr-auto,.mx-auto{margin-right:auto!important}.mb-auto,.my-auto{margin-bottom:auto!important}.ml-auto,.mx-auto{margin-left:auto!important}@media(min-width:400px){.m-xs-0{margin:0!important}.mt-xs-0,.my-xs-0{margin-top:0!important}.mr-xs-0,.mx-xs-0{margin-right:0!important}.mb-xs-0,.my-xs-0{margin-bottom:0!important}.ml-xs-0,.mx-xs-0{margin-left:0!important}.m-xs-1{margin:8px!important}.mt-xs-1,.my-xs-1{margin-top:8px!important}.mr-xs-1,.mx-xs-1{margin-right:8px!important}.mb-xs-1,.my-xs-1{margin-bottom:8px!important}.ml-xs-1,.mx-xs-1{margin-left:8px!important}.m-xs-2{margin:16px!important}.mt-xs-2,.my-xs-2{margin-top:16px!important}.mr-xs-2,.mx-xs-2{margin-right:16px!important}.mb-xs-2,.my-xs-2{margin-bottom:16px!important}.ml-xs-2,.mx-xs-2{margin-left:16px!important}.m-xs-3{margin:24px!important}.mt-xs-3,.my-xs-3{margin-top:24px!important}.mr-xs-3,.mx-xs-3{margin-right:24px!important}.mb-xs-3,.my-xs-3{margin-bottom:24px!important}.ml-xs-3,.mx-xs-3{margin-left:24px!important}.m-xs-4{margin:32px!important}.mt-xs-4,.my-xs-4{margin-top:32px!important}.mr-xs-4,.mx-xs-4{margin-right:32px!important}.mb-xs-4,.my-xs-4{margin-bottom:32px!important}.ml-xs-4,.mx-xs-4{margin-left:32px!important}.m-xs-5{margin:40px!important}.mt-xs-5,.my-xs-5{margin-top:40px!important}.mr-xs-5,.mx-xs-5{margin-right:40px!important}.mb-xs-5,.my-xs-5{margin-bottom:40px!important}.ml-xs-5,.mx-xs-5{margin-left:40px!important}.m-xs-6{margin:48px!important}.mt-xs-6,.my-xs-6{margin-top:48px!important}.mr-xs-6,.mx-xs-6{margin-right:48px!important}.mb-xs-6,.my-xs-6{margin-bottom:48px!important}.ml-xs-6,.mx-xs-6{margin-left:48px!important}.m-xs-7{margin:56px!important}.mt-xs-7,.my-xs-7{margin-top:56px!important}.mr-xs-7,.mx-xs-7{margin-right:56px!important}.mb-xs-7,.my-xs-7{margin-bottom:56px!important}.ml-xs-7,.mx-xs-7{margin-left:56px!important}.m-xs-8{margin:64px!important}.mt-xs-8,.my-xs-8{margin-top:64px!important}.mr-xs-8,.mx-xs-8{margin-right:64px!important}.mb-xs-8,.my-xs-8{margin-bottom:64px!important}.ml-xs-8,.mx-xs-8{margin-left:64px!important}.m-xs-9{margin:72px!important}.mt-xs-9,.my-xs-9{margin-top:72px!important}.mr-xs-9,.mx-xs-9{margin-right:72px!important}.mb-xs-9,.my-xs-9{margin-bottom:72px!important}.ml-xs-9,.mx-xs-9{margin-left:72px!important}.m-xs-10{margin:80px!important}.mt-xs-10,.my-xs-10{margin-top:80px!important}.mr-xs-10,.mx-xs-10{margin-right:80px!important}.mb-xs-10,.my-xs-10{margin-bottom:80px!important}.ml-xs-10,.mx-xs-10{margin-left:80px!important}.m-xs-12{margin:96px!important}.mt-xs-12,.my-xs-12{margin-top:96px!important}.mr-xs-12,.mx-xs-12{margin-
right:96px!important}.mb-xs-12,.my-xs-12{margin-bottom:96px!important}.ml-xs-12,.mx-xs-12{margin-left:96px!important}.m-xs-15{margin:120px!important}.mt-xs-15,.my-xs-15{margin-top:120px!important}.mr-xs-15,.mx-xs-15{margin-right:120px!important}.mb-xs-15,.my-xs-15{margin-bottom:120px!important}.ml-xs-15,.mx-xs-15{margin-left:120px!important}.p-xs-0{padding:0!important}.pt-xs-0,.py-xs-0{padding-top:0!important}.pr-xs-0,.px-xs-0{padding-right:0!important}.pb-xs-0,.py-xs-0{padding-bottom:0!important}.pl-xs-0,.px-xs-0{padding-left:0!important}.p-xs-1{padding:8px!important}.pt-xs-1,.py-xs-1{padding-top:8px!important}.pr-xs-1,.px-xs-1{padding-right:8px!important}.pb-xs-1,.py-xs-1{padding-bottom:8px!important}.pl-xs-1,.px-xs-1{padding-left:8px!important}.p-xs-2{padding:16px!important}.pt-xs-2,.py-xs-2{padding-top:16px!important}.pr-xs-2,.px-xs-2{padding-right:16px!important}.pb-xs-2,.py-xs-2{padding-bottom:16px!important}.pl-xs-2,.px-xs-2{padding-left:16px!important}.p-xs-3{padding:24px!important}.pt-xs-3,.py-xs-3{padding-top:24px!important}.pr-xs-3,.px-xs-3{padding-right:24px!important}.pb-xs-3,.py-xs-3{padding-bottom:24px!important}.pl-xs-3,.px-xs-3{padding-left:24px!important}.p-xs-4{padding:32px!important}.pt-xs-4,.py-xs-4{padding-top:32px!important}.pr-xs-4,.px-xs-4{padding-right:32px!important}.pb-xs-4,.py-xs-4{padding-bottom:32px!important}.pl-xs-4,.px-xs-4{padding-left:32px!important}.p-xs-5{padding:40px!important}.pt-xs-5,.py-xs-5{padding-top:40px!important}.pr-xs-5,.px-xs-5{padding-right:40px!important}.pb-xs-5,.py-xs-5{padding-bottom:40px!important}.pl-xs-5,.px-xs-5{padding-left:40px!important}.p-xs-6{padding:48px!important}.pt-xs-6,.py-xs-6{padding-top:48px!important}.pr-xs-6,.px-xs-6{padding-right:48px!important}.pb-xs-6,.py-xs-6{padding-bottom:48px!important}.pl-xs-6,.px-xs-6{padding-left:48px!important}.p-xs-7{padding:56px!important}.pt-xs-7,.py-xs-7{padding-top:56px!important}.pr-xs-7,.px-xs-7{padding-right:56px!important}.pb-xs-7,.py-xs-7{padding-bottom:56px!important}.pl-xs-7,.px-xs-7{padding-left:56px!important}.p-xs-8{padding:64px!important}.pt-xs-8,.py-xs-8{padding-top:64px!important}.pr-xs-8,.px-xs-8{padding-right:64px!important}.pb-xs-8,.py-xs-8{padding-bottom:64px!important}.pl-xs-8,.px-xs-8{padding-left:64px!important}.p-xs-9{padding:72px!important}.pt-xs-9,.py-xs-9{padding-top:72px!important}.pr-xs-9,.px-xs-9{padding-right:72px!important}.pb-xs-9,.py-xs-9{padding-bottom:72px!important}.pl-xs-9,.px-xs-9{padding-left:72px!important}.p-xs-10{padding:80px!important}.pt-xs-10,.py-xs-10{padding-top:80px!important}.pr-xs-10,.px-xs-10{padding-right:80px!important}.pb-xs-10,.py-xs-10{padding-bottom:80px!important}.pl-xs-10,.px-xs-10{padding-left:80px!important}.p-xs-12{padding:96px!important}.pt-xs-12,.py-xs-12{padding-top:96px!important}.pr-xs-12,.px-xs-12{padding-right:96px!important}.pb-xs-12,.py-xs-12{padding-bottom:96px!important}.pl-xs-12,.px-xs-12{padding-left:96px!important}.p-xs-15{padding:120px!important}.pt-xs-15,.py-xs-15{padding-top:120px!important}.pr-xs-15,.px-xs-15{padding-right:120px!important}.pb-xs-15,.py-xs-15{padding-bottom:120px!important}.pl-xs-15,.px-xs-15{padding-left:120px!important}.m-xs-n1{margin:-8px!important}.mt-xs-n1,.my-xs-n1{margin-top:-8px!important}.mr-xs-n1,.mx-xs-n1{margin-right:-8px!important}.mb-xs-n1,.my-xs-n1{margin-bottom:-8px!important}.ml-xs-n1,.mx-xs-n1{margin-left:-8px!important}.m-xs-n2{margin:-16px!important}.mt-xs-n2,.my-xs-n2{margin-top:-16px!important}.mr-xs-n2,.mx-xs-n2{margin-right:-16px!important}.mb-xs-n2,.my-xs-n2{margin-bot
tom:-16px!important}.ml-xs-n2,.mx-xs-n2{margin-left:-16px!important}.m-xs-n3{margin:-24px!important}.mt-xs-n3,.my-xs-n3{margin-top:-24px!important}.mr-xs-n3,.mx-xs-n3{margin-right:-24px!important}.mb-xs-n3,.my-xs-n3{margin-bottom:-24px!important}.ml-xs-n3,.mx-xs-n3{margin-left:-24px!important}.m-xs-n4{margin:-32px!important}.mt-xs-n4,.my-xs-n4{margin-top:-32px!important}.mr-xs-n4,.mx-xs-n4{margin-right:-32px!important}.mb-xs-n4,.my-xs-n4{margin-bottom:-32px!important}.ml-xs-n4,.mx-xs-n4{margin-left:-32px!important}.m-xs-n5{margin:-40px!important}.mt-xs-n5,.my-xs-n5{margin-top:-40px!important}.mr-xs-n5,.mx-xs-n5{margin-right:-40px!important}.mb-xs-n5,.my-xs-n5{margin-bottom:-40px!important}.ml-xs-n5,.mx-xs-n5{margin-left:-40px!important}.m-xs-n6{margin:-48px!important}.mt-xs-n6,.my-xs-n6{margin-top:-48px!important}.mr-xs-n6,.mx-xs-n6{margin-right:-48px!important}.mb-xs-n6,.my-xs-n6{margin-bottom:-48px!important}.ml-xs-n6,.mx-xs-n6{margin-left:-48px!important}.m-xs-n7{margin:-56px!important}.mt-xs-n7,.my-xs-n7{margin-top:-56px!important}.mr-xs-n7,.mx-xs-n7{margin-right:-56px!important}.mb-xs-n7,.my-xs-n7{margin-bottom:-56px!important}.ml-xs-n7,.mx-xs-n7{margin-left:-56px!important}.m-xs-n8{margin:-64px!important}.mt-xs-n8,.my-xs-n8{margin-top:-64px!important}.mr-xs-n8,.mx-xs-n8{margin-right:-64px!important}.mb-xs-n8,.my-xs-n8{margin-bottom:-64px!important}.ml-xs-n8,.mx-xs-n8{margin-left:-64px!important}.m-xs-n9{margin:-72px!important}.mt-xs-n9,.my-xs-n9{margin-top:-72px!important}.mr-xs-n9,.mx-xs-n9{margin-right:-72px!important}.mb-xs-n9,.my-xs-n9{margin-bottom:-72px!important}.ml-xs-n9,.mx-xs-n9{margin-left:-72px!important}.m-xs-n10{margin:-80px!important}.mt-xs-n10,.my-xs-n10{margin-top:-80px!important}.mr-xs-n10,.mx-xs-n10{margin-right:-80px!important}.mb-xs-n10,.my-xs-n10{margin-bottom:-80px!important}.ml-xs-n10,.mx-xs-n10{margin-left:-80px!important}.m-xs-n12{margin:-96px!important}.mt-xs-n12,.my-xs-n12{margin-top:-96px!important}.mr-xs-n12,.mx-xs-n12{margin-right:-96px!important}.mb-xs-n12,.my-xs-n12{margin-bottom:-96px!important}.ml-xs-n12,.mx-xs-n12{margin-left:-96px!important}.m-xs-n15{margin:-120px!important}.mt-xs-n15,.my-xs-n15{margin-top:-120px!important}.mr-xs-n15,.mx-xs-n15{margin-right:-120px!important}.mb-xs-n15,.my-xs-n15{margin-bottom:-120px!important}.ml-xs-n15,.mx-xs-n15{margin-left:-120px!important}.m-xs-auto{margin:auto!important}.mt-xs-auto,.my-xs-auto{margin-top:auto!important}.mr-xs-auto,.mx-xs-auto{margin-right:auto!important}.mb-xs-auto,.my-xs-auto{margin-bottom:auto!important}.ml-xs-auto,.mx-xs-auto{margin-left:auto!important}}@media(min-width:616px){.m-sm-0{margin:0!important}.mt-sm-0,.my-sm-0{margin-top:0!important}.mr-sm-0,.mx-sm-0{margin-right:0!important}.mb-sm-0,.my-sm-0{margin-bottom:0!important}.ml-sm-0,.mx-sm-0{margin-left:0!important}.m-sm-1{margin:8px!important}.mt-sm-1,.my-sm-1{margin-top:8px!important}.mr-sm-1,.mx-sm-1{margin-right:8px!important}.mb-sm-1,.my-sm-1{margin-bottom:8px!important}.ml-sm-1,.mx-sm-1{margin-left:8px!important}.m-sm-2{margin:16px!important}.mt-sm-2,.my-sm-2{margin-top:16px!important}.mr-sm-2,.mx-sm-2{margin-right:16px!important}.mb-sm-2,.my-sm-2{margin-bottom:16px!important}.ml-sm-2,.mx-sm-2{margin-left:16px!important}.m-sm-3{margin:24px!important}.mt-sm-3,.my-sm-3{margin-top:24px!important}.mr-sm-3,.mx-sm-3{margin-right:24px!important}.mb-sm-3,.my-sm-3{margin-bottom:24px!important}.ml-sm-3,.mx-sm-3{margin-left:24px!important}.m-sm-4{margin:32px!important}.mt-sm-4,.my-sm-4{margin-top:32px!important}.mr-sm-4,.mx-sm-4{margin-righ
t:32px!important}.mb-sm-4,.my-sm-4{margin-bottom:32px!important}.ml-sm-4,.mx-sm-4{margin-left:32px!important}.m-sm-5{margin:40px!important}.mt-sm-5,.my-sm-5{margin-top:40px!important}.mr-sm-5,.mx-sm-5{margin-right:40px!important}.mb-sm-5,.my-sm-5{margin-bottom:40px!important}.ml-sm-5,.mx-sm-5{margin-left:40px!important}.m-sm-6{margin:48px!important}.mt-sm-6,.my-sm-6{margin-top:48px!important}.mr-sm-6,.mx-sm-6{margin-right:48px!important}.mb-sm-6,.my-sm-6{margin-bottom:48px!important}.ml-sm-6,.mx-sm-6{margin-left:48px!important}.m-sm-7{margin:56px!important}.mt-sm-7,.my-sm-7{margin-top:56px!important}.mr-sm-7,.mx-sm-7{margin-right:56px!important}.mb-sm-7,.my-sm-7{margin-bottom:56px!important}.ml-sm-7,.mx-sm-7{margin-left:56px!important}.m-sm-8{margin:64px!important}.mt-sm-8,.my-sm-8{margin-top:64px!important}.mr-sm-8,.mx-sm-8{margin-right:64px!important}.mb-sm-8,.my-sm-8{margin-bottom:64px!important}.ml-sm-8,.mx-sm-8{margin-left:64px!important}.m-sm-9{margin:72px!important}.mt-sm-9,.my-sm-9{margin-top:72px!important}.mr-sm-9,.mx-sm-9{margin-right:72px!important}.mb-sm-9,.my-sm-9{margin-bottom:72px!important}.ml-sm-9,.mx-sm-9{margin-left:72px!important}.m-sm-10{margin:80px!important}.mt-sm-10,.my-sm-10{margin-top:80px!important}.mr-sm-10,.mx-sm-10{margin-right:80px!important}.mb-sm-10,.my-sm-10{margin-bottom:80px!important}.ml-sm-10,.mx-sm-10{margin-left:80px!important}.m-sm-12{margin:96px!important}.mt-sm-12,.my-sm-12{margin-top:96px!important}.mr-sm-12,.mx-sm-12{margin-right:96px!important}.mb-sm-12,.my-sm-12{margin-bottom:96px!important}.ml-sm-12,.mx-sm-12{margin-left:96px!important}.m-sm-15{margin:120px!important}.mt-sm-15,.my-sm-15{margin-top:120px!important}.mr-sm-15,.mx-sm-15{margin-right:120px!important}.mb-sm-15,.my-sm-15{margin-bottom:120px!important}.ml-sm-15,.mx-sm-15{margin-left:120px!important}.p-sm-0{padding:0!important}.pt-sm-0,.py-sm-0{padding-top:0!important}.pr-sm-0,.px-sm-0{padding-right:0!important}.pb-sm-0,.py-sm-0{padding-bottom:0!important}.pl-sm-0,.px-sm-0{padding-left:0!important}.p-sm-1{padding:8px!important}.pt-sm-1,.py-sm-1{padding-top:8px!important}.pr-sm-1,.px-sm-1{padding-right:8px!important}.pb-sm-1,.py-sm-1{padding-bottom:8px!important}.pl-sm-1,.px-sm-1{padding-left:8px!important}.p-sm-2{padding:16px!important}.pt-sm-2,.py-sm-2{padding-top:16px!important}.pr-sm-2,.px-sm-2{padding-right:16px!important}.pb-sm-2,.py-sm-2{padding-bottom:16px!important}.pl-sm-2,.px-sm-2{padding-left:16px!important}.p-sm-3{padding:24px!important}.pt-sm-3,.py-sm-3{padding-top:24px!important}.pr-sm-3,.px-sm-3{padding-right:24px!important}.pb-sm-3,.py-sm-3{padding-bottom:24px!important}.pl-sm-3,.px-sm-3{padding-left:24px!important}.p-sm-4{padding:32px!important}.pt-sm-4,.py-sm-4{padding-top:32px!important}.pr-sm-4,.px-sm-4{padding-right:32px!important}.pb-sm-4,.py-sm-4{padding-bottom:32px!important}.pl-sm-4,.px-sm-4{padding-left:32px!important}.p-sm-5{padding:40px!important}.pt-sm-5,.py-sm-5{padding-top:40px!important}.pr-sm-5,.px-sm-5{padding-right:40px!important}.pb-sm-5,.py-sm-5{padding-bottom:40px!important}.pl-sm-5,.px-sm-5{padding-left:40px!important}.p-sm-6{padding:48px!important}.pt-sm-6,.py-sm-6{padding-top:48px!important}.pr-sm-6,.px-sm-6{padding-right:48px!important}.pb-sm-6,.py-sm-6{padding-bottom:48px!important}.pl-sm-6,.px-sm-6{padding-left:48px!important}.p-sm-7{padding:56px!important}.pt-sm-7,.py-sm-7{padding-top:56px!important}.pr-sm-7,.px-sm-7{padding-right:56px!important}.pb-sm-7,.py-sm-7{padding-bottom:56px!important}.pl-sm-7,.px-sm-7{padding-left:56px!important}.p
-sm-8{padding:64px!important}.pt-sm-8,.py-sm-8{padding-top:64px!important}.pr-sm-8,.px-sm-8{padding-right:64px!important}.pb-sm-8,.py-sm-8{padding-bottom:64px!important}.pl-sm-8,.px-sm-8{padding-left:64px!important}.p-sm-9{padding:72px!important}.pt-sm-9,.py-sm-9{padding-top:72px!important}.pr-sm-9,.px-sm-9{padding-right:72px!important}.pb-sm-9,.py-sm-9{padding-bottom:72px!important}.pl-sm-9,.px-sm-9{padding-left:72px!important}.p-sm-10{padding:80px!important}.pt-sm-10,.py-sm-10{padding-top:80px!important}.pr-sm-10,.px-sm-10{padding-right:80px!important}.pb-sm-10,.py-sm-10{padding-bottom:80px!important}.pl-sm-10,.px-sm-10{padding-left:80px!important}.p-sm-12{padding:96px!important}.pt-sm-12,.py-sm-12{padding-top:96px!important}.pr-sm-12,.px-sm-12{padding-right:96px!important}.pb-sm-12,.py-sm-12{padding-bottom:96px!important}.pl-sm-12,.px-sm-12{padding-left:96px!important}.p-sm-15{padding:120px!important}.pt-sm-15,.py-sm-15{padding-top:120px!important}.pr-sm-15,.px-sm-15{padding-right:120px!important}.pb-sm-15,.py-sm-15{padding-bottom:120px!important}.pl-sm-15,.px-sm-15{padding-left:120px!important}.m-sm-n1{margin:-8px!important}.mt-sm-n1,.my-sm-n1{margin-top:-8px!important}.mr-sm-n1,.mx-sm-n1{margin-right:-8px!important}.mb-sm-n1,.my-sm-n1{margin-bottom:-8px!important}.ml-sm-n1,.mx-sm-n1{margin-left:-8px!important}.m-sm-n2{margin:-16px!important}.mt-sm-n2,.my-sm-n2{margin-top:-16px!important}.mr-sm-n2,.mx-sm-n2{margin-right:-16px!important}.mb-sm-n2,.my-sm-n2{margin-bottom:-16px!important}.ml-sm-n2,.mx-sm-n2{margin-left:-16px!important}.m-sm-n3{margin:-24px!important}.mt-sm-n3,.my-sm-n3{margin-top:-24px!important}.mr-sm-n3,.mx-sm-n3{margin-right:-24px!important}.mb-sm-n3,.my-sm-n3{margin-bottom:-24px!important}.ml-sm-n3,.mx-sm-n3{margin-left:-24px!important}.m-sm-n4{margin:-32px!important}.mt-sm-n4,.my-sm-n4{margin-top:-32px!important}.mr-sm-n4,.mx-sm-n4{margin-right:-32px!important}.mb-sm-n4,.my-sm-n4{margin-bottom:-32px!important}.ml-sm-n4,.mx-sm-n4{margin-left:-32px!important}.m-sm-n5{margin:-40px!important}.mt-sm-n5,.my-sm-n5{margin-top:-40px!important}.mr-sm-n5,.mx-sm-n5{margin-right:-40px!important}.mb-sm-n5,.my-sm-n5{margin-bottom:-40px!important}.ml-sm-n5,.mx-sm-n5{margin-left:-40px!important}.m-sm-n6{margin:-48px!important}.mt-sm-n6,.my-sm-n6{margin-top:-48px!important}.mr-sm-n6,.mx-sm-n6{margin-right:-48px!important}.mb-sm-n6,.my-sm-n6{margin-bottom:-48px!important}.ml-sm-n6,.mx-sm-n6{margin-left:-48px!important}.m-sm-n7{margin:-56px!important}.mt-sm-n7,.my-sm-n7{margin-top:-56px!important}.mr-sm-n7,.mx-sm-n7{margin-right:-56px!important}.mb-sm-n7,.my-sm-n7{margin-bottom:-56px!important}.ml-sm-n7,.mx-sm-n7{margin-left:-56px!important}.m-sm-n8{margin:-64px!important}.mt-sm-n8,.my-sm-n8{margin-top:-64px!important}.mr-sm-n8,.mx-sm-n8{margin-right:-64px!important}.mb-sm-n8,.my-sm-n8{margin-bottom:-64px!important}.ml-sm-n8,.mx-sm-n8{margin-left:-64px!important}.m-sm-n9{margin:-72px!important}.mt-sm-n9,.my-sm-n9{margin-top:-72px!important}.mr-sm-n9,.mx-sm-n9{margin-right:-72px!important}.mb-sm-n9,.my-sm-n9{margin-bottom:-72px!important}.ml-sm-n9,.mx-sm-n9{margin-left:-72px!important}.m-sm-n10{margin:-80px!important}.mt-sm-n10,.my-sm-n10{margin-top:-80px!important}.mr-sm-n10,.mx-sm-n10{margin-right:-80px!important}.mb-sm-n10,.my-sm-n10{margin-bottom:-80px!important}.ml-sm-n10,.mx-sm-n10{margin-left:-80px!important}.m-sm-n12{margin:-96px!important}.mt-sm-n12,.my-sm-n12{margin-top:-96px!important}.mr-sm-n12,.mx-sm-n12{margin-right:-96px!important}.mb-sm-n12,.my-sm-n12{margin-bottom:-96px!i
mportant}.ml-sm-n12,.mx-sm-n12{margin-left:-96px!important}.m-sm-n15{margin:-120px!important}.mt-sm-n15,.my-sm-n15{margin-top:-120px!important}.mr-sm-n15,.mx-sm-n15{margin-right:-120px!important}.mb-sm-n15,.my-sm-n15{margin-bottom:-120px!important}.ml-sm-n15,.mx-sm-n15{margin-left:-120px!important}.m-sm-auto{margin:auto!important}.mt-sm-auto,.my-sm-auto{margin-top:auto!important}.mr-sm-auto,.mx-sm-auto{margin-right:auto!important}.mb-sm-auto,.my-sm-auto{margin-bottom:auto!important}.ml-sm-auto,.mx-sm-auto{margin-left:auto!important}}@media(min-width:768px){.m-md-0{margin:0!important}.mt-md-0,.my-md-0{margin-top:0!important}.mr-md-0,.mx-md-0{margin-right:0!important}.mb-md-0,.my-md-0{margin-bottom:0!important}.ml-md-0,.mx-md-0{margin-left:0!important}.m-md-1{margin:8px!important}.mt-md-1,.my-md-1{margin-top:8px!important}.mr-md-1,.mx-md-1{margin-right:8px!important}.mb-md-1,.my-md-1{margin-bottom:8px!important}.ml-md-1,.mx-md-1{margin-left:8px!important}.m-md-2{margin:16px!important}.mt-md-2,.my-md-2{margin-top:16px!important}.mr-md-2,.mx-md-2{margin-right:16px!important}.mb-md-2,.my-md-2{margin-bottom:16px!important}.ml-md-2,.mx-md-2{margin-left:16px!important}.m-md-3{margin:24px!important}.mt-md-3,.my-md-3{margin-top:24px!important}.mr-md-3,.mx-md-3{margin-right:24px!important}.mb-md-3,.my-md-3{margin-bottom:24px!important}.ml-md-3,.mx-md-3{margin-left:24px!important}.m-md-4{margin:32px!important}.mt-md-4,.my-md-4{margin-top:32px!important}.mr-md-4,.mx-md-4{margin-right:32px!important}.mb-md-4,.my-md-4{margin-bottom:32px!important}.ml-md-4,.mx-md-4{margin-left:32px!important}.m-md-5{margin:40px!important}.mt-md-5,.my-md-5{margin-top:40px!important}.mr-md-5,.mx-md-5{margin-right:40px!important}.mb-md-5,.my-md-5{margin-bottom:40px!important}.ml-md-5,.mx-md-5{margin-left:40px!important}.m-md-6{margin:48px!important}.mt-md-6,.my-md-6{margin-top:48px!important}.mr-md-6,.mx-md-6{margin-right:48px!important}.mb-md-6,.my-md-6{margin-bottom:48px!important}.ml-md-6,.mx-md-6{margin-left:48px!important}.m-md-7{margin:56px!important}.mt-md-7,.my-md-7{margin-top:56px!important}.mr-md-7,.mx-md-7{margin-right:56px!important}.mb-md-7,.my-md-7{margin-bottom:56px!important}.ml-md-7,.mx-md-7{margin-left:56px!important}.m-md-8{margin:64px!important}.mt-md-8,.my-md-8{margin-top:64px!important}.mr-md-8,.mx-md-8{margin-right:64px!important}.mb-md-8,.my-md-8{margin-bottom:64px!important}.ml-md-8,.mx-md-8{margin-left:64px!important}.m-md-9{margin:72px!important}.mt-md-9,.my-md-9{margin-top:72px!important}.mr-md-9,.mx-md-9{margin-right:72px!important}.mb-md-9,.my-md-9{margin-bottom:72px!important}.ml-md-9,.mx-md-9{margin-left:72px!important}.m-md-10{margin:80px!important}.mt-md-10,.my-md-10{margin-top:80px!important}.mr-md-10,.mx-md-10{margin-right:80px!important}.mb-md-10,.my-md-10{margin-bottom:80px!important}.ml-md-10,.mx-md-10{margin-left:80px!important}.m-md-12{margin:96px!important}.mt-md-12,.my-md-12{margin-top:96px!important}.mr-md-12,.mx-md-12{margin-right:96px!important}.mb-md-12,.my-md-12{margin-bottom:96px!important}.ml-md-12,.mx-md-12{margin-left:96px!important}.m-md-15{margin:120px!important}.mt-md-15,.my-md-15{margin-top:120px!important}.mr-md-15,.mx-md-15{margin-right:120px!important}.mb-md-15,.my-md-15{margin-bottom:120px!important}.ml-md-15,.mx-md-15{margin-left:120px!important}.p-md-0{padding:0!important}.pt-md-0,.py-md-0{padding-top:0!important}.pr-md-0,.px-md-0{padding-right:0!important}.pb-md-0,.py-md-0{padding-bottom:0!important}.pl-md-0,.px-md-0{padding-left:0!important}.p-md-1{padding:8px!im
portant}.pt-md-1,.py-md-1{padding-top:8px!important}.pr-md-1,.px-md-1{padding-right:8px!important}.pb-md-1,.py-md-1{padding-bottom:8px!important}.pl-md-1,.px-md-1{padding-left:8px!important}.p-md-2{padding:16px!important}.pt-md-2,.py-md-2{padding-top:16px!important}.pr-md-2,.px-md-2{padding-right:16px!important}.pb-md-2,.py-md-2{padding-bottom:16px!important}.pl-md-2,.px-md-2{padding-left:16px!important}.p-md-3{padding:24px!important}.pt-md-3,.py-md-3{padding-top:24px!important}.pr-md-3,.px-md-3{padding-right:24px!important}.pb-md-3,.py-md-3{padding-bottom:24px!important}.pl-md-3,.px-md-3{padding-left:24px!important}.p-md-4{padding:32px!important}.pt-md-4,.py-md-4{padding-top:32px!important}.pr-md-4,.px-md-4{padding-right:32px!important}.pb-md-4,.py-md-4{padding-bottom:32px!important}.pl-md-4,.px-md-4{padding-left:32px!important}.p-md-5{padding:40px!important}.pt-md-5,.py-md-5{padding-top:40px!important}.pr-md-5,.px-md-5{padding-right:40px!important}.pb-md-5,.py-md-5{padding-bottom:40px!important}.pl-md-5,.px-md-5{padding-left:40px!important}.p-md-6{padding:48px!important}.pt-md-6,.py-md-6{padding-top:48px!important}.pr-md-6,.px-md-6{padding-right:48px!important}.pb-md-6,.py-md-6{padding-bottom:48px!important}.pl-md-6,.px-md-6{padding-left:48px!important}.p-md-7{padding:56px!important}.pt-md-7,.py-md-7{padding-top:56px!important}.pr-md-7,.px-md-7{padding-right:56px!important}.pb-md-7,.py-md-7{padding-bottom:56px!important}.pl-md-7,.px-md-7{padding-left:56px!important}.p-md-8{padding:64px!important}.pt-md-8,.py-md-8{padding-top:64px!important}.pr-md-8,.px-md-8{padding-right:64px!important}.pb-md-8,.py-md-8{padding-bottom:64px!important}.pl-md-8,.px-md-8{padding-left:64px!important}.p-md-9{padding:72px!important}.pt-md-9,.py-md-9{padding-top:72px!important}.pr-md-9,.px-md-9{padding-right:72px!important}.pb-md-9,.py-md-9{padding-bottom:72px!important}.pl-md-9,.px-md-9{padding-left:72px!important}.p-md-10{padding:80px!important}.pt-md-10,.py-md-10{padding-top:80px!important}.pr-md-10,.px-md-10{padding-right:80px!important}.pb-md-10,.py-md-10{padding-bottom:80px!important}.pl-md-10,.px-md-10{padding-left:80px!important}.p-md-12{padding:96px!important}.pt-md-12,.py-md-12{padding-top:96px!important}.pr-md-12,.px-md-12{padding-right:96px!important}.pb-md-12,.py-md-12{padding-bottom:96px!important}.pl-md-12,.px-md-12{padding-left:96px!important}.p-md-15{padding:120px!important}.pt-md-15,.py-md-15{padding-top:120px!important}.pr-md-15,.px-md-15{padding-right:120px!important}.pb-md-15,.py-md-15{padding-bottom:120px!important}.pl-md-15,.px-md-15{padding-left:120px!important}.m-md-n1{margin:-8px!important}.mt-md-n1,.my-md-n1{margin-top:-8px!important}.mr-md-n1,.mx-md-n1{margin-right:-8px!important}.mb-md-n1,.my-md-n1{margin-bottom:-8px!important}.ml-md-n1,.mx-md-n1{margin-left:-8px!important}.m-md-n2{margin:-16px!important}.mt-md-n2,.my-md-n2{margin-top:-16px!important}.mr-md-n2,.mx-md-n2{margin-right:-16px!important}.mb-md-n2,.my-md-n2{margin-bottom:-16px!important}.ml-md-n2,.mx-md-n2{margin-left:-16px!important}.m-md-n3{margin:-24px!important}.mt-md-n3,.my-md-n3{margin-top:-24px!important}.mr-md-n3,.mx-md-n3{margin-right:-24px!important}.mb-md-n3,.my-md-n3{margin-bottom:-24px!important}.ml-md-n3,.mx-md-n3{margin-left:-24px!important}.m-md-n4{margin:-32px!important}.mt-md-n4,.my-md-n4{margin-top:-32px!important}.mr-md-n4,.mx-md-n4{margin-right:-32px!important}.mb-md-n4,.my-md-n4{margin-bottom:-32px!important}.ml-md-n4,.mx-md-n4{margin-left:-32px!important}.m-md-n5{margin:-40px!important}.mt-md-n5,.my-m
d-n5{margin-top:-40px!important}.mr-md-n5,.mx-md-n5{margin-right:-40px!important}.mb-md-n5,.my-md-n5{margin-bottom:-40px!important}.ml-md-n5,.mx-md-n5{margin-left:-40px!important}.m-md-n6{margin:-48px!important}.mt-md-n6,.my-md-n6{margin-top:-48px!important}.mr-md-n6,.mx-md-n6{margin-right:-48px!important}.mb-md-n6,.my-md-n6{margin-bottom:-48px!important}.ml-md-n6,.mx-md-n6{margin-left:-48px!important}.m-md-n7{margin:-56px!important}.mt-md-n7,.my-md-n7{margin-top:-56px!important}.mr-md-n7,.mx-md-n7{margin-right:-56px!important}.mb-md-n7,.my-md-n7{margin-bottom:-56px!important}.ml-md-n7,.mx-md-n7{margin-left:-56px!important}.m-md-n8{margin:-64px!important}.mt-md-n8,.my-md-n8{margin-top:-64px!important}.mr-md-n8,.mx-md-n8{margin-right:-64px!important}.mb-md-n8,.my-md-n8{margin-bottom:-64px!important}.ml-md-n8,.mx-md-n8{margin-left:-64px!important}.m-md-n9{margin:-72px!important}.mt-md-n9,.my-md-n9{margin-top:-72px!important}.mr-md-n9,.mx-md-n9{margin-right:-72px!important}.mb-md-n9,.my-md-n9{margin-bottom:-72px!important}.ml-md-n9,.mx-md-n9{margin-left:-72px!important}.m-md-n10{margin:-80px!important}.mt-md-n10,.my-md-n10{margin-top:-80px!important}.mr-md-n10,.mx-md-n10{margin-right:-80px!important}.mb-md-n10,.my-md-n10{margin-bottom:-80px!important}.ml-md-n10,.mx-md-n10{margin-left:-80px!important}.m-md-n12{margin:-96px!important}.mt-md-n12,.my-md-n12{margin-top:-96px!important}.mr-md-n12,.mx-md-n12{margin-right:-96px!important}.mb-md-n12,.my-md-n12{margin-bottom:-96px!important}.ml-md-n12,.mx-md-n12{margin-left:-96px!important}.m-md-n15{margin:-120px!important}.mt-md-n15,.my-md-n15{margin-top:-120px!important}.mr-md-n15,.mx-md-n15{margin-right:-120px!important}.mb-md-n15,.my-md-n15{margin-bottom:-120px!important}.ml-md-n15,.mx-md-n15{margin-left:-120px!important}.m-md-auto{margin:auto!important}.mt-md-auto,.my-md-auto{margin-top:auto!important}.mr-md-auto,.mx-md-auto{margin-right:auto!important}.mb-md-auto,.my-md-auto{margin-bottom:auto!important}.ml-md-auto,.mx-md-auto{margin-left:auto!important}}@media(min-width:980px){.m-lg-0{margin:0!important}.mt-lg-0,.my-lg-0{margin-top:0!important}.mr-lg-0,.mx-lg-0{margin-right:0!important}.mb-lg-0,.my-lg-0{margin-bottom:0!important}.ml-lg-0,.mx-lg-0{margin-left:0!important}.m-lg-1{margin:8px!important}.mt-lg-1,.my-lg-1{margin-top:8px!important}.mr-lg-1,.mx-lg-1{margin-right:8px!important}.mb-lg-1,.my-lg-1{margin-bottom:8px!important}.ml-lg-1,.mx-lg-1{margin-left:8px!important}.m-lg-2{margin:16px!important}.mt-lg-2,.my-lg-2{margin-top:16px!important}.mr-lg-2,.mx-lg-2{margin-right:16px!important}.mb-lg-2,.my-lg-2{margin-bottom:16px!important}.ml-lg-2,.mx-lg-2{margin-left:16px!important}.m-lg-3{margin:24px!important}.mt-lg-3,.my-lg-3{margin-top:24px!important}.mr-lg-3,.mx-lg-3{margin-right:24px!important}.mb-lg-3,.my-lg-3{margin-bottom:24px!important}.ml-lg-3,.mx-lg-3{margin-left:24px!important}.m-lg-4{margin:32px!important}.mt-lg-4,.my-lg-4{margin-top:32px!important}.mr-lg-4,.mx-lg-4{margin-right:32px!important}.mb-lg-4,.my-lg-4{margin-bottom:32px!important}.ml-lg-4,.mx-lg-4{margin-left:32px!important}.m-lg-5{margin:40px!important}.mt-lg-5,.my-lg-5{margin-top:40px!important}.mr-lg-5,.mx-lg-5{margin-right:40px!important}.mb-lg-5,.my-lg-5{margin-bottom:40px!important}.ml-lg-5,.mx-lg-5{margin-left:40px!important}.m-lg-6{margin:48px!important}.mt-lg-6,.my-lg-6{margin-top:48px!important}.mr-lg-6,.mx-lg-6{margin-right:48px!important}.mb-lg-6,.my-lg-6{margin-bottom:48px!important}.ml-lg-6,.mx-lg-6{margin-left:48px!important}.m-lg-7{margin:56px!important}.mt-
lg-7,.my-lg-7{margin-top:56px!important}.mr-lg-7,.mx-lg-7{margin-right:56px!important}.mb-lg-7,.my-lg-7{margin-bottom:56px!important}.ml-lg-7,.mx-lg-7{margin-left:56px!important}.m-lg-8{margin:64px!important}.mt-lg-8,.my-lg-8{margin-top:64px!important}.mr-lg-8,.mx-lg-8{margin-right:64px!important}.mb-lg-8,.my-lg-8{margin-bottom:64px!important}.ml-lg-8,.mx-lg-8{margin-left:64px!important}.m-lg-9{margin:72px!important}.mt-lg-9,.my-lg-9{margin-top:72px!important}.mr-lg-9,.mx-lg-9{margin-right:72px!important}.mb-lg-9,.my-lg-9{margin-bottom:72px!important}.ml-lg-9,.mx-lg-9{margin-left:72px!important}.m-lg-10{margin:80px!important}.mt-lg-10,.my-lg-10{margin-top:80px!important}.mr-lg-10,.mx-lg-10{margin-right:80px!important}.mb-lg-10,.my-lg-10{margin-bottom:80px!important}.ml-lg-10,.mx-lg-10{margin-left:80px!important}.m-lg-12{margin:96px!important}.mt-lg-12,.my-lg-12{margin-top:96px!important}.mr-lg-12,.mx-lg-12{margin-right:96px!important}.mb-lg-12,.my-lg-12{margin-bottom:96px!important}.ml-lg-12,.mx-lg-12{margin-left:96px!important}.m-lg-15{margin:120px!important}.mt-lg-15,.my-lg-15{margin-top:120px!important}.mr-lg-15,.mx-lg-15{margin-right:120px!important}.mb-lg-15,.my-lg-15{margin-bottom:120px!important}.ml-lg-15,.mx-lg-15{margin-left:120px!important}.p-lg-0{padding:0!important}.pt-lg-0,.py-lg-0{padding-top:0!important}.pr-lg-0,.px-lg-0{padding-right:0!important}.pb-lg-0,.py-lg-0{padding-bottom:0!important}.pl-lg-0,.px-lg-0{padding-left:0!important}.p-lg-1{padding:8px!important}.pt-lg-1,.py-lg-1{padding-top:8px!important}.pr-lg-1,.px-lg-1{padding-right:8px!important}.pb-lg-1,.py-lg-1{padding-bottom:8px!important}.pl-lg-1,.px-lg-1{padding-left:8px!important}.p-lg-2{padding:16px!important}.pt-lg-2,.py-lg-2{padding-top:16px!important}.pr-lg-2,.px-lg-2{padding-right:16px!important}.pb-lg-2,.py-lg-2{padding-bottom:16px!important}.pl-lg-2,.px-lg-2{padding-left:16px!important}.p-lg-3{padding:24px!important}.pt-lg-3,.py-lg-3{padding-top:24px!important}.pr-lg-3,.px-lg-3{padding-right:24px!important}.pb-lg-3,.py-lg-3{padding-bottom:24px!important}.pl-lg-3,.px-lg-3{padding-left:24px!important}.p-lg-4{padding:32px!important}.pt-lg-4,.py-lg-4{padding-top:32px!important}.pr-lg-4,.px-lg-4{padding-right:32px!important}.pb-lg-4,.py-lg-4{padding-bottom:32px!important}.pl-lg-4,.px-lg-4{padding-left:32px!important}.p-lg-5{padding:40px!important}.pt-lg-5,.py-lg-5{padding-top:40px!important}.pr-lg-5,.px-lg-5{padding-right:40px!important}.pb-lg-5,.py-lg-5{padding-bottom:40px!important}.pl-lg-5,.px-lg-5{padding-left:40px!important}.p-lg-6{padding:48px!important}.pt-lg-6,.py-lg-6{padding-top:48px!important}.pr-lg-6,.px-lg-6{padding-right:48px!important}.pb-lg-6,.py-lg-6{padding-bottom:48px!important}.pl-lg-6,.px-lg-6{padding-left:48px!important}.p-lg-7{padding:56px!important}.pt-lg-7,.py-lg-7{padding-top:56px!important}.pr-lg-7,.px-lg-7{padding-right:56px!important}.pb-lg-7,.py-lg-7{padding-bottom:56px!important}.pl-lg-7,.px-lg-7{padding-left:56px!important}.p-lg-8{padding:64px!important}.pt-lg-8,.py-lg-8{padding-top:64px!important}.pr-lg-8,.px-lg-8{padding-right:64px!important}.pb-lg-8,.py-lg-8{padding-bottom:64px!important}.pl-lg-8,.px-lg-8{padding-left:64px!important}.p-lg-9{padding:72px!important}.pt-lg-9,.py-lg-9{padding-top:72px!important}.pr-lg-9,.px-lg-9{padding-right:72px!important}.pb-lg-9,.py-lg-9{padding-bottom:72px!important}.pl-lg-9,.px-lg-9{padding-left:72px!important}.p-lg-10{padding:80px!important}.pt-lg-10,.py-lg-10{padding-top:80px!important}.pr-lg-10,.px-lg-10{padding-right:80px!important}.pb-lg-
10,.py-lg-10{padding-bottom:80px!important}.pl-lg-10,.px-lg-10{padding-left:80px!important}.p-lg-12{padding:96px!important}.pt-lg-12,.py-lg-12{padding-top:96px!important}.pr-lg-12,.px-lg-12{padding-right:96px!important}.pb-lg-12,.py-lg-12{padding-bottom:96px!important}.pl-lg-12,.px-lg-12{padding-left:96px!important}.p-lg-15{padding:120px!important}.pt-lg-15,.py-lg-15{padding-top:120px!important}.pr-lg-15,.px-lg-15{padding-right:120px!important}.pb-lg-15,.py-lg-15{padding-bottom:120px!important}.pl-lg-15,.px-lg-15{padding-left:120px!important}.m-lg-n1{margin:-8px!important}.mt-lg-n1,.my-lg-n1{margin-top:-8px!important}.mr-lg-n1,.mx-lg-n1{margin-right:-8px!important}.mb-lg-n1,.my-lg-n1{margin-bottom:-8px!important}.ml-lg-n1,.mx-lg-n1{margin-left:-8px!important}.m-lg-n2{margin:-16px!important}.mt-lg-n2,.my-lg-n2{margin-top:-16px!important}.mr-lg-n2,.mx-lg-n2{margin-right:-16px!important}.mb-lg-n2,.my-lg-n2{margin-bottom:-16px!important}.ml-lg-n2,.mx-lg-n2{margin-left:-16px!important}.m-lg-n3{margin:-24px!important}.mt-lg-n3,.my-lg-n3{margin-top:-24px!important}.mr-lg-n3,.mx-lg-n3{margin-right:-24px!important}.mb-lg-n3,.my-lg-n3{margin-bottom:-24px!important}.ml-lg-n3,.mx-lg-n3{margin-left:-24px!important}.m-lg-n4{margin:-32px!important}.mt-lg-n4,.my-lg-n4{margin-top:-32px!important}.mr-lg-n4,.mx-lg-n4{margin-right:-32px!important}.mb-lg-n4,.my-lg-n4{margin-bottom:-32px!important}.ml-lg-n4,.mx-lg-n4{margin-left:-32px!important}.m-lg-n5{margin:-40px!important}.mt-lg-n5,.my-lg-n5{margin-top:-40px!important}.mr-lg-n5,.mx-lg-n5{margin-right:-40px!important}.mb-lg-n5,.my-lg-n5{margin-bottom:-40px!important}.ml-lg-n5,.mx-lg-n5{margin-left:-40px!important}.m-lg-n6{margin:-48px!important}.mt-lg-n6,.my-lg-n6{margin-top:-48px!important}.mr-lg-n6,.mx-lg-n6{margin-right:-48px!important}.mb-lg-n6,.my-lg-n6{margin-bottom:-48px!important}.ml-lg-n6,.mx-lg-n6{margin-left:-48px!important}.m-lg-n7{margin:-56px!important}.mt-lg-n7,.my-lg-n7{margin-top:-56px!important}.mr-lg-n7,.mx-lg-n7{margin-right:-56px!important}.mb-lg-n7,.my-lg-n7{margin-bottom:-56px!important}.ml-lg-n7,.mx-lg-n7{margin-left:-56px!important}.m-lg-n8{margin:-64px!important}.mt-lg-n8,.my-lg-n8{margin-top:-64px!important}.mr-lg-n8,.mx-lg-n8{margin-right:-64px!important}.mb-lg-n8,.my-lg-n8{margin-bottom:-64px!important}.ml-lg-n8,.mx-lg-n8{margin-left:-64px!important}.m-lg-n9{margin:-72px!important}.mt-lg-n9,.my-lg-n9{margin-top:-72px!important}.mr-lg-n9,.mx-lg-n9{margin-right:-72px!important}.mb-lg-n9,.my-lg-n9{margin-bottom:-72px!important}.ml-lg-n9,.mx-lg-n9{margin-left:-72px!important}.m-lg-n10{margin:-80px!important}.mt-lg-n10,.my-lg-n10{margin-top:-80px!important}.mr-lg-n10,.mx-lg-n10{margin-right:-80px!important}.mb-lg-n10,.my-lg-n10{margin-bottom:-80px!important}.ml-lg-n10,.mx-lg-n10{margin-left:-80px!important}.m-lg-n12{margin:-96px!important}.mt-lg-n12,.my-lg-n12{margin-top:-96px!important}.mr-lg-n12,.mx-lg-n12{margin-right:-96px!important}.mb-lg-n12,.my-lg-n12{margin-bottom:-96px!important}.ml-lg-n12,.mx-lg-n12{margin-left:-96px!important}.m-lg-n15{margin:-120px!important}.mt-lg-n15,.my-lg-n15{margin-top:-120px!important}.mr-lg-n15,.mx-lg-n15{margin-right:-120px!important}.mb-lg-n15,.my-lg-n15{margin-bottom:-120px!important}.ml-lg-n15,.mx-lg-n15{margin-left:-120px!important}.m-lg-auto{margin:auto!important}.mt-lg-auto,.my-lg-auto{margin-top:auto!important}.mr-lg-auto,.mx-lg-auto{margin-right:auto!important}.mb-lg-auto,.my-lg-auto{margin-bottom:auto!important}.ml-lg-auto,.mx-lg-auto{margin-left:auto!important}}@media(min-width:1240px){.m-
xl-0{margin:0!important}.mt-xl-0,.my-xl-0{margin-top:0!important}.mr-xl-0,.mx-xl-0{margin-right:0!important}.mb-xl-0,.my-xl-0{margin-bottom:0!important}.ml-xl-0,.mx-xl-0{margin-left:0!important}.m-xl-1{margin:8px!important}.mt-xl-1,.my-xl-1{margin-top:8px!important}.mr-xl-1,.mx-xl-1{margin-right:8px!important}.mb-xl-1,.my-xl-1{margin-bottom:8px!important}.ml-xl-1,.mx-xl-1{margin-left:8px!important}.m-xl-2{margin:16px!important}.mt-xl-2,.my-xl-2{margin-top:16px!important}.mr-xl-2,.mx-xl-2{margin-right:16px!important}.mb-xl-2,.my-xl-2{margin-bottom:16px!important}.ml-xl-2,.mx-xl-2{margin-left:16px!important}.m-xl-3{margin:24px!important}.mt-xl-3,.my-xl-3{margin-top:24px!important}.mr-xl-3,.mx-xl-3{margin-right:24px!important}.mb-xl-3,.my-xl-3{margin-bottom:24px!important}.ml-xl-3,.mx-xl-3{margin-left:24px!important}.m-xl-4{margin:32px!important}.mt-xl-4,.my-xl-4{margin-top:32px!important}.mr-xl-4,.mx-xl-4{margin-right:32px!important}.mb-xl-4,.my-xl-4{margin-bottom:32px!important}.ml-xl-4,.mx-xl-4{margin-left:32px!important}.m-xl-5{margin:40px!important}.mt-xl-5,.my-xl-5{margin-top:40px!important}.mr-xl-5,.mx-xl-5{margin-right:40px!important}.mb-xl-5,.my-xl-5{margin-bottom:40px!important}.ml-xl-5,.mx-xl-5{margin-left:40px!important}.m-xl-6{margin:48px!important}.mt-xl-6,.my-xl-6{margin-top:48px!important}.mr-xl-6,.mx-xl-6{margin-right:48px!important}.mb-xl-6,.my-xl-6{margin-bottom:48px!important}.ml-xl-6,.mx-xl-6{margin-left:48px!important}.m-xl-7{margin:56px!important}.mt-xl-7,.my-xl-7{margin-top:56px!important}.mr-xl-7,.mx-xl-7{margin-right:56px!important}.mb-xl-7,.my-xl-7{margin-bottom:56px!important}.ml-xl-7,.mx-xl-7{margin-left:56px!important}.m-xl-8{margin:64px!important}.mt-xl-8,.my-xl-8{margin-top:64px!important}.mr-xl-8,.mx-xl-8{margin-right:64px!important}.mb-xl-8,.my-xl-8{margin-bottom:64px!important}.ml-xl-8,.mx-xl-8{margin-left:64px!important}.m-xl-9{margin:72px!important}.mt-xl-9,.my-xl-9{margin-top:72px!important}.mr-xl-9,.mx-xl-9{margin-right:72px!important}.mb-xl-9,.my-xl-9{margin-bottom:72px!important}.ml-xl-9,.mx-xl-9{margin-left:72px!important}.m-xl-10{margin:80px!important}.mt-xl-10,.my-xl-10{margin-top:80px!important}.mr-xl-10,.mx-xl-10{margin-right:80px!important}.mb-xl-10,.my-xl-10{margin-bottom:80px!important}.ml-xl-10,.mx-xl-10{margin-left:80px!important}.m-xl-12{margin:96px!important}.mt-xl-12,.my-xl-12{margin-top:96px!important}.mr-xl-12,.mx-xl-12{margin-right:96px!important}.mb-xl-12,.my-xl-12{margin-bottom:96px!important}.ml-xl-12,.mx-xl-12{margin-left:96px!important}.m-xl-15{margin:120px!important}.mt-xl-15,.my-xl-15{margin-top:120px!important}.mr-xl-15,.mx-xl-15{margin-right:120px!important}.mb-xl-15,.my-xl-15{margin-bottom:120px!important}.ml-xl-15,.mx-xl-15{margin-left:120px!important}.p-xl-0{padding:0!important}.pt-xl-0,.py-xl-0{padding-top:0!important}.pr-xl-0,.px-xl-0{padding-right:0!important}.pb-xl-0,.py-xl-0{padding-bottom:0!important}.pl-xl-0,.px-xl-0{padding-left:0!important}.p-xl-1{padding:8px!important}.pt-xl-1,.py-xl-1{padding-top:8px!important}.pr-xl-1,.px-xl-1{padding-right:8px!important}.pb-xl-1,.py-xl-1{padding-bottom:8px!important}.pl-xl-1,.px-xl-1{padding-left:8px!important}.p-xl-2{padding:16px!important}.pt-xl-2,.py-xl-2{padding-top:16px!important}.pr-xl-2,.px-xl-2{padding-right:16px!important}.pb-xl-2,.py-xl-2{padding-bottom:16px!important}.pl-xl-2,.px-xl-2{padding-left:16px!important}.p-xl-3{padding:24px!important}.pt-xl-3,.py-xl-3{padding-top:24px!important}.pr-xl-3,.px-xl-3{padding-right:24px!important}.pb-xl-3,.py-xl-3{padding-bottom:24p
x!important}.pl-xl-3,.px-xl-3{padding-left:24px!important}.p-xl-4{padding:32px!important}.pt-xl-4,.py-xl-4{padding-top:32px!important}.pr-xl-4,.px-xl-4{padding-right:32px!important}.pb-xl-4,.py-xl-4{padding-bottom:32px!important}.pl-xl-4,.px-xl-4{padding-left:32px!important}.p-xl-5{padding:40px!important}.pt-xl-5,.py-xl-5{padding-top:40px!important}.pr-xl-5,.px-xl-5{padding-right:40px!important}.pb-xl-5,.py-xl-5{padding-bottom:40px!important}.pl-xl-5,.px-xl-5{padding-left:40px!important}.p-xl-6{padding:48px!important}.pt-xl-6,.py-xl-6{padding-top:48px!important}.pr-xl-6,.px-xl-6{padding-right:48px!important}.pb-xl-6,.py-xl-6{padding-bottom:48px!important}.pl-xl-6,.px-xl-6{padding-left:48px!important}.p-xl-7{padding:56px!important}.pt-xl-7,.py-xl-7{padding-top:56px!important}.pr-xl-7,.px-xl-7{padding-right:56px!important}.pb-xl-7,.py-xl-7{padding-bottom:56px!important}.pl-xl-7,.px-xl-7{padding-left:56px!important}.p-xl-8{padding:64px!important}.pt-xl-8,.py-xl-8{padding-top:64px!important}.pr-xl-8,.px-xl-8{padding-right:64px!important}.pb-xl-8,.py-xl-8{padding-bottom:64px!important}.pl-xl-8,.px-xl-8{padding-left:64px!important}.p-xl-9{padding:72px!important}.pt-xl-9,.py-xl-9{padding-top:72px!important}.pr-xl-9,.px-xl-9{padding-right:72px!important}.pb-xl-9,.py-xl-9{padding-bottom:72px!important}.pl-xl-9,.px-xl-9{padding-left:72px!important}.p-xl-10{padding:80px!important}.pt-xl-10,.py-xl-10{padding-top:80px!important}.pr-xl-10,.px-xl-10{padding-right:80px!important}.pb-xl-10,.py-xl-10{padding-bottom:80px!important}.pl-xl-10,.px-xl-10{padding-left:80px!important}.p-xl-12{padding:96px!important}.pt-xl-12,.py-xl-12{padding-top:96px!important}.pr-xl-12,.px-xl-12{padding-right:96px!important}.pb-xl-12,.py-xl-12{padding-bottom:96px!important}.pl-xl-12,.px-xl-12{padding-left:96px!important}.p-xl-15{padding:120px!important}.pt-xl-15,.py-xl-15{padding-top:120px!important}.pr-xl-15,.px-xl-15{padding-right:120px!important}.pb-xl-15,.py-xl-15{padding-bottom:120px!important}.pl-xl-15,.px-xl-15{padding-left:120px!important}.m-xl-n1{margin:-8px!important}.mt-xl-n1,.my-xl-n1{margin-top:-8px!important}.mr-xl-n1,.mx-xl-n1{margin-right:-8px!important}.mb-xl-n1,.my-xl-n1{margin-bottom:-8px!important}.ml-xl-n1,.mx-xl-n1{margin-left:-8px!important}.m-xl-n2{margin:-16px!important}.mt-xl-n2,.my-xl-n2{margin-top:-16px!important}.mr-xl-n2,.mx-xl-n2{margin-right:-16px!important}.mb-xl-n2,.my-xl-n2{margin-bottom:-16px!important}.ml-xl-n2,.mx-xl-n2{margin-left:-16px!important}.m-xl-n3{margin:-24px!important}.mt-xl-n3,.my-xl-n3{margin-top:-24px!important}.mr-xl-n3,.mx-xl-n3{margin-right:-24px!important}.mb-xl-n3,.my-xl-n3{margin-bottom:-24px!important}.ml-xl-n3,.mx-xl-n3{margin-left:-24px!important}.m-xl-n4{margin:-32px!important}.mt-xl-n4,.my-xl-n4{margin-top:-32px!important}.mr-xl-n4,.mx-xl-n4{margin-right:-32px!important}.mb-xl-n4,.my-xl-n4{margin-bottom:-32px!important}.ml-xl-n4,.mx-xl-n4{margin-left:-32px!important}.m-xl-n5{margin:-40px!important}.mt-xl-n5,.my-xl-n5{margin-top:-40px!important}.mr-xl-n5,.mx-xl-n5{margin-right:-40px!important}.mb-xl-n5,.my-xl-n5{margin-bottom:-40px!important}.ml-xl-n5,.mx-xl-n5{margin-left:-40px!important}.m-xl-n6{margin:-48px!important}.mt-xl-n6,.my-xl-n6{margin-top:-48px!important}.mr-xl-n6,.mx-xl-n6{margin-right:-48px!important}.mb-xl-n6,.my-xl-n6{margin-bottom:-48px!important}.ml-xl-n6,.mx-xl-n6{margin-left:-48px!important}.m-xl-n7{margin:-56px!important}.mt-xl-n7,.my-xl-n7{margin-top:-56px!important}.mr-xl-n7,.mx-xl-n7{margin-right:-56px!important}.mb-xl-n7,.my-xl-n7{margin-bottom
:-56px!important}.ml-xl-n7,.mx-xl-n7{margin-left:-56px!important}.m-xl-n8{margin:-64px!important}.mt-xl-n8,.my-xl-n8{margin-top:-64px!important}.mr-xl-n8,.mx-xl-n8{margin-right:-64px!important}.mb-xl-n8,.my-xl-n8{margin-bottom:-64px!important}.ml-xl-n8,.mx-xl-n8{margin-left:-64px!important}.m-xl-n9{margin:-72px!important}.mt-xl-n9,.my-xl-n9{margin-top:-72px!important}.mr-xl-n9,.mx-xl-n9{margin-right:-72px!important}.mb-xl-n9,.my-xl-n9{margin-bottom:-72px!important}.ml-xl-n9,.mx-xl-n9{margin-left:-72px!important}.m-xl-n10{margin:-80px!important}.mt-xl-n10,.my-xl-n10{margin-top:-80px!important}.mr-xl-n10,.mx-xl-n10{margin-right:-80px!important}.mb-xl-n10,.my-xl-n10{margin-bottom:-80px!important}.ml-xl-n10,.mx-xl-n10{margin-left:-80px!important}.m-xl-n12{margin:-96px!important}.mt-xl-n12,.my-xl-n12{margin-top:-96px!important}.mr-xl-n12,.mx-xl-n12{margin-right:-96px!important}.mb-xl-n12,.my-xl-n12{margin-bottom:-96px!important}.ml-xl-n12,.mx-xl-n12{margin-left:-96px!important}.m-xl-n15{margin:-120px!important}.mt-xl-n15,.my-xl-n15{margin-top:-120px!important}.mr-xl-n15,.mx-xl-n15{margin-right:-120px!important}.mb-xl-n15,.my-xl-n15{margin-bottom:-120px!important}.ml-xl-n15,.mx-xl-n15{margin-left:-120px!important}.m-xl-auto{margin:auto!important}.mt-xl-auto,.my-xl-auto{margin-top:auto!important}.mr-xl-auto,.mx-xl-auto{margin-right:auto!important}.mb-xl-auto,.my-xl-auto{margin-bottom:auto!important}.ml-xl-auto,.mx-xl-auto{margin-left:auto!important}}.text-monospace{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace!important}.text-justify{text-align:justify!important}.text-wrap{white-space:normal!important}.text-nowrap{white-space:nowrap!important}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.text-left{text-align:left!important}.text-right{text-align:right!important}.text-center{text-align:center!important}@media(min-width:400px){.text-xs-left{text-align:left!important}.text-xs-right{text-align:right!important}.text-xs-center{text-align:center!important}}@media(min-width:616px){.text-sm-left{text-align:left!important}.text-sm-right{text-align:right!important}.text-sm-center{text-align:center!important}}@media(min-width:768px){.text-md-left{text-align:left!important}.text-md-right{text-align:right!important}.text-md-center{text-align:center!important}}@media(min-width:980px){.text-lg-left{text-align:left!important}.text-lg-right{text-align:right!important}.text-lg-center{text-align:center!important}}@media(min-width:1240px){.text-xl-left{text-align:left!important}.text-xl-right{text-align:right!important}.text-xl-center{text-align:center!important}}.text-lowercase{text-transform:lowercase!important}.text-uppercase{text-transform:uppercase!important}.text-capitalize{text-transform:capitalize!important}.font-weight-light{font-weight:300!important}.font-weight-lighter{font-weight:lighter!important}.font-weight-normal{font-weight:400!important}.font-weight-bold{font-weight:700!important}.font-weight-bolder{font-weight:bolder!important}.font-italic{font-style:italic!important}.text-primary{color:#fc0!important}a.text-primary:focus,a.text-primary:hover{color:#b38f00!important}.text-secondary{color:#212529!important}a.text-secondary:focus,a.text-secondary:hover{color:#000!important}.text-success{color:#28a745!important}a.text-success:focus,a.text-success:hover{color:#19692c!important}.text-info{color:#17a2b8!important}a.text-info:focus,a.text-info:hover{color:#0f6674!important}.text-warning{color:#ffc107!important}a.text-warning:focus,a.text-
warning:hover{color:#ba8b00!important}.text-danger{color:#dc3545!important}a.text-danger:focus,a.text-danger:hover{color:#a71d2a!important}.text-light{color:#f1f6f9!important}a.text-light:focus,a.text-light:hover{color:#bbd4e2!important}.text-dark{color:#495057!important}a.text-dark:focus,a.text-dark:hover{color:#262a2d!important}.text-primary-light{color:#fffaf0!important}a.text-primary-light:focus,a.text-primary-light:hover{color:#ffe1a4!important}.text-secondary-light{color:#fff!important}a.text-secondary-light:focus,a.text-secondary-light:hover{color:#d9d9d9!important}.text-tertiary{color:#257af4!important}a.text-tertiary:focus,a.text-tertiary:hover{color:#0a56c3!important}.text-tertiary-light{color:#e3f1fe!important}a.text-tertiary-light:focus,a.text-tertiary-light:hover{color:#99ccfb!important}.text-white{color:#fff!important}a.text-white:focus,a.text-white:hover{color:#d9d9d9!important}.text-black{color:#212529!important}a.text-black:focus,a.text-black:hover{color:#000!important}.text-blue{color:#257af4!important}a.text-blue:focus,a.text-blue:hover{color:#0a56c3!important}.text-light-blue{color:#e3f1fe!important}a.text-light-blue:focus,a.text-light-blue:hover{color:#99ccfb!important}.text-yellow{color:#fc0!important}a.text-yellow:focus,a.text-yellow:hover{color:#b38f00!important}.text-light-yellow{color:#fffaf0!important}a.text-light-yellow:focus,a.text-light-yellow:hover{color:#ffe1a4!important}.text-orange{color:#ff8c00!important}a.text-orange:focus,a.text-orange:hover{color:#b36200!important}.text-light-orange{color:#ffe4b5!important}a.text-light-orange:focus,a.text-light-orange:hover{color:#ffc869!important}.text-red{color:#ff3939!important}a.text-red:focus,a.text-red:hover{color:#ec0000!important}.text-light-red{color:#ffe4e1!important}a.text-light-red:focus,a.text-light-red:hover{color:#ff9f95!important}.text-medium{color:#d6dbdf!important}a.text-medium:focus,a.text-medium:hover{color:#abb5bd!important}.text-body{color:#212529!important}.text-muted{color:#6c757d!important}.text-black-50{color:rgba(33,37,41,.5)!important}.text-white-50{color:hsla(0,0%,100%,.5)!important}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.text-decoration-none{text-decoration:none!important}.text-break{word-break:break-word!important;overflow-wrap:break-word!important}.text-reset{color:inherit!important}.visible{visibility:visible!important}.invisible{visibility:hidden!important}@media print{*,:after,:before{text-shadow:none!important;box-shadow:none!important}a:not(.btn){text-decoration:underline}abbr[title]:after{content:" (" attr(title) ")"}pre{white-space:pre-wrap!important}blockquote,pre{border:1px solid #d6dbdf;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}@page{size:a3}.container,body{min-width:980px!important}.navbar{display:none}.badge{border:1px solid #212529}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #dee2e6!important}.table-dark{color:inherit}.table-dark tbody+tbody,.table-dark td,.table-dark th,.table-dark thead th{border-color:#d6dbdf}.table .thead-dark th{color:inherit;border-color:#d6dbdf}}
\ No newline at end of file
+
*/:root{--blue:#007bff;--indigo:#6610f2;--purple:#6f42c1;--pink:#e83e8c;--red:#dc3545;--orange:#fd7e14;--yellow:#ffc107;--green:#28a745;--teal:#20c997;--cyan:#17a2b8;--gray:#6c757d;--gray-dark:#343a40;--brand-primary:#fc0;--brand-secondary:#ff3939;--primary-accent-yellow:#fc0;--primary-accent-light-yellow:#fffaf0;--primary-accent-blue:#257af4;--primary-accent-light-blue:#e3f1fe;--secondary-accent-orange:#ff8c00;--secondary-accent-light-orange:#ffe4b5;--secondary-accent-red:#ff3939;--secondary-accent-light-red:#ffe4e1;--primary:#fc0;--secondary:#212529;--success:#28a745;--info:#17a2b8;--warning:#ffc107;--danger:#dc3545;--light:#f1f6f9;--dark:#495057;--primary-light:#fffaf0;--secondary-light:#fff;--tertiary:#257af4;--tertiary-light:#e3f1fe;--white:#fff;--black:#212529;--blue:#257af4;--light-blue:#e3f1fe;--yellow:#fc0;--light-yellow:#fffaf0;--orange:#ff8c00;--light-orange:#ffe4b5;--red:#ff3939;--light-red:#ffe4e1;--medium:#d6dbdf;--breakpoint-xxs:0;--breakpoint-xs:400px;--breakpoint-sm:616px;--breakpoint-md:768px;--breakpoint-lg:980px;--breakpoint-xl:1240px;--font-family-sans-serif:"Noto Sans",sans-serif;--font-family-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace}*,:after,:before{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:rgba(33,37,41,0)}article,aside,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}body{margin:0;font-family:Noto Sans,sans-serif;font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}[tabindex="-1"]:focus:not(:focus-visible){outline:0!important}hr{box-sizing:content-box;height:0;overflow:visible}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:16px}p{margin-top:0;margin-bottom:1rem}abbr[data-original-title],abbr[title]{text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted;cursor:help;border-bottom:0;-webkit-text-decoration-skip-ink:none;text-decoration-skip-ink:none}address{font-style:normal;line-height:inherit}address,dl,ol,ul{margin-bottom:1rem}dl,ol,ul{margin-top:0}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{text-decoration:none;background-color:transparent}a,a:hover{color:#ff8c00}a:hover{text-decoration:underline}a:not([href]),a:not([href]):hover{color:inherit;text-decoration:none}code,kbd,pre,samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:1em}pre{margin-top:0;margin-bottom:1rem;overflow:auto}figure{margin:0 0 1rem}img{border-style:none}img,svg{vertical-align:middle}svg{overflow:hidden}table{border-collapse:collapse}caption{padding-top:.75rem;padding-bottom:.75rem;color:#6c757d;text-align:left;caption-side:bottom}th{text-align:inherit}label{display:inline-block;margin-bottom:.5rem}button{border-radius:0}button:focus{outline:1px dotted;outline:5px auto 
-webkit-focus-ring-color}button,input,optgroup,select,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,input{overflow:visible}button,select{text-transform:none}select{word-wrap:normal}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled),button:not(:disabled){cursor:pointer}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{padding:0;border-style:none}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=date],input[type=datetime-local],input[type=month],input[type=time]{-webkit-appearance:listbox}textarea{overflow:auto;resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;max-width:100%;padding:0;margin-bottom:.5rem;font-size:1.5rem;line-height:inherit;color:inherit;white-space:normal}@media(max-width:1200px){legend{font-size:calc(1.275rem + .3vw)}}progress{vertical-align:baseline}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:none}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}summary{display:list-item;cursor:pointer}template{display:none}[hidden]{display:none!important}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{margin-bottom:16px;font-family:Hind Siliguri,sans-serif;font-weight:500;line-height:1.125}.h1,h1{font-size:2.5rem}@media(max-width:1200px){.h1,h1{font-size:calc(1.375rem + 1.5vw)}}.h2,h2{font-size:2rem}@media(max-width:1200px){.h2,h2{font-size:calc(1.325rem + .9vw)}}.h3,h3{font-size:1.75rem}@media(max-width:1200px){.h3,h3{font-size:calc(1.3rem + .6vw)}}.h4,h4{font-size:1.5rem}@media(max-width:1200px){.h4,h4{font-size:calc(1.275rem + .3vw)}}.h5,h5{font-size:1.125rem}.h6,h6{font-size:.875rem}.lead{font-size:1.375rem;font-weight:400}@media(max-width:1200px){.lead{font-size:calc(1.2625rem + .15vw)}}.display-1{font-size:4rem;font-weight:600;line-height:1.125}@media(max-width:1200px){.display-1{font-size:calc(1.525rem + 3.3vw)}}.display-2{font-size:2.5rem;font-weight:600;line-height:1.125}@media(max-width:1200px){.display-2{font-size:calc(1.375rem + 1.5vw)}}.display-3{font-size:2rem;font-weight:500;line-height:1.125}@media(max-width:1200px){.display-3{font-size:calc(1.325rem + .9vw)}}.display-4{font-size:1.75rem;font-weight:500;line-height:1.125}@media(max-width:1200px){.display-4{font-size:calc(1.3rem + .6vw)}}hr{margin-top:8px;margin-bottom:8px;border:0;border-top:1px solid rgba(33,37,41,.1)}.small,small{font-size:80%;font-weight:400}.mark,mark{padding:.2em;background-color:#fcf8e3}.list-inline,.list-unstyled{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{font-size:90%;text-transform:uppercase}.blockquote{margin-bottom:8px;font-size:1.25rem}.blockquote-footer{display:block;font-size:80%;color:#6c757d}.blockquote-footer:before{content:"— "}.img-fluid,.img-thumbnail{max-width:100%;height:auto}.img-thumbnail{padding:.25rem;background-color:#fff;border:1px solid #dee2e6;border-radius:8px}.figure{display:inline-block}.figure-img{margin-bottom:4px;line-height:1}.figure-caption{font-size:90%;color:#6c757d}code{font-size:87.5%;color:#e83e8c;word-wrap:break-word}a>code{color:inherit}kbd{padding:.2rem 
.4rem;font-size:87.5%;color:#fff;background-color:#495057;border-radius:4px}kbd kbd{padding:0;font-size:100%;font-weight:700}pre{display:block;font-size:87.5%;color:#495057}pre code{font-size:inherit;color:inherit;word-break:normal}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{width:100%;padding-right:20px;padding-left:20px;margin-right:auto;margin-left:auto}@media(min-width:400px){.container{max-width:576px}}@media(min-width:616px){.container{max-width:576px}}@media(min-width:768px){.container{max-width:958px}}@media(min-width:980px){.container{max-width:1008px}}@media(min-width:1240px){.container{max-width:1118px}}.container-fluid,.container-lg,.container-md,.container-sm,.container-xl,.container-xs{width:100%;padding-right:20px;padding-left:20px;margin-right:auto;margin-left:auto}@media(min-width:400px){.container,.container-xs{max-width:576px}}@media(min-width:616px){.container,.container-sm,.container-xs{max-width:576px}}@media(min-width:768px){.container,.container-md,.container-sm,.container-xs{max-width:958px}}@media(min-width:980px){.container,.container-lg,.container-md,.container-sm,.container-xs{max-width:1008px}}@media(min-width:1240px){.container,.container-lg,.container-md,.container-sm,.container-xl,.container-xs{max-width:1118px}}.row{display:flex;flex-wrap:wrap;margin-right:-20px;margin-left:-20px}.no-gutters{margin-right:0;margin-left:0}.no-gutters>.col,.no-gutters>[class*=col-]{padding-right:0;padding-left:0}.col,.col-1,.col-2,.col-3,.col-4,.col-5,.col-6,.col-7,.col-8,.col-9,.col-10,.col-11,.col-12,.col-auto,.col-lg,.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-auto,.col-md,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12,.col-md-auto,.col-sm,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-auto,.col-xl,.col-xl-1,.col-xl-2,.col-xl-3,.col-xl-4,.col-xl-5,.col-xl-6,.col-xl-7,.col-xl-8,.col-xl-9,.col-xl-10,.col-xl-11,.col-xl-12,.col-xl-auto,.col-xs,.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-auto{position:relative;width:100%;padding-right:20px;padding-left:20px}.col{flex-basis:0;flex-grow:1;max-width:100%}.row-cols-1>*{flex:0 0 100%;max-width:100%}.row-cols-2>*{flex:0 0 50%;max-width:50%}.row-cols-3>*{flex:0 0 33.3333333333%;max-width:33.3333333333%}.row-cols-4>*{flex:0 0 25%;max-width:25%}.row-cols-5>*{flex:0 0 20%;max-width:20%}.row-cols-6>*{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-auto{flex:0 0 auto;width:auto;max-width:100%}.col-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-3{flex:0 0 25%;max-width:25%}.col-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-6{flex:0 0 50%;max-width:50%}.col-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-9{flex:0 0 75%;max-width:75%}.col-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-12{flex:0 0 
100%;max-width:100%}.order-first{order:-1}.order-last{order:13}.order-0{order:0}.order-1{order:1}.order-2{order:2}.order-3{order:3}.order-4{order:4}.order-5{order:5}.order-6{order:6}.order-7{order:7}.order-8{order:8}.order-9{order:9}.order-10{order:10}.order-11{order:11}.order-12{order:12}.offset-1{margin-left:8.3333333333%}.offset-2{margin-left:16.6666666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.3333333333%}.offset-5{margin-left:41.6666666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.3333333333%}.offset-8{margin-left:66.6666666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.3333333333%}.offset-11{margin-left:91.6666666667%}@media(min-width:400px){.col-xs{flex-basis:0;flex-grow:1;max-width:100%}.row-cols-xs-1>*{flex:0 0 100%;max-width:100%}.row-cols-xs-2>*{flex:0 0 50%;max-width:50%}.row-cols-xs-3>*{flex:0 0 33.3333333333%;max-width:33.3333333333%}.row-cols-xs-4>*{flex:0 0 25%;max-width:25%}.row-cols-xs-5>*{flex:0 0 20%;max-width:20%}.row-cols-xs-6>*{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-xs-auto{flex:0 0 auto;width:auto;max-width:100%}.col-xs-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-xs-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-xs-3{flex:0 0 25%;max-width:25%}.col-xs-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-xs-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-xs-6{flex:0 0 50%;max-width:50%}.col-xs-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-xs-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-xs-9{flex:0 0 75%;max-width:75%}.col-xs-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-xs-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-xs-12{flex:0 0 100%;max-width:100%}.order-xs-first{order:-1}.order-xs-last{order:13}.order-xs-0{order:0}.order-xs-1{order:1}.order-xs-2{order:2}.order-xs-3{order:3}.order-xs-4{order:4}.order-xs-5{order:5}.order-xs-6{order:6}.order-xs-7{order:7}.order-xs-8{order:8}.order-xs-9{order:9}.order-xs-10{order:10}.order-xs-11{order:11}.order-xs-12{order:12}.offset-xs-0{margin-left:0}.offset-xs-1{margin-left:8.3333333333%}.offset-xs-2{margin-left:16.6666666667%}.offset-xs-3{margin-left:25%}.offset-xs-4{margin-left:33.3333333333%}.offset-xs-5{margin-left:41.6666666667%}.offset-xs-6{margin-left:50%}.offset-xs-7{margin-left:58.3333333333%}.offset-xs-8{margin-left:66.6666666667%}.offset-xs-9{margin-left:75%}.offset-xs-10{margin-left:83.3333333333%}.offset-xs-11{margin-left:91.6666666667%}}@media(min-width:616px){.col-sm{flex-basis:0;flex-grow:1;max-width:100%}.row-cols-sm-1>*{flex:0 0 100%;max-width:100%}.row-cols-sm-2>*{flex:0 0 50%;max-width:50%}.row-cols-sm-3>*{flex:0 0 33.3333333333%;max-width:33.3333333333%}.row-cols-sm-4>*{flex:0 0 25%;max-width:25%}.row-cols-sm-5>*{flex:0 0 20%;max-width:20%}.row-cols-sm-6>*{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-sm-auto{flex:0 0 auto;width:auto;max-width:100%}.col-sm-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-sm-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-sm-3{flex:0 0 25%;max-width:25%}.col-sm-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-sm-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-sm-6{flex:0 0 50%;max-width:50%}.col-sm-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-sm-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-sm-9{flex:0 0 75%;max-width:75%}.col-sm-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-sm-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-sm-12{flex:0 0 
100%;max-width:100%}.order-sm-first{order:-1}.order-sm-last{order:13}.order-sm-0{order:0}.order-sm-1{order:1}.order-sm-2{order:2}.order-sm-3{order:3}.order-sm-4{order:4}.order-sm-5{order:5}.order-sm-6{order:6}.order-sm-7{order:7}.order-sm-8{order:8}.order-sm-9{order:9}.order-sm-10{order:10}.order-sm-11{order:11}.order-sm-12{order:12}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.3333333333%}.offset-sm-2{margin-left:16.6666666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.3333333333%}.offset-sm-5{margin-left:41.6666666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.3333333333%}.offset-sm-8{margin-left:66.6666666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.3333333333%}.offset-sm-11{margin-left:91.6666666667%}}@media(min-width:768px){.col-md{flex-basis:0;flex-grow:1;max-width:100%}.row-cols-md-1>*{flex:0 0 100%;max-width:100%}.row-cols-md-2>*{flex:0 0 50%;max-width:50%}.row-cols-md-3>*{flex:0 0 33.3333333333%;max-width:33.3333333333%}.row-cols-md-4>*{flex:0 0 25%;max-width:25%}.row-cols-md-5>*{flex:0 0 20%;max-width:20%}.row-cols-md-6>*{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-md-auto{flex:0 0 auto;width:auto;max-width:100%}.col-md-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-md-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-md-3{flex:0 0 25%;max-width:25%}.col-md-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-md-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-md-6{flex:0 0 50%;max-width:50%}.col-md-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-md-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-md-9{flex:0 0 75%;max-width:75%}.col-md-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-md-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-md-12{flex:0 0 100%;max-width:100%}.order-md-first{order:-1}.order-md-last{order:13}.order-md-0{order:0}.order-md-1{order:1}.order-md-2{order:2}.order-md-3{order:3}.order-md-4{order:4}.order-md-5{order:5}.order-md-6{order:6}.order-md-7{order:7}.order-md-8{order:8}.order-md-9{order:9}.order-md-10{order:10}.order-md-11{order:11}.order-md-12{order:12}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.3333333333%}.offset-md-2{margin-left:16.6666666667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.3333333333%}.offset-md-5{margin-left:41.6666666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.3333333333%}.offset-md-8{margin-left:66.6666666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.3333333333%}.offset-md-11{margin-left:91.6666666667%}}@media(min-width:980px){.col-lg{flex-basis:0;flex-grow:1;max-width:100%}.row-cols-lg-1>*{flex:0 0 100%;max-width:100%}.row-cols-lg-2>*{flex:0 0 50%;max-width:50%}.row-cols-lg-3>*{flex:0 0 33.3333333333%;max-width:33.3333333333%}.row-cols-lg-4>*{flex:0 0 25%;max-width:25%}.row-cols-lg-5>*{flex:0 0 20%;max-width:20%}.row-cols-lg-6>*{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-lg-auto{flex:0 0 auto;width:auto;max-width:100%}.col-lg-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-lg-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-lg-3{flex:0 0 25%;max-width:25%}.col-lg-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-lg-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-lg-6{flex:0 0 50%;max-width:50%}.col-lg-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-lg-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-lg-9{flex:0 0 75%;max-width:75%}.col-lg-10{flex:0 0 
83.3333333333%;max-width:83.3333333333%}.col-lg-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-lg-12{flex:0 0 100%;max-width:100%}.order-lg-first{order:-1}.order-lg-last{order:13}.order-lg-0{order:0}.order-lg-1{order:1}.order-lg-2{order:2}.order-lg-3{order:3}.order-lg-4{order:4}.order-lg-5{order:5}.order-lg-6{order:6}.order-lg-7{order:7}.order-lg-8{order:8}.order-lg-9{order:9}.order-lg-10{order:10}.order-lg-11{order:11}.order-lg-12{order:12}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.3333333333%}.offset-lg-2{margin-left:16.6666666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.3333333333%}.offset-lg-5{margin-left:41.6666666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.3333333333%}.offset-lg-8{margin-left:66.6666666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.3333333333%}.offset-lg-11{margin-left:91.6666666667%}}@media(min-width:1240px){.col-xl{flex-basis:0;flex-grow:1;max-width:100%}.row-cols-xl-1>*{flex:0 0 100%;max-width:100%}.row-cols-xl-2>*{flex:0 0 50%;max-width:50%}.row-cols-xl-3>*{flex:0 0 33.3333333333%;max-width:33.3333333333%}.row-cols-xl-4>*{flex:0 0 25%;max-width:25%}.row-cols-xl-5>*{flex:0 0 20%;max-width:20%}.row-cols-xl-6>*{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-xl-auto{flex:0 0 auto;width:auto;max-width:100%}.col-xl-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-xl-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-xl-3{flex:0 0 25%;max-width:25%}.col-xl-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-xl-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-xl-6{flex:0 0 50%;max-width:50%}.col-xl-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-xl-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-xl-9{flex:0 0 75%;max-width:75%}.col-xl-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-xl-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-xl-12{flex:0 0 100%;max-width:100%}.order-xl-first{order:-1}.order-xl-last{order:13}.order-xl-0{order:0}.order-xl-1{order:1}.order-xl-2{order:2}.order-xl-3{order:3}.order-xl-4{order:4}.order-xl-5{order:5}.order-xl-6{order:6}.order-xl-7{order:7}.order-xl-8{order:8}.order-xl-9{order:9}.order-xl-10{order:10}.order-xl-11{order:11}.order-xl-12{order:12}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.3333333333%}.offset-xl-2{margin-left:16.6666666667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.3333333333%}.offset-xl-5{margin-left:41.6666666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.3333333333%}.offset-xl-8{margin-left:66.6666666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.3333333333%}.offset-xl-11{margin-left:91.6666666667%}}.table{width:100%;margin-bottom:8px;color:#212529}.table td,.table th{padding:.75rem;vertical-align:top;border-top:1px solid #d6dbdf}.table thead th{vertical-align:bottom;border-bottom:2px solid #d6dbdf}.table tbody+tbody{border-top:2px solid #d6dbdf}.table-sm td,.table-sm th{padding:.3rem}.table-bordered,.table-bordered td,.table-bordered th{border:1px solid #d6dbdf}.table-bordered thead td,.table-bordered thead th{border-bottom-width:2px}.table-borderless tbody+tbody,.table-borderless td,.table-borderless th,.table-borderless thead th{border:0}.table-striped tbody tr:nth-of-type(odd){background-color:rgba(33,37,41,.05)}.table-hover tbody tr:hover{color:#212529;background-color:rgba(33,37,41,.075)}.table-primary,.table-primary>td,.table-primary>th{background-color:#fff1b8}.table-primary tbody+tbody,.table-primary td,.table-primary 
th,.table-primary thead th{border-color:#ffe47a}.table-hover .table-primary:hover,.table-hover .table-primary:hover>td,.table-hover .table-primary:hover>th{background-color:#ffec9f}.table-secondary,.table-secondary>td,.table-secondary>th{background-color:#c1c2c3}.table-secondary tbody+tbody,.table-secondary td,.table-secondary th,.table-secondary thead th{border-color:#8c8e90}.table-hover .table-secondary:hover,.table-hover .table-secondary:hover>td,.table-hover .table-secondary:hover>th{background-color:#b4b5b6}.table-success,.table-success>td,.table-success>th{background-color:#c3e6cb}.table-success tbody+tbody,.table-success td,.table-success th,.table-success thead th{border-color:#8fd19e}.table-hover .table-success:hover,.table-hover .table-success:hover>td,.table-hover .table-success:hover>th{background-color:#b1dfbb}.table-info,.table-info>td,.table-info>th{background-color:#bee5eb}.table-info tbody+tbody,.table-info td,.table-info th,.table-info thead th{border-color:#86cfda}.table-hover .table-info:hover,.table-hover .table-info:hover>td,.table-hover .table-info:hover>th{background-color:#abdde5}.table-warning,.table-warning>td,.table-warning>th{background-color:#ffeeba}.table-warning tbody+tbody,.table-warning td,.table-warning th,.table-warning thead th{border-color:#ffdf7e}.table-hover .table-warning:hover,.table-hover .table-warning:hover>td,.table-hover .table-warning:hover>th{background-color:#ffe8a1}.table-danger,.table-danger>td,.table-danger>th{background-color:#f5c6cb}.table-danger tbody+tbody,.table-danger td,.table-danger th,.table-danger thead th{border-color:#ed969e}.table-hover .table-danger:hover,.table-hover .table-danger:hover>td,.table-hover .table-danger:hover>th{background-color:#f1b0b7}.table-light,.table-light>td,.table-light>th{background-color:#fbfcfd}.table-light tbody+tbody,.table-light td,.table-light th,.table-light thead th{border-color:#f8fafc}.table-hover .table-light:hover,.table-hover .table-light:hover>td,.table-hover .table-light:hover>th{background-color:#eaeff5}.table-dark,.table-dark>td,.table-dark>th{background-color:#ccced0}.table-dark tbody+tbody,.table-dark td,.table-dark th,.table-dark thead th{border-color:#a0a4a8}.table-hover .table-dark:hover,.table-hover .table-dark:hover>td,.table-hover .table-dark:hover>th{background-color:#bfc1c4}.table-primary-light,.table-primary-light>td,.table-primary-light>th{background-color:#fffefb}.table-primary-light tbody+tbody,.table-primary-light td,.table-primary-light th,.table-primary-light thead th{border-color:#fffcf7}.table-hover .table-primary-light:hover,.table-hover .table-primary-light:hover>td,.table-hover .table-primary-light:hover>th{background-color:#fff8e2}.table-secondary-light,.table-secondary-light>td,.table-secondary-light>th{background-color:#fff}.table-secondary-light tbody+tbody,.table-secondary-light td,.table-secondary-light th,.table-secondary-light thead th{border-color:#fff}.table-hover .table-secondary-light:hover,.table-hover .table-secondary-light:hover>td,.table-hover .table-secondary-light:hover>th{background-color:#f2f2f2}.table-tertiary,.table-tertiary>td,.table-tertiary>th{background-color:#c2dafc}.table-tertiary tbody+tbody,.table-tertiary td,.table-tertiary th,.table-tertiary thead th{border-color:#8ebaf9}.table-hover .table-tertiary:hover,.table-hover .table-tertiary:hover>td,.table-hover .table-tertiary:hover>th{background-color:#aacbfb}.table-tertiary-light,.table-tertiary-light>td,.table-tertiary-light>th{background-color:#f7fbff}.table-tertiary-light 
tbody+tbody,.table-tertiary-light td,.table-tertiary-light th,.table-tertiary-light thead th{border-color:#f0f8fe}.table-hover .table-tertiary-light:hover,.table-hover .table-tertiary-light:hover>td,.table-hover .table-tertiary-light:hover>th{background-color:#deeeff}.table-white,.table-white>td,.table-white>th{background-color:#fff}.table-white tbody+tbody,.table-white td,.table-white th,.table-white thead th{border-color:#fff}.table-hover .table-white:hover,.table-hover .table-white:hover>td,.table-hover .table-white:hover>th{background-color:#f2f2f2}.table-black,.table-black>td,.table-black>th{background-color:#c1c2c3}.table-black tbody+tbody,.table-black td,.table-black th,.table-black thead th{border-color:#8c8e90}.table-hover .table-black:hover,.table-hover .table-black:hover>td,.table-hover .table-black:hover>th{background-color:#b4b5b6}.table-blue,.table-blue>td,.table-blue>th{background-color:#c2dafc}.table-blue tbody+tbody,.table-blue td,.table-blue th,.table-blue thead th{border-color:#8ebaf9}.table-hover .table-blue:hover,.table-hover .table-blue:hover>td,.table-hover .table-blue:hover>th{background-color:#aacbfb}.table-light-blue,.table-light-blue>td,.table-light-blue>th{background-color:#f7fbff}.table-light-blue tbody+tbody,.table-light-blue td,.table-light-blue th,.table-light-blue thead th{border-color:#f0f8fe}.table-hover .table-light-blue:hover,.table-hover .table-light-blue:hover>td,.table-hover .table-light-blue:hover>th{background-color:#deeeff}.table-yellow,.table-yellow>td,.table-yellow>th{background-color:#fff1b8}.table-yellow tbody+tbody,.table-yellow td,.table-yellow th,.table-yellow thead th{border-color:#ffe47a}.table-hover .table-yellow:hover,.table-hover .table-yellow:hover>td,.table-hover .table-yellow:hover>th{background-color:#ffec9f}.table-light-yellow,.table-light-yellow>td,.table-light-yellow>th{background-color:#fffefb}.table-light-yellow tbody+tbody,.table-light-yellow td,.table-light-yellow th,.table-light-yellow thead th{border-color:#fffcf7}.table-hover .table-light-yellow:hover,.table-hover .table-light-yellow:hover>td,.table-hover .table-light-yellow:hover>th{background-color:#fff8e2}.table-orange,.table-orange>td,.table-orange>th{background-color:#ffdfb8}.table-orange tbody+tbody,.table-orange td,.table-orange th,.table-orange thead th{border-color:#ffc37a}.table-hover .table-orange:hover,.table-hover .table-orange:hover>td,.table-hover .table-orange:hover>th{background-color:#ffd49f}.table-light-orange,.table-light-orange>td,.table-light-orange>th{background-color:#fff7ea}.table-light-orange tbody+tbody,.table-light-orange td,.table-light-orange th,.table-light-orange thead th{border-color:#fff1d9}.table-hover .table-light-orange:hover,.table-hover .table-light-orange:hover>td,.table-hover .table-light-orange:hover>th{background-color:#ffedd1}.table-red,.table-red>td,.table-red>th{background-color:#ffc8c8}.table-red tbody+tbody,.table-red td,.table-red th,.table-red thead th{border-color:#ff9898}.table-hover .table-red:hover,.table-hover .table-red:hover>td,.table-hover .table-red:hover>th{background-color:#ffafaf}.table-light-red,.table-light-red>td,.table-light-red>th{background-color:#fff7f7}.table-light-red tbody+tbody,.table-light-red td,.table-light-red th,.table-light-red thead th{border-color:#fff1ef}.table-hover .table-light-red:hover,.table-hover .table-light-red:hover>td,.table-hover .table-light-red:hover>th{background-color:#ffdede}.table-medium,.table-medium>td,.table-medium>th{background-color:#f4f5f6}.table-medium 
tbody+tbody,.table-medium td,.table-medium th,.table-medium thead th{border-color:#eaecee}.table-hover .table-medium:hover,.table-hover .table-medium:hover>td,.table-hover .table-medium:hover>th{background-color:#e6e8eb}.table-active,.table-active>td,.table-active>th{background-color:rgba(33,37,41,.075)}.table-hover .table-active:hover,.table-hover .table-active:hover>td,.table-hover .table-active:hover>th{background-color:rgba(22,24,27,.075)}.table .thead-dark th{color:#fff;background-color:#343a40;border-color:#454d55}.table .thead-light th{color:#6c757d;background-color:#e9ecef;border-color:#d6dbdf}.table-dark{color:#fff;background-color:#343a40}.table-dark td,.table-dark th,.table-dark thead th{border-color:#454d55}.table-dark.table-bordered{border:0}.table-dark.table-striped tbody tr:nth-of-type(odd){background-color:hsla(0,0%,100%,.05)}.table-dark.table-hover tbody tr:hover{color:#fff;background-color:hsla(0,0%,100%,.075)}@media(max-width:399.98px){.table-responsive-xs{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-xs>.table-bordered{border:0}}@media(max-width:615.98px){.table-responsive-sm{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-sm>.table-bordered{border:0}}@media(max-width:767.98px){.table-responsive-md{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-md>.table-bordered{border:0}}@media(max-width:979.98px){.table-responsive-lg{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-lg>.table-bordered{border:0}}@media(max-width:1239.98px){.table-responsive-xl{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-xl>.table-bordered{border:0}}.table-responsive{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive>.table-bordered{border:0}.form-control{display:block;width:100%;height:calc(1.5em + .75rem + 2px);padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#6c757d;background-color:#fff;background-clip:padding-box;border:1px solid #ced4da;border-radius:8px;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion:reduce){.form-control{transition:none}}.form-control::-ms-expand{background-color:transparent;border:0}.form-control:-moz-focusring{color:transparent;text-shadow:0 0 0 #6c757d}.form-control:focus{color:#6c757d;background-color:#fff;border-color:#ffe680;outline:0;box-shadow:0 0 0 .2rem rgba(255,204,0,.25)}.form-control::-moz-placeholder{color:#6c757d;opacity:1}.form-control:-ms-input-placeholder{color:#6c757d;opacity:1}.form-control::placeholder{color:#6c757d;opacity:1}.form-control:disabled,.form-control[readonly]{background-color:#e9ecef;opacity:1}select.form-control:focus::-ms-value{color:#6c757d;background-color:#fff}.form-control-file,.form-control-range{display:block;width:100%}.col-form-label{padding-top:calc(.375rem + 1px);padding-bottom:calc(.375rem + 1px);margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(.5rem + 1px);padding-bottom:calc(.5rem + 1px);font-size:1.125rem;line-height:1.5}.col-form-label-sm{padding-top:calc(.25rem + 1px);padding-bottom:calc(.25rem + 1px);font-size:.875rem;line-height:1.5}.form-control-plaintext{display:block;width:100%;padding:.375rem 0;margin-bottom:0;font-size:1rem;line-height:1.5;color:#212529;background-color:transparent;border:solid transparent;border-width:1px 
0}.form-control-plaintext.form-control-lg,.form-control-plaintext.form-control-sm{padding-right:0;padding-left:0}.form-control-sm{height:calc(1.5em + .5rem + 2px);padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:4px}.form-control-lg{height:calc(1.5em + 1rem + 2px);padding:.5rem 1rem;font-size:1.125rem;line-height:1.5;border-radius:8px}select.form-control[multiple],select.form-control[size],textarea.form-control{height:auto}.form-group{margin-bottom:1rem}.form-text{display:block;margin-top:.25rem}.form-row{display:flex;flex-wrap:wrap;margin-right:-5px;margin-left:-5px}.form-row>.col,.form-row>[class*=col-]{padding-right:5px;padding-left:5px}.form-check{position:relative;display:block;padding-left:1.25rem}.form-check-input{position:absolute;margin-top:.3rem;margin-left:-1.25rem}.form-check-input:disabled~.form-check-label,.form-check-input[disabled]~.form-check-label{color:#6c757d}.form-check-label{margin-bottom:0}.form-check-inline{display:inline-flex;align-items:center;padding-left:0;margin-right:.75rem}.form-check-inline .form-check-input{position:static;margin-top:0;margin-right:.3125rem;margin-left:0}.valid-feedback{display:none;width:100%;margin-top:.25rem;font-size:80%;color:#28a745}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;line-height:1.5;color:#fff;background-color:rgba(40,167,69,.9);border-radius:8px}.is-valid~.valid-feedback,.is-valid~.valid-tooltip,.was-validated :valid~.valid-feedback,.was-validated :valid~.valid-tooltip{display:block}.form-control.is-valid,.was-validated .form-control:valid{border-color:#28a745;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='8' height='8'%3E%3Cpath fill='%2328a745' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3E%3C/svg%3E");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.form-control.is-valid:focus,.was-validated .form-control:valid:focus{border-color:#28a745;box-shadow:0 0 0 .2rem rgba(40,167,69,.25)}.was-validated textarea.form-control:valid,textarea.form-control.is-valid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.custom-select.is-valid,.was-validated .custom-select:valid{border-color:#28a745;padding-right:calc(.75em + 2.3125rem);background:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='4' height='5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right .75rem center/8px 10px,url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='8' height='8'%3E%3Cpath fill='%2328a745' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3E%3C/svg%3E") #fff no-repeat center right 1.75rem/calc(.75em + .375rem) calc(.75em + .375rem)}.custom-select.is-valid:focus,.was-validated .custom-select:valid:focus{border-color:#28a745;box-shadow:0 0 0 .2rem rgba(40,167,69,.25)}.form-check-input.is-valid~.form-check-label,.was-validated .form-check-input:valid~.form-check-label{color:#28a745}.form-check-input.is-valid~.valid-feedback,.form-check-input.is-valid~.valid-tooltip,.was-validated .form-check-input:valid~.valid-feedback,.was-validated 
.form-check-input:valid~.valid-tooltip{display:block}.custom-control-input.is-valid~.custom-control-label,.was-validated .custom-control-input:valid~.custom-control-label{color:#28a745}.custom-control-input.is-valid~.custom-control-label:before,.was-validated .custom-control-input:valid~.custom-control-label:before{border-color:#28a745}.custom-control-input.is-valid:checked~.custom-control-label:before,.was-validated .custom-control-input:valid:checked~.custom-control-label:before{border-color:#34ce57;background-color:#34ce57}.custom-control-input.is-valid:focus~.custom-control-label:before,.was-validated .custom-control-input:valid:focus~.custom-control-label:before{box-shadow:0 0 0 .2rem rgba(40,167,69,.25)}.custom-control-input.is-valid:focus:not(:checked)~.custom-control-label:before,.custom-file-input.is-valid~.custom-file-label,.was-validated .custom-control-input:valid:focus:not(:checked)~.custom-control-label:before,.was-validated .custom-file-input:valid~.custom-file-label{border-color:#28a745}.custom-file-input.is-valid:focus~.custom-file-label,.was-validated .custom-file-input:valid:focus~.custom-file-label{border-color:#28a745;box-shadow:0 0 0 .2rem rgba(40,167,69,.25)}.invalid-feedback{display:none;width:100%;margin-top:.25rem;font-size:80%;color:#dc3545}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;line-height:1.5;color:#fff;background-color:rgba(220,53,69,.9);border-radius:8px}.is-invalid~.invalid-feedback,.is-invalid~.invalid-tooltip,.was-validated :invalid~.invalid-feedback,.was-validated :invalid~.invalid-tooltip{display:block}.form-control.is-invalid,.was-validated .form-control:invalid{border-color:#dc3545;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' fill='none' stroke='%23dc3545'%3E%3Ccircle cx='6' cy='6' r='4.5'/%3E%3Cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3E%3Ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3E%3C/svg%3E");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.form-control.is-invalid:focus,.was-validated .form-control:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .2rem rgba(220,53,69,.25)}.was-validated textarea.form-control:invalid,textarea.form-control.is-invalid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.custom-select.is-invalid,.was-validated .custom-select:invalid{border-color:#dc3545;padding-right:calc(.75em + 2.3125rem);background:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='4' height='5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right .75rem center/8px 10px,url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' fill='none' stroke='%23dc3545'%3E%3Ccircle cx='6' cy='6' r='4.5'/%3E%3Cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3E%3Ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3E%3C/svg%3E") #fff no-repeat center right 1.75rem/calc(.75em + .375rem) calc(.75em + .375rem)}.custom-select.is-invalid:focus,.was-validated .custom-select:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .2rem rgba(220,53,69,.25)}.form-check-input.is-invalid~.form-check-label,.was-validated 
.form-check-input:invalid~.form-check-label{color:#dc3545}.form-check-input.is-invalid~.invalid-feedback,.form-check-input.is-invalid~.invalid-tooltip,.was-validated .form-check-input:invalid~.invalid-feedback,.was-validated .form-check-input:invalid~.invalid-tooltip{display:block}.custom-control-input.is-invalid~.custom-control-label,.was-validated .custom-control-input:invalid~.custom-control-label{color:#dc3545}.custom-control-input.is-invalid~.custom-control-label:before,.was-validated .custom-control-input:invalid~.custom-control-label:before{border-color:#dc3545}.custom-control-input.is-invalid:checked~.custom-control-label:before,.was-validated .custom-control-input:invalid:checked~.custom-control-label:before{border-color:#e4606d;background-color:#e4606d}.custom-control-input.is-invalid:focus~.custom-control-label:before,.was-validated .custom-control-input:invalid:focus~.custom-control-label:before{box-shadow:0 0 0 .2rem rgba(220,53,69,.25)}.custom-control-input.is-invalid:focus:not(:checked)~.custom-control-label:before,.custom-file-input.is-invalid~.custom-file-label,.was-validated .custom-control-input:invalid:focus:not(:checked)~.custom-control-label:before,.was-validated .custom-file-input:invalid~.custom-file-label{border-color:#dc3545}.custom-file-input.is-invalid:focus~.custom-file-label,.was-validated .custom-file-input:invalid:focus~.custom-file-label{border-color:#dc3545;box-shadow:0 0 0 .2rem rgba(220,53,69,.25)}.form-inline{display:flex;flex-flow:row wrap;align-items:center}.form-inline .form-check{width:100%}@media(min-width:616px){.form-inline label{justify-content:center}.form-inline .form-group,.form-inline label{display:flex;align-items:center;margin-bottom:0}.form-inline .form-group{flex:0 0 auto;flex-flow:row wrap}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-plaintext{display:inline-block}.form-inline .custom-select,.form-inline .input-group{width:auto}.form-inline .form-check{display:flex;align-items:center;justify-content:center;width:auto;padding-left:0}.form-inline .form-check-input{position:relative;flex-shrink:0;margin-top:0;margin-right:.25rem;margin-left:0}.form-inline .custom-control{align-items:center;justify-content:center}.form-inline .custom-control-label{margin-bottom:0}}.btn{display:inline-block;font-family:inherit;font-weight:700;color:#212529;text-align:center;vertical-align:middle;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-color:transparent;border:1px solid transparent;padding:12px 32px;font-size:.875rem;line-height:20px;border-radius:8px;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion:reduce){.btn{transition:none}}.btn:hover{color:#212529;text-decoration:none}.btn.focus,.btn:focus{outline:0;box-shadow:none}.btn.disabled,.btn:disabled{opacity:.65}a.btn.disabled,fieldset:disabled a.btn{pointer-events:none}.btn-primary{color:#495057;background-color:#fc0;border-color:#fc0}.btn-primary.focus,.btn-primary:focus,.btn-primary:hover{color:#495057;background-color:#d9ad00;border-color:#cca300}.btn-primary.focus,.btn-primary:focus{box-shadow:0 0 0 0 
rgba(228,185,13,.5)}.btn-primary.disabled,.btn-primary:disabled{color:#495057;background-color:#fc0;border-color:#fc0}.btn-primary:not(:disabled):not(.disabled).active,.btn-primary:not(:disabled):not(.disabled):active,.show>.btn-primary.dropdown-toggle{color:#495057;background-color:#cca300;border-color:#bf9900}.btn-primary:not(:disabled):not(.disabled).active:focus,.btn-primary:not(:disabled):not(.disabled):active:focus,.show>.btn-primary.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,185,13,.5)}.btn-secondary{color:#fff;background-color:#212529;border-color:#212529}.btn-secondary.focus,.btn-secondary:focus,.btn-secondary:hover{color:#fff;background-color:#101214;border-color:#0a0c0d}.btn-secondary.focus,.btn-secondary:focus{box-shadow:0 0 0 0 rgba(66,70,73,.5)}.btn-secondary.disabled,.btn-secondary:disabled{color:#fff;background-color:#212529;border-color:#212529}.btn-secondary:not(:disabled):not(.disabled).active,.btn-secondary:not(:disabled):not(.disabled):active,.show>.btn-secondary.dropdown-toggle{color:#fff;background-color:#0a0c0d;border-color:#050506}.btn-secondary:not(:disabled):not(.disabled).active:focus,.btn-secondary:not(:disabled):not(.disabled):active:focus,.show>.btn-secondary.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(66,70,73,.5)}.btn-success{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success.focus,.btn-success:focus,.btn-success:hover{color:#fff;background-color:#218838;border-color:#1e7e34}.btn-success.focus,.btn-success:focus{box-shadow:0 0 0 0 rgba(72,180,97,.5)}.btn-success.disabled,.btn-success:disabled{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success:not(:disabled):not(.disabled).active,.btn-success:not(:disabled):not(.disabled):active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#1e7e34;border-color:#1c7430}.btn-success:not(:disabled):not(.disabled).active:focus,.btn-success:not(:disabled):not(.disabled):active:focus,.show>.btn-success.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(72,180,97,.5)}.btn-info{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info.focus,.btn-info:focus,.btn-info:hover{color:#fff;background-color:#138496;border-color:#117a8b}.btn-info.focus,.btn-info:focus{box-shadow:0 0 0 0 rgba(58,176,195,.5)}.btn-info.disabled,.btn-info:disabled{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:not(:disabled):not(.disabled).active,.btn-info:not(:disabled):not(.disabled):active,.show>.btn-info.dropdown-toggle{color:#fff;background-color:#117a8b;border-color:#10707f}.btn-info:not(:disabled):not(.disabled).active:focus,.btn-info:not(:disabled):not(.disabled):active:focus,.show>.btn-info.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(58,176,195,.5)}.btn-warning{color:#495057;background-color:#ffc107;border-color:#ffc107}.btn-warning.focus,.btn-warning:focus,.btn-warning:hover{color:#495057;background-color:#e0a800;border-color:#d39e00}.btn-warning.focus,.btn-warning:focus{box-shadow:0 0 0 0 rgba(228,176,19,.5)}.btn-warning.disabled,.btn-warning:disabled{color:#495057;background-color:#ffc107;border-color:#ffc107}.btn-warning:not(:disabled):not(.disabled).active,.btn-warning:not(:disabled):not(.disabled):active,.show>.btn-warning.dropdown-toggle{color:#495057;background-color:#d39e00;border-color:#c69500}.btn-warning:not(:disabled):not(.disabled).active:focus,.btn-warning:not(:disabled):not(.disabled):active:focus,.show>.btn-warning.dropdown-toggle:focus{box-shadow:0 0 0 0 
rgba(228,176,19,.5)}.btn-danger{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger.focus,.btn-danger:focus,.btn-danger:hover{color:#fff;background-color:#c82333;border-color:#bd2130}.btn-danger.focus,.btn-danger:focus{box-shadow:0 0 0 0 rgba(225,83,97,.5)}.btn-danger.disabled,.btn-danger:disabled{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:not(:disabled):not(.disabled).active,.btn-danger:not(:disabled):not(.disabled):active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#bd2130;border-color:#b21f2d}.btn-danger:not(:disabled):not(.disabled).active:focus,.btn-danger:not(:disabled):not(.disabled):active:focus,.show>.btn-danger.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(225,83,97,.5)}.btn-light{color:#495057;background-color:#f1f6f9;border-color:#f1f6f9}.btn-light.focus,.btn-light:focus,.btn-light:hover{color:#495057;background-color:#d6e5ee;border-color:#cddfea}.btn-light.focus,.btn-light:focus{box-shadow:0 0 0 0 rgba(216,221,225,.5)}.btn-light.disabled,.btn-light:disabled{color:#495057;background-color:#f1f6f9;border-color:#f1f6f9}.btn-light:not(:disabled):not(.disabled).active,.btn-light:not(:disabled):not(.disabled):active,.show>.btn-light.dropdown-toggle{color:#495057;background-color:#cddfea;border-color:#c4d9e6}.btn-light:not(:disabled):not(.disabled).active:focus,.btn-light:not(:disabled):not(.disabled):active:focus,.show>.btn-light.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(216,221,225,.5)}.btn-dark{color:#fff;background-color:#495057;border-color:#495057}.btn-dark.focus,.btn-dark:focus,.btn-dark:hover{color:#fff;background-color:#383d42;border-color:#32373b}.btn-dark.focus,.btn-dark:focus{box-shadow:0 0 0 0 rgba(100,106,112,.5)}.btn-dark.disabled,.btn-dark:disabled{color:#fff;background-color:#495057;border-color:#495057}.btn-dark:not(:disabled):not(.disabled).active,.btn-dark:not(:disabled):not(.disabled):active,.show>.btn-dark.dropdown-toggle{color:#fff;background-color:#32373b;border-color:#2c3034}.btn-dark:not(:disabled):not(.disabled).active:focus,.btn-dark:not(:disabled):not(.disabled):active:focus,.show>.btn-dark.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(100,106,112,.5)}.btn-primary-light{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-primary-light.focus,.btn-primary-light:focus,.btn-primary-light:hover{color:#495057;background-color:#ffedca;border-color:#ffe9bd}.btn-primary-light.focus,.btn-primary-light:focus{box-shadow:0 0 0 0 rgba(228,225,217,.5)}.btn-primary-light.disabled,.btn-primary-light:disabled{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-primary-light:not(:disabled):not(.disabled).active,.btn-primary-light:not(:disabled):not(.disabled):active,.show>.btn-primary-light.dropdown-toggle{color:#495057;background-color:#ffe9bd;border-color:#ffe5b0}.btn-primary-light:not(:disabled):not(.disabled).active:focus,.btn-primary-light:not(:disabled):not(.disabled):active:focus,.show>.btn-primary-light.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,225,217,.5)}.btn-secondary-light{color:#495057;background-color:#fff;border-color:#fff}.btn-secondary-light.focus,.btn-secondary-light:focus,.btn-secondary-light:hover{color:#495057;background-color:#ececec;border-color:#e6e6e6}.btn-secondary-light.focus,.btn-secondary-light:focus{box-shadow:0 0 0 0 
rgba(228,229,230,.5)}.btn-secondary-light.disabled,.btn-secondary-light:disabled{color:#495057;background-color:#fff;border-color:#fff}.btn-secondary-light:not(:disabled):not(.disabled).active,.btn-secondary-light:not(:disabled):not(.disabled):active,.show>.btn-secondary-light.dropdown-toggle{color:#495057;background-color:#e6e6e6;border-color:#dfdfdf}.btn-secondary-light:not(:disabled):not(.disabled).active:focus,.btn-secondary-light:not(:disabled):not(.disabled):active:focus,.show>.btn-secondary-light.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,229,230,.5)}.btn-tertiary{color:#fff;background-color:#257af4;border-color:#257af4}.btn-tertiary.focus,.btn-tertiary:focus,.btn-tertiary:hover{color:#fff;background-color:#0c66e7;border-color:#0b60db}.btn-tertiary.focus,.btn-tertiary:focus{box-shadow:0 0 0 0 rgba(70,142,246,.5)}.btn-tertiary.disabled,.btn-tertiary:disabled{color:#fff;background-color:#257af4;border-color:#257af4}.btn-tertiary:not(:disabled):not(.disabled).active,.btn-tertiary:not(:disabled):not(.disabled):active,.show>.btn-tertiary.dropdown-toggle{color:#fff;background-color:#0b60db;border-color:#0a5bcf}.btn-tertiary:not(:disabled):not(.disabled).active:focus,.btn-tertiary:not(:disabled):not(.disabled):active:focus,.show>.btn-tertiary.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(70,142,246,.5)}.btn-tertiary-light{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-tertiary-light.focus,.btn-tertiary-light:focus,.btn-tertiary-light:hover{color:#495057;background-color:#bedffd;border-color:#b2d8fc}.btn-tertiary-light.focus,.btn-tertiary-light:focus{box-shadow:0 0 0 0 rgba(204,217,229,.5)}.btn-tertiary-light.disabled,.btn-tertiary-light:disabled{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-tertiary-light:not(:disabled):not(.disabled).active,.btn-tertiary-light:not(:disabled):not(.disabled):active,.show>.btn-tertiary-light.dropdown-toggle{color:#495057;background-color:#b2d8fc;border-color:#a5d2fc}.btn-tertiary-light:not(:disabled):not(.disabled).active:focus,.btn-tertiary-light:not(:disabled):not(.disabled):active:focus,.show>.btn-tertiary-light.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(204,217,229,.5)}.btn-white{color:#495057;background-color:#fff;border-color:#fff}.btn-white.focus,.btn-white:focus,.btn-white:hover{color:#495057;background-color:#ececec;border-color:#e6e6e6}.btn-white.focus,.btn-white:focus{box-shadow:0 0 0 0 rgba(228,229,230,.5)}.btn-white.disabled,.btn-white:disabled{color:#495057;background-color:#fff;border-color:#fff}.btn-white:not(:disabled):not(.disabled).active,.btn-white:not(:disabled):not(.disabled):active,.show>.btn-white.dropdown-toggle{color:#495057;background-color:#e6e6e6;border-color:#dfdfdf}.btn-white:not(:disabled):not(.disabled).active:focus,.btn-white:not(:disabled):not(.disabled):active:focus,.show>.btn-white.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,229,230,.5)}.btn-black{color:#fff;background-color:#212529;border-color:#212529}.btn-black.focus,.btn-black:focus,.btn-black:hover{color:#fff;background-color:#101214;border-color:#0a0c0d}.btn-black.focus,.btn-black:focus{box-shadow:0 0 0 0 
rgba(66,70,73,.5)}.btn-black.disabled,.btn-black:disabled{color:#fff;background-color:#212529;border-color:#212529}.btn-black:not(:disabled):not(.disabled).active,.btn-black:not(:disabled):not(.disabled):active,.show>.btn-black.dropdown-toggle{color:#fff;background-color:#0a0c0d;border-color:#050506}.btn-black:not(:disabled):not(.disabled).active:focus,.btn-black:not(:disabled):not(.disabled):active:focus,.show>.btn-black.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(66,70,73,.5)}.btn-blue{color:#fff;background-color:#257af4;border-color:#257af4}.btn-blue.focus,.btn-blue:focus,.btn-blue:hover{color:#fff;background-color:#0c66e7;border-color:#0b60db}.btn-blue.focus,.btn-blue:focus{box-shadow:0 0 0 0 rgba(70,142,246,.5)}.btn-blue.disabled,.btn-blue:disabled{color:#fff;background-color:#257af4;border-color:#257af4}.btn-blue:not(:disabled):not(.disabled).active,.btn-blue:not(:disabled):not(.disabled):active,.show>.btn-blue.dropdown-toggle{color:#fff;background-color:#0b60db;border-color:#0a5bcf}.btn-blue:not(:disabled):not(.disabled).active:focus,.btn-blue:not(:disabled):not(.disabled):active:focus,.show>.btn-blue.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(70,142,246,.5)}.btn-light-blue{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-light-blue.focus,.btn-light-blue:focus,.btn-light-blue:hover{color:#495057;background-color:#bedffd;border-color:#b2d8fc}.btn-light-blue.focus,.btn-light-blue:focus{box-shadow:0 0 0 0 rgba(204,217,229,.5)}.btn-light-blue.disabled,.btn-light-blue:disabled{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-light-blue:not(:disabled):not(.disabled).active,.btn-light-blue:not(:disabled):not(.disabled):active,.show>.btn-light-blue.dropdown-toggle{color:#495057;background-color:#b2d8fc;border-color:#a5d2fc}.btn-light-blue:not(:disabled):not(.disabled).active:focus,.btn-light-blue:not(:disabled):not(.disabled):active:focus,.show>.btn-light-blue.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(204,217,229,.5)}.btn-yellow{color:#495057;background-color:#fc0;border-color:#fc0}.btn-yellow.focus,.btn-yellow:focus,.btn-yellow:hover{color:#495057;background-color:#d9ad00;border-color:#cca300}.btn-yellow.focus,.btn-yellow:focus{box-shadow:0 0 0 0 rgba(228,185,13,.5)}.btn-yellow.disabled,.btn-yellow:disabled{color:#495057;background-color:#fc0;border-color:#fc0}.btn-yellow:not(:disabled):not(.disabled).active,.btn-yellow:not(:disabled):not(.disabled):active,.show>.btn-yellow.dropdown-toggle{color:#495057;background-color:#cca300;border-color:#bf9900}.btn-yellow:not(:disabled):not(.disabled).active:focus,.btn-yellow:not(:disabled):not(.disabled):active:focus,.show>.btn-yellow.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,185,13,.5)}.btn-light-yellow{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-light-yellow.focus,.btn-light-yellow:focus,.btn-light-yellow:hover{color:#495057;background-color:#ffedca;border-color:#ffe9bd}.btn-light-yellow.focus,.btn-light-yellow:focus{box-shadow:0 0 0 0 rgba(228,225,217,.5)}.btn-light-yellow.disabled,.btn-light-yellow:disabled{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-light-yellow:not(:disabled):not(.disabled).active,.btn-light-yellow:not(:disabled):not(.disabled):active,.show>.btn-light-yellow.dropdown-toggle{color:#495057;background-color:#ffe9bd;border-color:#ffe5b0}.btn-light-yellow:not(:disabled):not(.disabled).active:focus,.btn-light-yellow:not(:disabled):not(.disabled):active:focus,.show>.btn-light-yellow.dropdown-toggle:focus{box-shadow:0 0 0 0 
rgba(228,225,217,.5)}.btn-orange{color:#495057;background-color:#ff8c00;border-color:#ff8c00}.btn-orange.focus,.btn-orange:focus,.btn-orange:hover{color:#fff;background-color:#d97700;border-color:#cc7000}.btn-orange.focus,.btn-orange:focus{box-shadow:0 0 0 0 rgba(228,131,13,.5)}.btn-orange.disabled,.btn-orange:disabled{color:#495057;background-color:#ff8c00;border-color:#ff8c00}.btn-orange:not(:disabled):not(.disabled).active,.btn-orange:not(:disabled):not(.disabled):active,.show>.btn-orange.dropdown-toggle{color:#fff;background-color:#cc7000;border-color:#bf6900}.btn-orange:not(:disabled):not(.disabled).active:focus,.btn-orange:not(:disabled):not(.disabled):active:focus,.show>.btn-orange.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,131,13,.5)}.btn-light-orange{color:#495057;background-color:#ffe4b5;border-color:#ffe4b5}.btn-light-orange.focus,.btn-light-orange:focus,.btn-light-orange:hover{color:#495057;background-color:#ffd68f;border-color:#ffd182}.btn-light-orange.focus,.btn-light-orange:focus{box-shadow:0 0 0 0 rgba(228,206,167,.5)}.btn-light-orange.disabled,.btn-light-orange:disabled{color:#495057;background-color:#ffe4b5;border-color:#ffe4b5}.btn-light-orange:not(:disabled):not(.disabled).active,.btn-light-orange:not(:disabled):not(.disabled):active,.show>.btn-light-orange.dropdown-toggle{color:#495057;background-color:#ffd182;border-color:#ffcd75}.btn-light-orange:not(:disabled):not(.disabled).active:focus,.btn-light-orange:not(:disabled):not(.disabled):active:focus,.show>.btn-light-orange.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,206,167,.5)}.btn-red{color:#fff;background-color:#ff3939;border-color:#ff3939}.btn-red.focus,.btn-red:focus,.btn-red:hover{color:#fff;background-color:#ff1313;border-color:#ff0606}.btn-red.focus,.btn-red:focus{box-shadow:0 0 0 0 rgba(255,87,87,.5)}.btn-red.disabled,.btn-red:disabled{color:#fff;background-color:#ff3939;border-color:#ff3939}.btn-red:not(:disabled):not(.disabled).active,.btn-red:not(:disabled):not(.disabled):active,.show>.btn-red.dropdown-toggle{color:#fff;background-color:#ff0606;border-color:#f80000}.btn-red:not(:disabled):not(.disabled).active:focus,.btn-red:not(:disabled):not(.disabled):active:focus,.show>.btn-red.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,87,87,.5)}.btn-light-red{color:#495057;background-color:#ffe4e1;border-color:#ffe4e1}.btn-light-red.focus,.btn-light-red:focus,.btn-light-red:hover{color:#495057;background-color:#ffc2bb;border-color:#ffb6ae}.btn-light-red.focus,.btn-light-red:focus{box-shadow:0 0 0 0 rgba(228,206,204,.5)}.btn-light-red.disabled,.btn-light-red:disabled{color:#495057;background-color:#ffe4e1;border-color:#ffe4e1}.btn-light-red:not(:disabled):not(.disabled).active,.btn-light-red:not(:disabled):not(.disabled):active,.show>.btn-light-red.dropdown-toggle{color:#495057;background-color:#ffb6ae;border-color:#ffaba1}.btn-light-red:not(:disabled):not(.disabled).active:focus,.btn-light-red:not(:disabled):not(.disabled):active:focus,.show>.btn-light-red.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(228,206,204,.5)}.btn-medium{color:#495057;background-color:#d6dbdf;border-color:#d6dbdf}.btn-medium.focus,.btn-medium:focus,.btn-medium:hover{color:#495057;background-color:#c1c8ce;border-color:#b9c2c9}.btn-medium.focus,.btn-medium:focus{box-shadow:0 0 0 0 
rgba(193,198,203,.5)}.btn-medium.disabled,.btn-medium:disabled{color:#495057;background-color:#d6dbdf;border-color:#d6dbdf}.btn-medium:not(:disabled):not(.disabled).active,.btn-medium:not(:disabled):not(.disabled):active,.show>.btn-medium.dropdown-toggle{color:#495057;background-color:#b9c2c9;border-color:#b2bcc3}.btn-medium:not(:disabled):not(.disabled).active:focus,.btn-medium:not(:disabled):not(.disabled):active:focus,.show>.btn-medium.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(193,198,203,.5)}.btn-outline-primary{color:#fc0;border-color:#fc0}.btn-outline-primary:hover{color:#495057;background-color:#fc0;border-color:#fc0}.btn-outline-primary.focus,.btn-outline-primary:focus{box-shadow:0 0 0 0 rgba(255,204,0,.5)}.btn-outline-primary.disabled,.btn-outline-primary:disabled{color:#fc0;background-color:transparent}.btn-outline-primary:not(:disabled):not(.disabled).active,.btn-outline-primary:not(:disabled):not(.disabled):active,.show>.btn-outline-primary.dropdown-toggle{color:#495057;background-color:#fc0;border-color:#fc0}.btn-outline-primary:not(:disabled):not(.disabled).active:focus,.btn-outline-primary:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-primary.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,204,0,.5)}.btn-outline-secondary{color:#212529;border-color:#212529}.btn-outline-secondary:hover{color:#fff;background-color:#212529;border-color:#212529}.btn-outline-secondary.focus,.btn-outline-secondary:focus{box-shadow:0 0 0 0 rgba(33,37,41,.5)}.btn-outline-secondary.disabled,.btn-outline-secondary:disabled{color:#212529;background-color:transparent}.btn-outline-secondary:not(:disabled):not(.disabled).active,.btn-outline-secondary:not(:disabled):not(.disabled):active,.show>.btn-outline-secondary.dropdown-toggle{color:#fff;background-color:#212529;border-color:#212529}.btn-outline-secondary:not(:disabled):not(.disabled).active:focus,.btn-outline-secondary:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-secondary.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(33,37,41,.5)}.btn-outline-success{color:#28a745;border-color:#28a745}.btn-outline-success:hover{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success.focus,.btn-outline-success:focus{box-shadow:0 0 0 0 rgba(40,167,69,.5)}.btn-outline-success.disabled,.btn-outline-success:disabled{color:#28a745;background-color:transparent}.btn-outline-success:not(:disabled):not(.disabled).active,.btn-outline-success:not(:disabled):not(.disabled):active,.show>.btn-outline-success.dropdown-toggle{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success:not(:disabled):not(.disabled).active:focus,.btn-outline-success:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-success.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(40,167,69,.5)}.btn-outline-info{color:#17a2b8;border-color:#17a2b8}.btn-outline-info:hover{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info.focus,.btn-outline-info:focus{box-shadow:0 0 0 0 rgba(23,162,184,.5)}.btn-outline-info.disabled,.btn-outline-info:disabled{color:#17a2b8;background-color:transparent}.btn-outline-info:not(:disabled):not(.disabled).active,.btn-outline-info:not(:disabled):not(.disabled):active,.show>.btn-outline-info.dropdown-toggle{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info:not(:disabled):not(.disabled).active:focus,.btn-outline-info:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-info.dropdown-toggle:focus{box-shadow:0 0 0 0 
rgba(23,162,184,.5)}.btn-outline-warning{color:#ffc107;border-color:#ffc107}.btn-outline-warning:hover{color:#495057;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning.focus,.btn-outline-warning:focus{box-shadow:0 0 0 0 rgba(255,193,7,.5)}.btn-outline-warning.disabled,.btn-outline-warning:disabled{color:#ffc107;background-color:transparent}.btn-outline-warning:not(:disabled):not(.disabled).active,.btn-outline-warning:not(:disabled):not(.disabled):active,.show>.btn-outline-warning.dropdown-toggle{color:#495057;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning:not(:disabled):not(.disabled).active:focus,.btn-outline-warning:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-warning.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,193,7,.5)}.btn-outline-danger{color:#dc3545;border-color:#dc3545}.btn-outline-danger:hover{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger.focus,.btn-outline-danger:focus{box-shadow:0 0 0 0 rgba(220,53,69,.5)}.btn-outline-danger.disabled,.btn-outline-danger:disabled{color:#dc3545;background-color:transparent}.btn-outline-danger:not(:disabled):not(.disabled).active,.btn-outline-danger:not(:disabled):not(.disabled):active,.show>.btn-outline-danger.dropdown-toggle{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger:not(:disabled):not(.disabled).active:focus,.btn-outline-danger:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-danger.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(220,53,69,.5)}.btn-outline-light{color:#f1f6f9;border-color:#f1f6f9}.btn-outline-light:hover{color:#495057;background-color:#f1f6f9;border-color:#f1f6f9}.btn-outline-light.focus,.btn-outline-light:focus{box-shadow:0 0 0 0 rgba(241,246,249,.5)}.btn-outline-light.disabled,.btn-outline-light:disabled{color:#f1f6f9;background-color:transparent}.btn-outline-light:not(:disabled):not(.disabled).active,.btn-outline-light:not(:disabled):not(.disabled):active,.show>.btn-outline-light.dropdown-toggle{color:#495057;background-color:#f1f6f9;border-color:#f1f6f9}.btn-outline-light:not(:disabled):not(.disabled).active:focus,.btn-outline-light:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-light.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(241,246,249,.5)}.btn-outline-dark{color:#495057;border-color:#495057}.btn-outline-dark:hover{color:#fff;background-color:#495057;border-color:#495057}.btn-outline-dark.focus,.btn-outline-dark:focus{box-shadow:0 0 0 0 rgba(73,80,87,.5)}.btn-outline-dark.disabled,.btn-outline-dark:disabled{color:#495057;background-color:transparent}.btn-outline-dark:not(:disabled):not(.disabled).active,.btn-outline-dark:not(:disabled):not(.disabled):active,.show>.btn-outline-dark.dropdown-toggle{color:#fff;background-color:#495057;border-color:#495057}.btn-outline-dark:not(:disabled):not(.disabled).active:focus,.btn-outline-dark:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-dark.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(73,80,87,.5)}.btn-outline-primary-light{color:#fffaf0;border-color:#fffaf0}.btn-outline-primary-light:hover{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-outline-primary-light.focus,.btn-outline-primary-light:focus{box-shadow:0 0 0 0 
rgba(255,250,240,.5)}.btn-outline-primary-light.disabled,.btn-outline-primary-light:disabled{color:#fffaf0;background-color:transparent}.btn-outline-primary-light:not(:disabled):not(.disabled).active,.btn-outline-primary-light:not(:disabled):not(.disabled):active,.show>.btn-outline-primary-light.dropdown-toggle{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-outline-primary-light:not(:disabled):not(.disabled).active:focus,.btn-outline-primary-light:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-primary-light.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,250,240,.5)}.btn-outline-secondary-light{color:#fff;border-color:#fff}.btn-outline-secondary-light:hover{color:#495057;background-color:#fff;border-color:#fff}.btn-outline-secondary-light.focus,.btn-outline-secondary-light:focus{box-shadow:0 0 0 0 hsla(0,0%,100%,.5)}.btn-outline-secondary-light.disabled,.btn-outline-secondary-light:disabled{color:#fff;background-color:transparent}.btn-outline-secondary-light:not(:disabled):not(.disabled).active,.btn-outline-secondary-light:not(:disabled):not(.disabled):active,.show>.btn-outline-secondary-light.dropdown-toggle{color:#495057;background-color:#fff;border-color:#fff}.btn-outline-secondary-light:not(:disabled):not(.disabled).active:focus,.btn-outline-secondary-light:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-secondary-light.dropdown-toggle:focus{box-shadow:0 0 0 0 hsla(0,0%,100%,.5)}.btn-outline-tertiary{color:#257af4;border-color:#257af4}.btn-outline-tertiary:hover{color:#fff;background-color:#257af4;border-color:#257af4}.btn-outline-tertiary.focus,.btn-outline-tertiary:focus{box-shadow:0 0 0 0 rgba(37,122,244,.5)}.btn-outline-tertiary.disabled,.btn-outline-tertiary:disabled{color:#257af4;background-color:transparent}.btn-outline-tertiary:not(:disabled):not(.disabled).active,.btn-outline-tertiary:not(:disabled):not(.disabled):active,.show>.btn-outline-tertiary.dropdown-toggle{color:#fff;background-color:#257af4;border-color:#257af4}.btn-outline-tertiary:not(:disabled):not(.disabled).active:focus,.btn-outline-tertiary:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-tertiary.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(37,122,244,.5)}.btn-outline-tertiary-light{color:#e3f1fe;border-color:#e3f1fe}.btn-outline-tertiary-light:hover{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-outline-tertiary-light.focus,.btn-outline-tertiary-light:focus{box-shadow:0 0 0 0 rgba(227,241,254,.5)}.btn-outline-tertiary-light.disabled,.btn-outline-tertiary-light:disabled{color:#e3f1fe;background-color:transparent}.btn-outline-tertiary-light:not(:disabled):not(.disabled).active,.btn-outline-tertiary-light:not(:disabled):not(.disabled):active,.show>.btn-outline-tertiary-light.dropdown-toggle{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-outline-tertiary-light:not(:disabled):not(.disabled).active:focus,.btn-outline-tertiary-light:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-tertiary-light.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(227,241,254,.5)}.btn-outline-white{color:#fff;border-color:#fff}.btn-outline-white:hover{color:#495057;background-color:#fff;border-color:#fff}.btn-outline-white.focus,.btn-outline-white:focus{box-shadow:0 0 0 0 
hsla(0,0%,100%,.5)}.btn-outline-white.disabled,.btn-outline-white:disabled{color:#fff;background-color:transparent}.btn-outline-white:not(:disabled):not(.disabled).active,.btn-outline-white:not(:disabled):not(.disabled):active,.show>.btn-outline-white.dropdown-toggle{color:#495057;background-color:#fff;border-color:#fff}.btn-outline-white:not(:disabled):not(.disabled).active:focus,.btn-outline-white:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-white.dropdown-toggle:focus{box-shadow:0 0 0 0 hsla(0,0%,100%,.5)}.btn-outline-black{color:#212529;border-color:#212529}.btn-outline-black:hover{color:#fff;background-color:#212529;border-color:#212529}.btn-outline-black.focus,.btn-outline-black:focus{box-shadow:0 0 0 0 rgba(33,37,41,.5)}.btn-outline-black.disabled,.btn-outline-black:disabled{color:#212529;background-color:transparent}.btn-outline-black:not(:disabled):not(.disabled).active,.btn-outline-black:not(:disabled):not(.disabled):active,.show>.btn-outline-black.dropdown-toggle{color:#fff;background-color:#212529;border-color:#212529}.btn-outline-black:not(:disabled):not(.disabled).active:focus,.btn-outline-black:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-black.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(33,37,41,.5)}.btn-outline-blue{color:#257af4;border-color:#257af4}.btn-outline-blue:hover{color:#fff;background-color:#257af4;border-color:#257af4}.btn-outline-blue.focus,.btn-outline-blue:focus{box-shadow:0 0 0 0 rgba(37,122,244,.5)}.btn-outline-blue.disabled,.btn-outline-blue:disabled{color:#257af4;background-color:transparent}.btn-outline-blue:not(:disabled):not(.disabled).active,.btn-outline-blue:not(:disabled):not(.disabled):active,.show>.btn-outline-blue.dropdown-toggle{color:#fff;background-color:#257af4;border-color:#257af4}.btn-outline-blue:not(:disabled):not(.disabled).active:focus,.btn-outline-blue:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-blue.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(37,122,244,.5)}.btn-outline-light-blue{color:#e3f1fe;border-color:#e3f1fe}.btn-outline-light-blue:hover{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-outline-light-blue.focus,.btn-outline-light-blue:focus{box-shadow:0 0 0 0 rgba(227,241,254,.5)}.btn-outline-light-blue.disabled,.btn-outline-light-blue:disabled{color:#e3f1fe;background-color:transparent}.btn-outline-light-blue:not(:disabled):not(.disabled).active,.btn-outline-light-blue:not(:disabled):not(.disabled):active,.show>.btn-outline-light-blue.dropdown-toggle{color:#495057;background-color:#e3f1fe;border-color:#e3f1fe}.btn-outline-light-blue:not(:disabled):not(.disabled).active:focus,.btn-outline-light-blue:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-light-blue.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(227,241,254,.5)}.btn-outline-yellow{color:#fc0;border-color:#fc0}.btn-outline-yellow:hover{color:#495057;background-color:#fc0;border-color:#fc0}.btn-outline-yellow.focus,.btn-outline-yellow:focus{box-shadow:0 0 0 0 rgba(255,204,0,.5)}.btn-outline-yellow.disabled,.btn-outline-yellow:disabled{color:#fc0;background-color:transparent}.btn-outline-yellow:not(:disabled):not(.disabled).active,.btn-outline-yellow:not(:disabled):not(.disabled):active,.show>.btn-outline-yellow.dropdown-toggle{color:#495057;background-color:#fc0;border-color:#fc0}.btn-outline-yellow:not(:disabled):not(.disabled).active:focus,.btn-outline-yellow:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-yellow.dropdown-toggle:focus{box-shadow:0 0 0 0 
rgba(255,204,0,.5)}.btn-outline-light-yellow{color:#fffaf0;border-color:#fffaf0}.btn-outline-light-yellow:hover{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-outline-light-yellow.focus,.btn-outline-light-yellow:focus{box-shadow:0 0 0 0 rgba(255,250,240,.5)}.btn-outline-light-yellow.disabled,.btn-outline-light-yellow:disabled{color:#fffaf0;background-color:transparent}.btn-outline-light-yellow:not(:disabled):not(.disabled).active,.btn-outline-light-yellow:not(:disabled):not(.disabled):active,.show>.btn-outline-light-yellow.dropdown-toggle{color:#495057;background-color:#fffaf0;border-color:#fffaf0}.btn-outline-light-yellow:not(:disabled):not(.disabled).active:focus,.btn-outline-light-yellow:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-light-yellow.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,250,240,.5)}.btn-outline-orange{color:#ff8c00;border-color:#ff8c00}.btn-outline-orange:hover{color:#495057;background-color:#ff8c00;border-color:#ff8c00}.btn-outline-orange.focus,.btn-outline-orange:focus{box-shadow:0 0 0 0 rgba(255,140,0,.5)}.btn-outline-orange.disabled,.btn-outline-orange:disabled{color:#ff8c00;background-color:transparent}.btn-outline-orange:not(:disabled):not(.disabled).active,.btn-outline-orange:not(:disabled):not(.disabled):active,.show>.btn-outline-orange.dropdown-toggle{color:#495057;background-color:#ff8c00;border-color:#ff8c00}.btn-outline-orange:not(:disabled):not(.disabled).active:focus,.btn-outline-orange:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-orange.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,140,0,.5)}.btn-outline-light-orange{color:#ffe4b5;border-color:#ffe4b5}.btn-outline-light-orange:hover{color:#495057;background-color:#ffe4b5;border-color:#ffe4b5}.btn-outline-light-orange.focus,.btn-outline-light-orange:focus{box-shadow:0 0 0 0 rgba(255,228,181,.5)}.btn-outline-light-orange.disabled,.btn-outline-light-orange:disabled{color:#ffe4b5;background-color:transparent}.btn-outline-light-orange:not(:disabled):not(.disabled).active,.btn-outline-light-orange:not(:disabled):not(.disabled):active,.show>.btn-outline-light-orange.dropdown-toggle{color:#495057;background-color:#ffe4b5;border-color:#ffe4b5}.btn-outline-light-orange:not(:disabled):not(.disabled).active:focus,.btn-outline-light-orange:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-light-orange.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,228,181,.5)}.btn-outline-red{color:#ff3939;border-color:#ff3939}.btn-outline-red:hover{color:#fff;background-color:#ff3939;border-color:#ff3939}.btn-outline-red.focus,.btn-outline-red:focus{box-shadow:0 0 0 0 rgba(255,57,57,.5)}.btn-outline-red.disabled,.btn-outline-red:disabled{color:#ff3939;background-color:transparent}.btn-outline-red:not(:disabled):not(.disabled).active,.btn-outline-red:not(:disabled):not(.disabled):active,.show>.btn-outline-red.dropdown-toggle{color:#fff;background-color:#ff3939;border-color:#ff3939}.btn-outline-red:not(:disabled):not(.disabled).active:focus,.btn-outline-red:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-red.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,57,57,.5)}.btn-outline-light-red{color:#ffe4e1;border-color:#ffe4e1}.btn-outline-light-red:hover{color:#495057;background-color:#ffe4e1;border-color:#ffe4e1}.btn-outline-light-red.focus,.btn-outline-light-red:focus{box-shadow:0 0 0 0 
rgba(255,228,225,.5)}.btn-outline-light-red.disabled,.btn-outline-light-red:disabled{color:#ffe4e1;background-color:transparent}.btn-outline-light-red:not(:disabled):not(.disabled).active,.btn-outline-light-red:not(:disabled):not(.disabled):active,.show>.btn-outline-light-red.dropdown-toggle{color:#495057;background-color:#ffe4e1;border-color:#ffe4e1}.btn-outline-light-red:not(:disabled):not(.disabled).active:focus,.btn-outline-light-red:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-light-red.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(255,228,225,.5)}.btn-outline-medium{color:#d6dbdf;border-color:#d6dbdf}.btn-outline-medium:hover{color:#495057;background-color:#d6dbdf;border-color:#d6dbdf}.btn-outline-medium.focus,.btn-outline-medium:focus{box-shadow:0 0 0 0 rgba(214,219,223,.5)}.btn-outline-medium.disabled,.btn-outline-medium:disabled{color:#d6dbdf;background-color:transparent}.btn-outline-medium:not(:disabled):not(.disabled).active,.btn-outline-medium:not(:disabled):not(.disabled):active,.show>.btn-outline-medium.dropdown-toggle{color:#495057;background-color:#d6dbdf;border-color:#d6dbdf}.btn-outline-medium:not(:disabled):not(.disabled).active:focus,.btn-outline-medium:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-medium.dropdown-toggle:focus{box-shadow:0 0 0 0 rgba(214,219,223,.5)}.btn-link{font-weight:400;color:#ff8c00;text-decoration:none}.btn-link:hover{color:#ff8c00;text-decoration:underline}.btn-link.focus,.btn-link:focus{text-decoration:underline;box-shadow:none}.btn-link.disabled,.btn-link:disabled{color:#d6dbdf;pointer-events:none}.btn-group-lg>.btn,.btn-lg{padding:16px 32px;font-size:1.125rem;line-height:26px;border-radius:8px}.btn-group-sm>.btn,.btn-sm{padding:6px 12px;font-size:.75rem;line-height:16px;border-radius:4px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:24px}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{transition:opacity .15s linear}@media(prefers-reduced-motion:reduce){.fade{transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{position:relative;height:0;overflow:hidden;transition:height .35s ease}@media(prefers-reduced-motion:reduce){.collapsing{transition:none}}.dropdown,.dropleft,.dropright,.dropup{position:relative}.dropdown-toggle{white-space:nowrap}.dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid;border-right:.3em solid transparent;border-bottom:0;border-left:.3em solid transparent}.dropdown-toggle:empty:after{margin-left:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:10rem;padding:.5rem 0;margin:.125rem 0 0;font-size:1rem;color:#212529;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid rgba(33,37,41,.15);border-radius:8px}.dropdown-menu-left{right:auto;left:0}.dropdown-menu-right{right:0;left:auto}@media(min-width:400px){.dropdown-menu-xs-left{right:auto;left:0}.dropdown-menu-xs-right{right:0;left:auto}}@media(min-width:616px){.dropdown-menu-sm-left{right:auto;left:0}.dropdown-menu-sm-right{right:0;left:auto}}@media(min-width:768px){.dropdown-menu-md-left{right:auto;left:0}.dropdown-menu-md-right{right:0;left:auto}}@media(min-width:980px){.dropdown-menu-lg-left{right:auto;left:0}.dropdown-menu-lg-right{right:0;left:auto}}@media(min-width:1240px){.dropdown-menu-xl-left{right:auto;left:0}.dropdown-menu-xl-right{right:0;left:auto}}.dropup 
.dropdown-menu{top:auto;bottom:100%;margin-top:0;margin-bottom:.125rem}.dropup .dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:0;border-right:.3em solid transparent;border-bottom:.3em solid;border-left:.3em solid transparent}.dropup .dropdown-toggle:empty:after{margin-left:0}.dropright .dropdown-menu{top:0;right:auto;left:100%;margin-top:0;margin-left:.125rem}.dropright .dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:0;border-bottom:.3em solid transparent;border-left:.3em solid}.dropright .dropdown-toggle:empty:after{margin-left:0}.dropright .dropdown-toggle:after{vertical-align:0}.dropleft .dropdown-menu{top:0;right:100%;left:auto;margin-top:0;margin-right:.125rem}.dropleft .dropdown-toggle:after{display:none}.dropleft .dropdown-toggle:before{display:inline-block;margin-right:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:.3em solid;border-bottom:.3em solid transparent}.dropleft .dropdown-toggle:empty:after{margin-left:0}.dropleft .dropdown-toggle:before{vertical-align:0}.dropdown-menu[x-placement^=bottom],.dropdown-menu[x-placement^=left],.dropdown-menu[x-placement^=right],.dropdown-menu[x-placement^=top]{right:auto;bottom:auto}.dropdown-divider{height:0;margin:4px 0;overflow:hidden;border-top:1px solid #e9ecef}.dropdown-item{display:block;width:100%;padding:.25rem 1.5rem;clear:both;font-weight:400;color:#495057;text-align:inherit;white-space:nowrap;background-color:transparent;border:0}.dropdown-item:focus,.dropdown-item:hover{color:#3d4349;text-decoration:none;background-color:#f1f6f9}.dropdown-item.active,.dropdown-item:active{color:#fff;text-decoration:none;background-color:#fc0}.dropdown-item.disabled,.dropdown-item:disabled{color:#6c757d;pointer-events:none;background-color:transparent}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:.5rem 1.5rem;margin-bottom:0;font-size:.875rem;color:#6c757d;white-space:nowrap}.dropdown-item-text{display:block;padding:.25rem 1.5rem;color:#495057}.btn-group,.btn-group-vertical{position:relative;display:inline-flex;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;flex:1 1 auto}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:1}.btn-toolbar{display:flex;flex-wrap:wrap;justify-content:flex-start}.btn-toolbar .input-group{width:auto}.btn-group>.btn-group:not(:first-child),.btn-group>.btn:not(:first-child){margin-left:-1px}.btn-group>.btn-group:not(:last-child)>.btn,.btn-group>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:not(:first-child)>.btn,.btn-group>.btn:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-right:24px;padding-left:24px}.dropdown-toggle-split:after,.dropright .dropdown-toggle-split:after,.dropup .dropdown-toggle-split:after{margin-left:0}.dropleft
.dropdown-toggle-split:before{margin-right:0}.btn-group-sm>.btn+.dropdown-toggle-split,.btn-sm+.dropdown-toggle-split{padding-right:9px;padding-left:9px}.btn-group-lg>.btn+.dropdown-toggle-split,.btn-lg+.dropdown-toggle-split{padding-right:24px;padding-left:24px}.btn-group-vertical{flex-direction:column;align-items:flex-start;justify-content:center}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{width:100%}.btn-group-vertical>.btn-group:not(:first-child),.btn-group-vertical>.btn:not(:first-child){margin-top:-1px}.btn-group-vertical>.btn-group:not(:last-child)>.btn,.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle){border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child)>.btn,.btn-group-vertical>.btn:not(:first-child){border-top-left-radius:0;border-top-right-radius:0}.btn-group-toggle>.btn,.btn-group-toggle>.btn-group>.btn{margin-bottom:0}.btn-group-toggle>.btn-group>.btn input[type=checkbox],.btn-group-toggle>.btn-group>.btn input[type=radio],.btn-group-toggle>.btn input[type=checkbox],.btn-group-toggle>.btn input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:flex;flex-wrap:wrap;align-items:stretch;width:100%}.input-group>.custom-file,.input-group>.custom-select,.input-group>.form-control,.input-group>.form-control-plaintext{position:relative;flex:1 1 0%;min-width:0;margin-bottom:0}.input-group>.custom-file+.custom-file,.input-group>.custom-file+.custom-select,.input-group>.custom-file+.form-control,.input-group>.custom-select+.custom-file,.input-group>.custom-select+.custom-select,.input-group>.custom-select+.form-control,.input-group>.form-control+.custom-file,.input-group>.form-control+.custom-select,.input-group>.form-control+.form-control,.input-group>.form-control-plaintext+.custom-file,.input-group>.form-control-plaintext+.custom-select,.input-group>.form-control-plaintext+.form-control{margin-left:-1px}.input-group>.custom-file .custom-file-input:focus~.custom-file-label,.input-group>.custom-select:focus,.input-group>.form-control:focus{z-index:3}.input-group>.custom-file .custom-file-input:focus{z-index:4}.input-group>.custom-select:not(:last-child),.input-group>.form-control:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.custom-select:not(:first-child),.input-group>.form-control:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.input-group>.custom-file{display:flex;align-items:center}.input-group>.custom-file:not(:last-child) .custom-file-label,.input-group>.custom-file:not(:last-child) .custom-file-label:after{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.custom-file:not(:first-child) .custom-file-label{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-append,.input-group-prepend{display:flex}.input-group-append .btn,.input-group-prepend .btn{position:relative;z-index:2}.input-group-append .btn:focus,.input-group-prepend .btn:focus{z-index:3}.input-group-append .btn+.btn,.input-group-append .btn+.input-group-text,.input-group-append .input-group-text+.btn,.input-group-append .input-group-text+.input-group-text,.input-group-prepend .btn+.btn,.input-group-prepend .btn+.input-group-text,.input-group-prepend .input-group-text+.btn,.input-group-prepend .input-group-text+.input-group-text{margin-left:-1px}.input-group-prepend{margin-right:-1px}.input-group-append{margin-left:-1px}.input-group-text{display:flex;align-items:center;padding:.375rem 
.75rem;margin-bottom:0;font-size:1rem;font-weight:400;line-height:1.5;color:#6c757d;text-align:center;white-space:nowrap;background-color:#e9ecef;border:1px solid #ced4da;border-radius:8px}.input-group-text input[type=checkbox],.input-group-text input[type=radio]{margin-top:0}.input-group-lg>.custom-select,.input-group-lg>.form-control:not(textarea){height:calc(1.5em + 1rem + 2px)}.input-group-lg>.custom-select,.input-group-lg>.form-control,.input-group-lg>.input-group-append>.btn,.input-group-lg>.input-group-append>.input-group-text,.input-group-lg>.input-group-prepend>.btn,.input-group-lg>.input-group-prepend>.input-group-text{padding:.5rem 1rem;font-size:1.125rem;line-height:1.5;border-radius:8px}.input-group-sm>.custom-select,.input-group-sm>.form-control:not(textarea){height:calc(1.5em + .5rem + 2px)}.input-group-sm>.custom-select,.input-group-sm>.form-control,.input-group-sm>.input-group-append>.btn,.input-group-sm>.input-group-append>.input-group-text,.input-group-sm>.input-group-prepend>.btn,.input-group-sm>.input-group-prepend>.input-group-text{padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:4px}.input-group-lg>.custom-select,.input-group-sm>.custom-select{padding-right:1.75rem}.input-group>.input-group-append:last-child>.btn:not(:last-child):not(.dropdown-toggle),.input-group>.input-group-append:last-child>.input-group-text:not(:last-child),.input-group>.input-group-append:not(:last-child)>.btn,.input-group>.input-group-append:not(:last-child)>.input-group-text,.input-group>.input-group-prepend>.btn,.input-group>.input-group-prepend>.input-group-text{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.input-group-append>.btn,.input-group>.input-group-append>.input-group-text,.input-group>.input-group-prepend:first-child>.btn:not(:first-child),.input-group>.input-group-prepend:first-child>.input-group-text:not(:first-child),.input-group>.input-group-prepend:not(:first-child)>.btn,.input-group>.input-group-prepend:not(:first-child)>.input-group-text{border-top-left-radius:0;border-bottom-left-radius:0}.custom-control{position:relative;display:block;min-height:1.5rem;padding-left:1.5rem}.custom-control-inline{display:inline-flex;margin-right:1rem}.custom-control-input{position:absolute;left:0;z-index:-1;width:1rem;height:1.25rem;opacity:0}.custom-control-input:checked~.custom-control-label:before{color:#fff;border-color:#fc0;background-color:#fc0}.custom-control-input:focus~.custom-control-label:before{box-shadow:0 0 0 .2rem rgba(255,204,0,.25)}.custom-control-input:focus:not(:checked)~.custom-control-label:before{border-color:#ffe680}.custom-control-input:not(:disabled):active~.custom-control-label:before{color:#fff;background-color:#fff0b3;border-color:#fff0b3}.custom-control-input:disabled~.custom-control-label,.custom-control-input[disabled]~.custom-control-label{color:#6c757d}.custom-control-input:disabled~.custom-control-label:before,.custom-control-input[disabled]~.custom-control-label:before{background-color:#e9ecef}.custom-control-label{position:relative;margin-bottom:0;vertical-align:top}.custom-control-label:before{pointer-events:none;background-color:#fff;border:1px solid #d6dbdf}.custom-control-label:after,.custom-control-label:before{position:absolute;top:.25rem;left:-1.5rem;display:block;width:1rem;height:1rem;content:""}.custom-control-label:after{background:no-repeat 50%/50% 50%}.custom-checkbox .custom-control-label:before{border-radius:8px}.custom-checkbox 
.custom-control-input:checked~.custom-control-label:after{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='8' height='8'%3E%3Cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26l2.974 2.99L8 2.193z'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:indeterminate~.custom-control-label:before{border-color:#fc0;background-color:#fc0}.custom-checkbox .custom-control-input:indeterminate~.custom-control-label:after{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='4' height='4'%3E%3Cpath stroke='%23fff' d='M0 2h4'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:disabled:checked~.custom-control-label:before{background-color:rgba(255,204,0,.5)}.custom-checkbox .custom-control-input:disabled:indeterminate~.custom-control-label:before{background-color:rgba(255,204,0,.5)}.custom-radio .custom-control-label:before{border-radius:50%}.custom-radio .custom-control-input:checked~.custom-control-label:after{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='%23fff'/%3E%3C/svg%3E")}.custom-radio .custom-control-input:disabled:checked~.custom-control-label:before{background-color:rgba(255,204,0,.5)}.custom-switch{padding-left:2.25rem}.custom-switch .custom-control-label:before{left:-2.25rem;width:1.75rem;pointer-events:all;border-radius:.5rem}.custom-switch .custom-control-label:after{top:calc(.25rem + 2px);left:calc(-2.25rem + 2px);width:calc(1rem - 4px);height:calc(1rem - 4px);background-color:#d6dbdf;border-radius:.5rem;transition:transform .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion:reduce){.custom-switch .custom-control-label:after{transition:none}}.custom-switch .custom-control-input:checked~.custom-control-label:after{background-color:#fff;transform:translateX(.75rem)}.custom-switch .custom-control-input:disabled:checked~.custom-control-label:before{background-color:rgba(255,204,0,.5)}.custom-select{display:inline-block;width:100%;height:calc(1.5em + .75rem + 2px);padding:.375rem 1.75rem .375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#6c757d;vertical-align:middle;background:#fff url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='4' height='5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right .75rem center/8px 10px;border:1px solid #ced4da;border-radius:8px;-webkit-appearance:none;-moz-appearance:none;appearance:none}.custom-select:focus{border-color:#ffe680;outline:0;box-shadow:0 0 0 .2rem rgba(255,204,0,.25)}.custom-select:focus::-ms-value{color:#6c757d;background-color:#fff}.custom-select[multiple],.custom-select[size]:not([size="1"]){height:auto;padding-right:.75rem;background-image:none}.custom-select:disabled{color:#6c757d;background-color:#e9ecef}.custom-select::-ms-expand{display:none}.custom-select:-moz-focusring{color:transparent;text-shadow:0 0 0 #6c757d}.custom-select-sm{height:calc(1.5em + .5rem + 2px);padding-top:.25rem;padding-bottom:.25rem;padding-left:.5rem;font-size:.875rem}.custom-select-lg{height:calc(1.5em + 1rem + 2px);padding-top:.5rem;padding-bottom:.5rem;padding-left:1rem;font-size:1.125rem}.custom-file{display:inline-block;margin-bottom:0}.custom-file,.custom-file-input{position:relative;width:100%;height:calc(1.5em + .75rem + 
2px)}.custom-file-input{z-index:2;margin:0;opacity:0}.custom-file-input:focus~.custom-file-label{border-color:#ffe680;box-shadow:0 0 0 .2rem rgba(255,204,0,.25)}.custom-file-input:disabled~.custom-file-label,.custom-file-input[disabled]~.custom-file-label{background-color:#e9ecef}.custom-file-input:lang(en)~.custom-file-label:after{content:"Browse"}.custom-file-input~.custom-file-label[data-browse]:after{content:attr(data-browse)}.custom-file-label{left:0;z-index:1;height:calc(1.5em + .75rem + 2px);font-weight:400;background-color:#fff;border:1px solid #ced4da;border-radius:8px}.custom-file-label,.custom-file-label:after{position:absolute;top:0;right:0;padding:.375rem .75rem;line-height:1.5;color:#6c757d}.custom-file-label:after{bottom:0;z-index:3;display:block;height:calc(1.5em + .75rem);content:"Browse";background-color:#e9ecef;border-left:inherit;border-radius:0 8px 8px 0}.custom-range{width:100%;height:1.4rem;padding:0;background-color:transparent;-webkit-appearance:none;-moz-appearance:none;appearance:none}.custom-range:focus{outline:none}.custom-range:focus::-webkit-slider-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(255,204,0,.25)}.custom-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(255,204,0,.25)}.custom-range:focus::-ms-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(255,204,0,.25)}.custom-range::-moz-focus-outer{border:0}.custom-range::-webkit-slider-thumb{width:1rem;height:1rem;margin-top:-.25rem;background-color:#fc0;border:0;border-radius:1rem;-webkit-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;-webkit-appearance:none;appearance:none}@media(prefers-reduced-motion:reduce){.custom-range::-webkit-slider-thumb{-webkit-transition:none;transition:none}}.custom-range::-webkit-slider-thumb:active{background-color:#fff0b3}.custom-range::-webkit-slider-runnable-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.custom-range::-moz-range-thumb{width:1rem;height:1rem;background-color:#fc0;border:0;border-radius:1rem;-moz-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;-moz-appearance:none;appearance:none}@media(prefers-reduced-motion:reduce){.custom-range::-moz-range-thumb{-moz-transition:none;transition:none}}.custom-range::-moz-range-thumb:active{background-color:#fff0b3}.custom-range::-moz-range-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.custom-range::-ms-thumb{width:1rem;height:1rem;margin-top:0;margin-right:.2rem;margin-left:.2rem;background-color:#fc0;border:0;border-radius:1rem;-ms-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s 
ease-in-out;appearance:none}@media(prefers-reduced-motion:reduce){.custom-range::-ms-thumb{-ms-transition:none;transition:none}}.custom-range::-ms-thumb:active{background-color:#fff0b3}.custom-range::-ms-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:transparent;border-color:transparent;border-width:.5rem}.custom-range::-ms-fill-lower,.custom-range::-ms-fill-upper{background-color:#dee2e6;border-radius:1rem}.custom-range::-ms-fill-upper{margin-right:15px}.custom-range:disabled::-webkit-slider-thumb{background-color:#d6dbdf}.custom-range:disabled::-webkit-slider-runnable-track{cursor:default}.custom-range:disabled::-moz-range-thumb{background-color:#d6dbdf}.custom-range:disabled::-moz-range-track{cursor:default}.custom-range:disabled::-ms-thumb{background-color:#d6dbdf}.custom-control-label:before,.custom-file-label,.custom-select{transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion:reduce){.custom-control-label:before,.custom-file-label,.custom-select{transition:none}}.nav{display:flex;flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:0}.nav-link:focus,.nav-link:hover{text-decoration:none}.nav-link.disabled{color:#d6dbdf;pointer-events:none;cursor:default}.nav-tabs{border-bottom:1px solid #6c757d}.nav-tabs .nav-item{margin-bottom:-1px}.nav-tabs .nav-link{border:1px solid transparent;border-top-left-radius:8px;border-top-right-radius:8px}.nav-tabs .nav-link:focus,.nav-tabs .nav-link:hover{border-color:transparent}.nav-tabs .nav-link.disabled{color:#d6dbdf;background-color:transparent;border-color:transparent}.nav-tabs .nav-item.show .nav-link,.nav-tabs .nav-link.active{color:#257af4;background-color:#fff;border-color:#6c757d}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.nav-pills .nav-link{border-radius:8px}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:#fff;background-color:#fc0}.nav-fill .nav-item{flex:1 1 auto;text-align:center}.nav-justified .nav-item{flex-basis:0;flex-grow:1;text-align:center}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{position:relative;padding:24px 0}.navbar,.navbar .container,.navbar .container-fluid,.navbar .container-lg,.navbar .container-md,.navbar .container-sm,.navbar .container-xl,.navbar .container-xs{display:flex;flex-wrap:wrap;align-items:center;justify-content:space-between}.navbar-brand{display:inline-block;padding-top:0;padding-bottom:0;margin-right:0;font-size:1.125rem;line-height:inherit;white-space:nowrap}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-nav{display:flex;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link{padding-right:0;padding-left:0}.navbar-nav .dropdown-menu{position:static;float:none}.navbar-text{display:inline-block;padding-top:0;padding-bottom:0}.navbar-collapse{flex-basis:100%;flex-grow:1;align-items:center}.navbar-toggler{padding:.25rem .75rem;font-size:1.125rem;line-height:1;background-color:transparent;border:1px solid transparent;border-radius:8px}.navbar-toggler:focus,.navbar-toggler:hover{text-decoration:none}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;content:"";background:no-repeat 50%;background-size:100%
100%}@media(max-width:399.98px){.navbar-expand-xs>.container,.navbar-expand-xs>.container-fluid,.navbar-expand-xs>.container-lg,.navbar-expand-xs>.container-md,.navbar-expand-xs>.container-sm,.navbar-expand-xs>.container-xl,.navbar-expand-xs>.container-xs{padding-right:0;padding-left:0}}@media(min-width:400px){.navbar-expand-xs{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-xs .navbar-nav{flex-direction:row}.navbar-expand-xs .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xs .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xs>.container,.navbar-expand-xs>.container-fluid,.navbar-expand-xs>.container-lg,.navbar-expand-xs>.container-md,.navbar-expand-xs>.container-sm,.navbar-expand-xs>.container-xl,.navbar-expand-xs>.container-xs{flex-wrap:nowrap}.navbar-expand-xs .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-xs .navbar-toggler{display:none}}@media(max-width:615.98px){.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid,.navbar-expand-sm>.container-lg,.navbar-expand-sm>.container-md,.navbar-expand-sm>.container-sm,.navbar-expand-sm>.container-xl,.navbar-expand-sm>.container-xs{padding-right:0;padding-left:0}}@media(min-width:616px){.navbar-expand-sm{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-sm .navbar-nav{flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid,.navbar-expand-sm>.container-lg,.navbar-expand-sm>.container-md,.navbar-expand-sm>.container-sm,.navbar-expand-sm>.container-xl,.navbar-expand-sm>.container-xs{flex-wrap:nowrap}.navbar-expand-sm .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-sm .navbar-toggler{display:none}}@media(max-width:767.98px){.navbar-expand-md>.container,.navbar-expand-md>.container-fluid,.navbar-expand-md>.container-lg,.navbar-expand-md>.container-md,.navbar-expand-md>.container-sm,.navbar-expand-md>.container-xl,.navbar-expand-md>.container-xs{padding-right:0;padding-left:0}}@media(min-width:768px){.navbar-expand-md{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-md .navbar-nav{flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-md>.container,.navbar-expand-md>.container-fluid,.navbar-expand-md>.container-lg,.navbar-expand-md>.container-md,.navbar-expand-md>.container-sm,.navbar-expand-md>.container-xl,.navbar-expand-md>.container-xs{flex-wrap:nowrap}.navbar-expand-md .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-md .navbar-toggler{display:none}}@media(max-width:979.98px){.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid,.navbar-expand-lg>.container-lg,.navbar-expand-lg>.container-md,.navbar-expand-lg>.container-sm,.navbar-expand-lg>.container-xl,.navbar-expand-lg>.container-xs{padding-right:0;padding-left:0}}@media(min-width:980px){.navbar-expand-lg{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-lg .navbar-nav{flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav 
.nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid,.navbar-expand-lg>.container-lg,.navbar-expand-lg>.container-md,.navbar-expand-lg>.container-sm,.navbar-expand-lg>.container-xl,.navbar-expand-lg>.container-xs{flex-wrap:nowrap}.navbar-expand-lg .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-lg .navbar-toggler{display:none}}@media(max-width:1239.98px){.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid,.navbar-expand-xl>.container-lg,.navbar-expand-xl>.container-md,.navbar-expand-xl>.container-sm,.navbar-expand-xl>.container-xl,.navbar-expand-xl>.container-xs{padding-right:0;padding-left:0}}@media(min-width:1240px){.navbar-expand-xl{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-xl .navbar-nav{flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid,.navbar-expand-xl>.container-lg,.navbar-expand-xl>.container-md,.navbar-expand-xl>.container-sm,.navbar-expand-xl>.container-xl,.navbar-expand-xl>.container-xs{flex-wrap:nowrap}.navbar-expand-xl .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-xl .navbar-toggler{display:none}}.navbar-expand{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand>.container,.navbar-expand>.container-fluid,.navbar-expand>.container-lg,.navbar-expand>.container-md,.navbar-expand>.container-sm,.navbar-expand>.container-xl,.navbar-expand>.container-xs{padding-right:0;padding-left:0}.navbar-expand .navbar-nav{flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand>.container,.navbar-expand>.container-fluid,.navbar-expand>.container-lg,.navbar-expand>.container-md,.navbar-expand>.container-sm,.navbar-expand>.container-xl,.navbar-expand>.container-xs{flex-wrap:nowrap}.navbar-expand .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand .navbar-toggler{display:none}.navbar-light .navbar-brand,.navbar-light .navbar-brand:focus,.navbar-light .navbar-brand:hover{color:rgba(33,37,41,.9)}.navbar-light .navbar-nav .nav-link{color:rgba(33,37,41,.5)}.navbar-light .navbar-nav .nav-link:focus,.navbar-light .navbar-nav .nav-link:hover{color:rgba(33,37,41,.7)}.navbar-light .navbar-nav .nav-link.disabled{color:rgba(33,37,41,.3)}.navbar-light .navbar-nav .active>.nav-link,.navbar-light .navbar-nav .nav-link.active,.navbar-light .navbar-nav .nav-link.show,.navbar-light .navbar-nav .show>.nav-link{color:rgba(33,37,41,.9)}.navbar-light .navbar-toggler{color:rgba(33,37,41,.5);border-color:rgba(33,37,41,.1)}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='30' height='30'%3E%3Cpath stroke='rgba(33, 37, 41, 0.5)' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-light .navbar-text{color:rgba(33,37,41,.5)}.navbar-light .navbar-text a,.navbar-light .navbar-text a:focus,.navbar-light .navbar-text a:hover{color:rgba(33,37,41,.9)}.navbar-dark .navbar-brand,.navbar-dark .navbar-brand:focus,.navbar-dark .navbar-brand:hover{color:#fff}.navbar-dark .navbar-nav .nav-link{color:hsla(0,0%,100%,.5)}.navbar-dark .navbar-nav .nav-link:focus,.navbar-dark .navbar-nav .nav-link:hover{color:hsla(0,0%,100%,.75)}.navbar-dark 
.navbar-nav .nav-link.disabled{color:hsla(0,0%,100%,.25)}.navbar-dark .navbar-nav .active>.nav-link,.navbar-dark .navbar-nav .nav-link.active,.navbar-dark .navbar-nav .nav-link.show,.navbar-dark .navbar-nav .show>.nav-link{color:#fff}.navbar-dark .navbar-toggler{color:hsla(0,0%,100%,.5);border-color:hsla(0,0%,100%,.1)}.navbar-dark .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='30' height='30'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-dark .navbar-text{color:hsla(0,0%,100%,.5)}.navbar-dark .navbar-text a,.navbar-dark .navbar-text a:focus,.navbar-dark .navbar-text a:hover{color:#fff}.card{position:relative;display:flex;flex-direction:column;min-width:0;word-wrap:break-word;background-color:#fff;background-clip:border-box;border:1px solid #d6dbdf;border-radius:8px}.card>hr{margin-right:0;margin-left:0}.card>.list-group:first-child .list-group-item:first-child{border-top-left-radius:8px;border-top-right-radius:8px}.card>.list-group:last-child .list-group-item:last-child{border-bottom-right-radius:8px;border-bottom-left-radius:8px}.card-body{flex:1 1 auto;min-height:1px;padding:24px}.card-title{margin-bottom:24px}.card-subtitle{margin-top:-12px}.card-subtitle,.card-text:last-child{margin-bottom:0}.card-link:hover{text-decoration:none}.card-link+.card-link{margin-left:24px}.card-header{padding:24px;margin-bottom:0;background-color:#f1f6f9;border-bottom:1px solid #d6dbdf}.card-header:first-child{border-radius:7px 7px 0 0}.card-header+.list-group .list-group-item:first-child{border-top:0}.card-footer{padding:24px;background-color:#f1f6f9;border-top:1px solid #d6dbdf}.card-footer:last-child{border-radius:0 0 7px 7px}.card-header-tabs{margin-bottom:-24px;border-bottom:0}.card-header-pills,.card-header-tabs{margin-right:-12px;margin-left:-12px}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:24px}.card-img,.card-img-bottom,.card-img-top{flex-shrink:0;width:100%}.card-img,.card-img-top{border-top-left-radius:7px;border-top-right-radius:7px}.card-img,.card-img-bottom{border-bottom-right-radius:7px;border-bottom-left-radius:7px}.card-deck .card{margin-bottom:20px}@media(min-width:616px){.card-deck{display:flex;flex-flow:row wrap;margin-right:-20px;margin-left:-20px}.card-deck .card{flex:1 0 0%;margin-right:20px;margin-bottom:0;margin-left:20px}}.card-group>.card{margin-bottom:20px}@media(min-width:616px){.card-group{display:flex;flex-flow:row wrap}.card-group>.card{flex:1 0 0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}.card-group>.card:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:not(:last-child) .card-header,.card-group>.card:not(:last-child) .card-img-top{border-top-right-radius:0}.card-group>.card:not(:last-child) .card-footer,.card-group>.card:not(:last-child) .card-img-bottom{border-bottom-right-radius:0}.card-group>.card:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:not(:first-child) .card-header,.card-group>.card:not(:first-child) .card-img-top{border-top-left-radius:0}.card-group>.card:not(:first-child) .card-footer,.card-group>.card:not(:first-child) .card-img-bottom{border-bottom-left-radius:0}}.card-columns
.card{margin-bottom:40px}@media(min-width:616px){.card-columns{-moz-column-count:3;column-count:3;-moz-column-gap:40px;column-gap:40px;orphans:1;widows:1}.card-columns .card{display:inline-block;width:100%}}.accordion>.card{overflow:hidden}.accordion>.card:not(:last-of-type){border-bottom:0;border-bottom-right-radius:0;border-bottom-left-radius:0}.accordion>.card:not(:first-of-type){border-top-left-radius:0;border-top-right-radius:0}.accordion>.card>.card-header{border-radius:0;margin-bottom:-1px}.breadcrumb{display:flex;flex-wrap:wrap;padding:.75rem 1rem;margin-bottom:1rem;list-style:none;background-color:#e9ecef;border-radius:8px}.breadcrumb-item+.breadcrumb-item{padding-left:.5rem}.breadcrumb-item+.breadcrumb-item:before{display:inline-block;padding-right:.5rem;color:#6c757d;content:"/"}.breadcrumb-item+.breadcrumb-item:hover:before{text-decoration:none}.breadcrumb-item.active{color:#6c757d}.pagination{display:flex;padding-left:0;list-style:none;border-radius:8px}.page-link{position:relative;display:block;padding:.5rem .75rem;margin-left:-1px;line-height:1.25;color:#ff8c00;background-color:#fff;border:1px solid #dee2e6}.page-link:hover{z-index:2;color:#ff8c00;text-decoration:none;background-color:#e9ecef;border-color:#dee2e6}.page-link:focus{z-index:3;outline:0;box-shadow:0 0 0 .2rem rgba(255,204,0,.25)}.page-item:first-child .page-link{margin-left:0;border-top-left-radius:8px;border-bottom-left-radius:8px}.page-item:last-child .page-link{border-top-right-radius:8px;border-bottom-right-radius:8px}.page-item.active .page-link{z-index:3;color:#fff;background-color:#fc0;border-color:#fc0}.page-item.disabled .page-link{color:#6c757d;pointer-events:none;cursor:auto;background-color:#fff;border-color:#dee2e6}.pagination-lg .page-link{padding:.75rem 1.5rem;font-size:1.125rem;line-height:1.5}.pagination-lg .page-item:first-child .page-link{border-top-left-radius:8px;border-bottom-left-radius:8px}.pagination-lg .page-item:last-child .page-link{border-top-right-radius:8px;border-bottom-right-radius:8px}.pagination-sm .page-link{padding:.25rem .5rem;font-size:.875rem;line-height:1.5}.pagination-sm .page-item:first-child .page-link{border-top-left-radius:4px;border-bottom-left-radius:4px}.pagination-sm .page-item:last-child .page-link{border-top-right-radius:4px;border-bottom-right-radius:4px}.badge{display:inline-block;padding:.25em .4em;font-size:75%;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:8px;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media(prefers-reduced-motion:reduce){.badge{transition:none}}a.badge:focus,a.badge:hover{text-decoration:none}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.badge-pill{padding-right:.6em;padding-left:.6em;border-radius:10rem}.badge-primary{color:#495057;background-color:#fc0}a.badge-primary:focus,a.badge-primary:hover{color:#495057;background-color:#cca300}a.badge-primary.focus,a.badge-primary:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,204,0,.5)}.badge-secondary{color:#fff;background-color:#212529}a.badge-secondary:focus,a.badge-secondary:hover{color:#fff;background-color:#0a0c0d}a.badge-secondary.focus,a.badge-secondary:focus{outline:0;box-shadow:0 0 0 .2rem
rgba(33,37,41,.5)}.badge-success{color:#fff;background-color:#28a745}a.badge-success:focus,a.badge-success:hover{color:#fff;background-color:#1e7e34}a.badge-success.focus,a.badge-success:focus{outline:0;box-shadow:0 0 0 .2rem rgba(40,167,69,.5)}.badge-info{color:#fff;background-color:#17a2b8}a.badge-info:focus,a.badge-info:hover{color:#fff;background-color:#117a8b}a.badge-info.focus,a.badge-info:focus{outline:0;box-shadow:0 0 0 .2rem rgba(23,162,184,.5)}.badge-warning{color:#495057;background-color:#ffc107}a.badge-warning:focus,a.badge-warning:hover{color:#495057;background-color:#d39e00}a.badge-warning.focus,a.badge-warning:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,193,7,.5)}.badge-danger{color:#fff;background-color:#dc3545}a.badge-danger:focus,a.badge-danger:hover{color:#fff;background-color:#bd2130}a.badge-danger.focus,a.badge-danger:focus{outline:0;box-shadow:0 0 0 .2rem rgba(220,53,69,.5)}.badge-light{color:#495057;background-color:#f1f6f9}a.badge-light:focus,a.badge-light:hover{color:#495057;background-color:#cddfea}a.badge-light.focus,a.badge-light:focus{outline:0;box-shadow:0 0 0 .2rem rgba(241,246,249,.5)}.badge-dark{color:#fff;background-color:#495057}a.badge-dark:focus,a.badge-dark:hover{color:#fff;background-color:#32373b}a.badge-dark.focus,a.badge-dark:focus{outline:0;box-shadow:0 0 0 .2rem rgba(73,80,87,.5)}.badge-primary-light{color:#495057;background-color:#fffaf0}a.badge-primary-light:focus,a.badge-primary-light:hover{color:#495057;background-color:#ffe9bd}a.badge-primary-light.focus,a.badge-primary-light:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,250,240,.5)}.badge-secondary-light{color:#495057;background-color:#fff}a.badge-secondary-light:focus,a.badge-secondary-light:hover{color:#495057;background-color:#e6e6e6}a.badge-secondary-light.focus,a.badge-secondary-light:focus{outline:0;box-shadow:0 0 0 .2rem hsla(0,0%,100%,.5)}.badge-tertiary{color:#fff;background-color:#257af4}a.badge-tertiary:focus,a.badge-tertiary:hover{color:#fff;background-color:#0b60db}a.badge-tertiary.focus,a.badge-tertiary:focus{outline:0;box-shadow:0 0 0 .2rem rgba(37,122,244,.5)}.badge-tertiary-light{color:#495057;background-color:#e3f1fe}a.badge-tertiary-light:focus,a.badge-tertiary-light:hover{color:#495057;background-color:#b2d8fc}a.badge-tertiary-light.focus,a.badge-tertiary-light:focus{outline:0;box-shadow:0 0 0 .2rem rgba(227,241,254,.5)}.badge-white{color:#495057;background-color:#fff}a.badge-white:focus,a.badge-white:hover{color:#495057;background-color:#e6e6e6}a.badge-white.focus,a.badge-white:focus{outline:0;box-shadow:0 0 0 .2rem hsla(0,0%,100%,.5)}.badge-black{color:#fff;background-color:#212529}a.badge-black:focus,a.badge-black:hover{color:#fff;background-color:#0a0c0d}a.badge-black.focus,a.badge-black:focus{outline:0;box-shadow:0 0 0 .2rem rgba(33,37,41,.5)}.badge-blue{color:#fff;background-color:#257af4}a.badge-blue:focus,a.badge-blue:hover{color:#fff;background-color:#0b60db}a.badge-blue.focus,a.badge-blue:focus{outline:0;box-shadow:0 0 0 .2rem rgba(37,122,244,.5)}.badge-light-blue{color:#495057;background-color:#e3f1fe}a.badge-light-blue:focus,a.badge-light-blue:hover{color:#495057;background-color:#b2d8fc}a.badge-light-blue.focus,a.badge-light-blue:focus{outline:0;box-shadow:0 0 0 .2rem rgba(227,241,254,.5)}.badge-yellow{color:#495057;background-color:#fc0}a.badge-yellow:focus,a.badge-yellow:hover{color:#495057;background-color:#cca300}a.badge-yellow.focus,a.badge-yellow:focus{outline:0;box-shadow:0 0 0 .2rem 
rgba(255,204,0,.5)}.badge-light-yellow{color:#495057;background-color:#fffaf0}a.badge-light-yellow:focus,a.badge-light-yellow:hover{color:#495057;background-color:#ffe9bd}a.badge-light-yellow.focus,a.badge-light-yellow:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,250,240,.5)}.badge-orange{color:#495057;background-color:#ff8c00}a.badge-orange:focus,a.badge-orange:hover{color:#495057;background-color:#cc7000}a.badge-orange.focus,a.badge-orange:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,140,0,.5)}.badge-light-orange{color:#495057;background-color:#ffe4b5}a.badge-light-orange:focus,a.badge-light-orange:hover{color:#495057;background-color:#ffd182}a.badge-light-orange.focus,a.badge-light-orange:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,228,181,.5)}.badge-red{color:#fff;background-color:#ff3939}a.badge-red:focus,a.badge-red:hover{color:#fff;background-color:#ff0606}a.badge-red.focus,a.badge-red:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,57,57,.5)}.badge-light-red{color:#495057;background-color:#ffe4e1}a.badge-light-red:focus,a.badge-light-red:hover{color:#495057;background-color:#ffb6ae}a.badge-light-red.focus,a.badge-light-red:focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,228,225,.5)}.badge-medium{color:#495057;background-color:#d6dbdf}a.badge-medium:focus,a.badge-medium:hover{color:#495057;background-color:#b9c2c9}a.badge-medium.focus,a.badge-medium:focus{outline:0;box-shadow:0 0 0 .2rem rgba(214,219,223,.5)}.jumbotron{padding:2rem 1rem;margin-bottom:2rem;background-color:#e9ecef;border-radius:8px}@media(min-width:616px){.jumbotron{padding:4rem 2rem}}.jumbotron-fluid{padding-right:0;padding-left:0;border-radius:0}.alert{position:relative;padding:.75rem 1.25rem;margin-bottom:1rem;border:1px solid transparent;border-radius:8px}.alert-heading{color:inherit}.alert-link{font-weight:700}.alert-dismissible{padding-right:4rem}.alert-dismissible .close{position:absolute;top:0;right:0;padding:.75rem 1.25rem;color:inherit}.alert-primary{color:#947c14;background-color:#fff5cc;border-color:#fff1b8}.alert-primary hr{border-top-color:#ffec9f}.alert-primary .alert-link{color:#67560e}.alert-secondary{color:#212529;background-color:#d3d3d4;border-color:#c1c2c3}.alert-secondary hr{border-top-color:#b4b5b6}.alert-secondary .alert-link{color:#0a0c0d}.alert-success{color:#256938;background-color:#d4edda;border-color:#c3e6cb}.alert-success hr{border-top-color:#b1dfbb}.alert-success .alert-link{color:#184324}.alert-info{color:#1c6673;background-color:#d1ecf1;border-color:#bee5eb}.alert-info hr{border-top-color:#abdde5}.alert-info .alert-link{color:#12424a}.alert-warning{color:#947617;background-color:#fff3cd;border-color:#ffeeba}.alert-warning hr{border-top-color:#ffe8a1}.alert-warning .alert-link{color:#685310}.alert-danger{color:#822d38;background-color:#f8d7da;border-color:#f5c6cb}.alert-danger hr{border-top-color:#f1b0b7}.alert-danger .alert-link{color:#5c2028}.alert-light{color:#8d9295;background-color:#fcfdfe;border-color:#fbfcfd}.alert-light hr{border-top-color:#eaeff5}.alert-light .alert-link{color:#73797c}.alert-dark{color:#363b41;background-color:#dbdcdd;border-color:#ccced0}.alert-dark hr{border-top-color:#bfc1c4}.alert-dark .alert-link{color:#1f2225}.alert-primary-light{color:#949490;background-color:#fffefc;border-color:#fffefb}.alert-primary-light hr{border-top-color:#fff8e2}.alert-primary-light .alert-link{color:#7b7b76}.alert-secondary-light{color:#949698;background-color:#fff;border-color:#fff}.alert-secondary-light hr{border-top-color:#f2f2f2}.alert-secondary-light 
.alert-link{color:#7a7d7f}.alert-tertiary{color:#235193;background-color:#d3e4fd;border-color:#c2dafc}.alert-tertiary hr{border-top-color:#aacbfb}.alert-tertiary .alert-link{color:#193a6a}.alert-tertiary-light{color:#868f98;background-color:#f9fcff;border-color:#f7fbff}.alert-tertiary-light hr{border-top-color:#deeeff}.alert-tertiary-light .alert-link{color:#6c767f}.alert-white{color:#949698;background-color:#fff;border-color:#fff}.alert-white hr{border-top-color:#f2f2f2}.alert-white .alert-link{color:#7a7d7f}.alert-black{color:#212529;background-color:#d3d3d4;border-color:#c1c2c3}.alert-black hr{border-top-color:#b4b5b6}.alert-black .alert-link{color:#0a0c0d}.alert-blue{color:#235193;background-color:#d3e4fd;border-color:#c2dafc}.alert-blue hr{border-top-color:#aacbfb}.alert-blue .alert-link{color:#193a6a}.alert-light-blue{color:#868f98;background-color:#f9fcff;border-color:#f7fbff}.alert-light-blue hr{border-top-color:#deeeff}.alert-light-blue .alert-link{color:#6c767f}.alert-yellow{color:#947c14;background-color:#fff5cc;border-color:#fff1b8}.alert-yellow hr{border-top-color:#ffec9f}.alert-yellow .alert-link{color:#67560e}.alert-light-yellow{color:#949490;background-color:#fffefc;border-color:#fffefb}.alert-light-yellow hr{border-top-color:#fff8e2}.alert-light-yellow .alert-link{color:#7b7b76}.alert-orange{color:#945b14;background-color:#ffe8cc;border-color:#ffdfb8}.alert-orange hr{border-top-color:#ffd49f}.alert-orange .alert-link{color:#673f0e}.alert-light-orange{color:#948872;background-color:#fffaf0;border-color:#fff7ea}.alert-light-orange hr{border-top-color:#ffedd1}.alert-light-orange .alert-link{color:#786e5b}.alert-red{color:#942f31;background-color:#ffd7d7;border-color:#ffc8c8}.alert-red hr{border-top-color:#ffafaf}.alert-red .alert-link{color:#6d2324}.alert-light-red{color:#948889;background-color:#fffaf9;border-color:#fff7f7}.alert-light-red hr{border-top-color:#ffdede}.alert-light-red .alert-link{color:#7b6e6f}.alert-medium{color:#7f8488;background-color:#f7f8f9;border-color:#f4f5f6}.alert-medium hr{border-top-color:#e6e8eb}.alert-medium .alert-link{color:#666a6e}@-webkit-keyframes progress-bar-stripes{0%{background-position:1rem 0}to{background-position:0 0}}@keyframes progress-bar-stripes{0%{background-position:1rem 0}to{background-position:0 0}}.progress{height:1rem;font-size:.75rem;background-color:#e9ecef;border-radius:8px}.progress,.progress-bar{display:flex;overflow:hidden}.progress-bar{flex-direction:column;justify-content:center;color:#fff;text-align:center;white-space:nowrap;background-color:#fc0;transition:width .6s ease}@media(prefers-reduced-motion:reduce){.progress-bar{transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg,hsla(0,0%,100%,.15) 25%,transparent 0,transparent 50%,hsla(0,0%,100%,.15) 0,hsla(0,0%,100%,.15) 75%,transparent 0,transparent);background-size:1rem 1rem}.progress-bar-animated{-webkit-animation:progress-bar-stripes 1s linear infinite;animation:progress-bar-stripes 1s linear 
infinite}@media(prefers-reduced-motion:reduce){.progress-bar-animated{-webkit-animation:none;animation:none}}.media{display:flex;align-items:flex-start}.media-body{flex:1}.list-group{display:flex;flex-direction:column;padding-left:0;margin-bottom:0}.list-group-item-action{width:100%;color:#6c757d;text-align:inherit}.list-group-item-action:focus,.list-group-item-action:hover{z-index:1;color:#6c757d;text-decoration:none;background-color:#f1f6f9}.list-group-item-action:active{color:#212529;background-color:#e9ecef}.list-group-item{position:relative;display:block;padding:.75rem 1.25rem;background-color:#fff;border:1px solid rgba(33,37,41,.125)}.list-group-item:first-child{border-top-left-radius:8px;border-top-right-radius:8px}.list-group-item:last-child{border-bottom-right-radius:8px;border-bottom-left-radius:8px}.list-group-item.disabled,.list-group-item:disabled{color:#6c757d;pointer-events:none;background-color:#fff}.list-group-item.active{z-index:2;color:#fff;background-color:#fc0;border-color:#fc0}.list-group-item+.list-group-item{border-top-width:0}.list-group-item+.list-group-item.active{margin-top:-1px;border-top-width:1px}.list-group-horizontal{flex-direction:row}.list-group-horizontal .list-group-item:first-child{border-bottom-left-radius:8px;border-top-right-radius:0}.list-group-horizontal .list-group-item:last-child{border-top-right-radius:8px;border-bottom-left-radius:0}.list-group-horizontal .list-group-item.active{margin-top:0}.list-group-horizontal .list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal .list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}@media(min-width:400px){.list-group-horizontal-xs{flex-direction:row}.list-group-horizontal-xs .list-group-item:first-child{border-bottom-left-radius:8px;border-top-right-radius:0}.list-group-horizontal-xs .list-group-item:last-child{border-top-right-radius:8px;border-bottom-left-radius:0}.list-group-horizontal-xs .list-group-item.active{margin-top:0}.list-group-horizontal-xs .list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-xs .list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media(min-width:616px){.list-group-horizontal-sm{flex-direction:row}.list-group-horizontal-sm .list-group-item:first-child{border-bottom-left-radius:8px;border-top-right-radius:0}.list-group-horizontal-sm .list-group-item:last-child{border-top-right-radius:8px;border-bottom-left-radius:0}.list-group-horizontal-sm .list-group-item.active{margin-top:0}.list-group-horizontal-sm .list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-sm .list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media(min-width:768px){.list-group-horizontal-md{flex-direction:row}.list-group-horizontal-md .list-group-item:first-child{border-bottom-left-radius:8px;border-top-right-radius:0}.list-group-horizontal-md .list-group-item:last-child{border-top-right-radius:8px;border-bottom-left-radius:0}.list-group-horizontal-md .list-group-item.active{margin-top:0}.list-group-horizontal-md .list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-md .list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media(min-width:980px){.list-group-horizontal-lg{flex-direction:row}.list-group-horizontal-lg .list-group-item:first-child{border-bottom-left-radius:8px;border-top-right-radius:0}.list-group-horizontal-lg 
.list-group-item:last-child{border-top-right-radius:8px;border-bottom-left-radius:0}.list-group-horizontal-lg .list-group-item.active{margin-top:0}.list-group-horizontal-lg .list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-lg .list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media(min-width:1240px){.list-group-horizontal-xl{flex-direction:row}.list-group-horizontal-xl .list-group-item:first-child{border-bottom-left-radius:8px;border-top-right-radius:0}.list-group-horizontal-xl .list-group-item:last-child{border-top-right-radius:8px;border-bottom-left-radius:0}.list-group-horizontal-xl .list-group-item.active{margin-top:0}.list-group-horizontal-xl .list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-xl .list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}.list-group-flush .list-group-item{border-right-width:0;border-left-width:0;border-radius:0}.list-group-flush .list-group-item:first-child{border-top-width:0}.list-group-flush:last-child .list-group-item:last-child{border-bottom-width:0}.list-group-item-primary{color:#947c14;background-color:#fff1b8}.list-group-item-primary.list-group-item-action:focus,.list-group-item-primary.list-group-item-action:hover{color:#947c14;background-color:#ffec9f}.list-group-item-primary.list-group-item-action.active{color:#fff;background-color:#947c14;border-color:#947c14}.list-group-item-secondary{color:#212529;background-color:#c1c2c3}.list-group-item-secondary.list-group-item-action:focus,.list-group-item-secondary.list-group-item-action:hover{color:#212529;background-color:#b4b5b6}.list-group-item-secondary.list-group-item-action.active{color:#fff;background-color:#212529;border-color:#212529}.list-group-item-success{color:#256938;background-color:#c3e6cb}.list-group-item-success.list-group-item-action:focus,.list-group-item-success.list-group-item-action:hover{color:#256938;background-color:#b1dfbb}.list-group-item-success.list-group-item-action.active{color:#fff;background-color:#256938;border-color:#256938}.list-group-item-info{color:#1c6673;background-color:#bee5eb}.list-group-item-info.list-group-item-action:focus,.list-group-item-info.list-group-item-action:hover{color:#1c6673;background-color:#abdde5}.list-group-item-info.list-group-item-action.active{color:#fff;background-color:#1c6673;border-color:#1c6673}.list-group-item-warning{color:#947617;background-color:#ffeeba}.list-group-item-warning.list-group-item-action:focus,.list-group-item-warning.list-group-item-action:hover{color:#947617;background-color:#ffe8a1}.list-group-item-warning.list-group-item-action.active{color:#fff;background-color:#947617;border-color:#947617}.list-group-item-danger{color:#822d38;background-color:#f5c6cb}.list-group-item-danger.list-group-item-action:focus,.list-group-item-danger.list-group-item-action:hover{color:#822d38;background-color:#f1b0b7}.list-group-item-danger.list-group-item-action.active{color:#fff;background-color:#822d38;border-color:#822d38}.list-group-item-light{color:#8d9295;background-color:#fbfcfd}.list-group-item-light.list-group-item-action:focus,.list-group-item-light.list-group-item-action:hover{color:#8d9295;background-color:#eaeff5}.list-group-item-light.list-group-item-action.active{color:#fff;background-color:#8d9295;border-color:#8d9295}
.list-group-item-dark{color:#363b41;background-color:#ccced0}.list-group-item-dark.list-group-item-action:focus,.list-group-item-dark.list-group-item-action:hover{color:#363b41;background-color:#bfc1c4}.list-group-item-dark.list-group-item-action.active{color:#fff;background-color:#363b41;border-color:#363b41}.list-group-item-primary-light{color:#949490;background-color:#fffefb}.list-group-item-primary-light.list-group-item-action:focus,.list-group-item-primary-light.list-group-item-action:hover{color:#949490;background-color:#fff8e2}.list-group-item-primary-light.list-group-item-action.active{color:#fff;background-color:#949490;border-color:#949490}.list-group-item-secondary-light{color:#949698;background-color:#fff}.list-group-item-secondary-light.list-group-item-action:focus,.list-group-item-secondary-light.list-group-item-action:hover{color:#949698;background-color:#f2f2f2}.list-group-item-secondary-light.list-group-item-action.active{color:#fff;background-color:#949698;border-color:#949698}.list-group-item-tertiary{color:#235193;background-color:#c2dafc}.list-group-item-tertiary.list-group-item-action:focus,.list-group-item-tertiary.list-group-item-action:hover{color:#235193;background-color:#aacbfb}.list-group-item-tertiary.list-group-item-action.active{color:#fff;background-color:#235193;border-color:#235193}.list-group-item-tertiary-light{color:#868f98;background-color:#f7fbff}.list-group-item-tertiary-light.list-group-item-action:focus,.list-group-item-tertiary-light.list-group-item-action:hover{color:#868f98;background-color:#deeeff}.list-group-item-tertiary-light.list-group-item-action.active{color:#fff;background-color:#868f98;border-color:#868f98}.list-group-item-white{color:#949698;background-color:#fff}.list-group-item-white.list-group-item-action:focus,.list-group-item-white.list-group-item-action:hover{color:#949698;background-color:#f2f2f2}.list-group-item-white.list-group-item-action.active{color:#fff;background-color:#949698;border-color:#949698}.list-group-item-black{color:#212529;background-color:#c1c2c3}.list-group-item-black.list-group-item-action:focus,.list-group-item-black.list-group-item-action:hover{color:#212529;background-color:#b4b5b6}.list-group-item-black.list-group-item-action.active{color:#fff;background-color:#212529;border-color:#212529}.list-group-item-blue{color:#235193;background-color:#c2dafc}.list-group-item-blue.list-group-item-action:focus,.list-group-item-blue.list-group-item-action:hover{color:#235193;background-color:#aacbfb}.list-group-item-blue.list-group-item-action.active{color:#fff;background-color:#235193;border-color:#235193}.list-group-item-light-blue{color:#868f98;background-color:#f7fbff}.list-group-item-light-blue.list-group-item-action:focus,.list-group-item-light-blue.list-group-item-action:hover{color:#868f98;background-color:#deeeff}.list-group-item-light-blue.list-group-item-action.active{color:#fff;background-color:#868f98;border-color:#868f98}.list-group-item-yellow{color:#947c14;background-color:#fff1b8}.list-group-item-yellow.list-group-item-action:focus,.list-group-item-yellow.list-group-item-action:hover{color:#947c14;background-color:#ffec9f}.list-group-item-yellow.list-group-item-action.active{color:#fff;background-color:#947c14;border-color:#947c14}.list-group-item-light-yellow{color:#949490;background-color:#fffefb}.list-group-item-light-yellow.list-group-item-action:focus,.list-group-item-light-yellow.list-group-item-action:hover{color:#949490;background-color:#fff8e2}.list-group-item-light-yellow.list-group-item-action.active{color:#fff;background-color:#949490;border-color:#949490}.list-group-item-orange{color:#945b14;background-color:#ffdfb8}
.list-group-item-orange.list-group-item-action:focus,.list-group-item-orange.list-group-item-action:hover{color:#945b14;background-color:#ffd49f}.list-group-item-orange.list-group-item-action.active{color:#fff;background-color:#945b14;border-color:#945b14}.list-group-item-light-orange{color:#948872;background-color:#fff7ea}.list-group-item-light-orange.list-group-item-action:focus,.list-group-item-light-orange.list-group-item-action:hover{color:#948872;background-color:#ffedd1}.list-group-item-light-orange.list-group-item-action.active{color:#fff;background-color:#948872;border-color:#948872}.list-group-item-red{color:#942f31;background-color:#ffc8c8}.list-group-item-red.list-group-item-action:focus,.list-group-item-red.list-group-item-action:hover{color:#942f31;background-color:#ffafaf}.list-group-item-red.list-group-item-action.active{color:#fff;background-color:#942f31;border-color:#942f31}.list-group-item-light-red{color:#948889;background-color:#fff7f7}.list-group-item-light-red.list-group-item-action:focus,.list-group-item-light-red.list-group-item-action:hover{color:#948889;background-color:#ffdede}.list-group-item-light-red.list-group-item-action.active{color:#fff;background-color:#948889;border-color:#948889}.list-group-item-medium{color:#7f8488;background-color:#f4f5f6}.list-group-item-medium.list-group-item-action:focus,.list-group-item-medium.list-group-item-action:hover{color:#7f8488;background-color:#e6e8eb}.list-group-item-medium.list-group-item-action.active{color:#fff;background-color:#7f8488;border-color:#7f8488}.close{float:right;font-size:1.5rem;font-weight:700;line-height:1;color:#212529;text-shadow:0 1px 0 #fff;opacity:.5}@media(max-width:1200px){.close{font-size:calc(1.275rem + .3vw)}}.close:hover{color:#212529;text-decoration:none}.close:not(:disabled):not(.disabled):focus,.close:not(:disabled):not(.disabled):hover{opacity:.75}button.close{padding:0;background-color:transparent;border:0;-webkit-appearance:none;-moz-appearance:none;appearance:none}a.close.disabled{pointer-events:none}.toast{max-width:350px;overflow:hidden;font-size:.875rem;background-color:hsla(0,0%,100%,.85);background-clip:padding-box;border:1px solid rgba(0,0,0,.1);box-shadow:0 .25rem .75rem rgba(33,37,41,.1);-webkit-backdrop-filter:blur(10px);backdrop-filter:blur(10px);opacity:0;border-radius:.25rem}.toast:not(:last-child){margin-bottom:.75rem}.toast.showing{opacity:1}.toast.show{display:block;opacity:1}.toast.hide{display:none}.toast-header{display:flex;align-items:center;padding:.25rem .75rem;color:#6c757d;background-color:hsla(0,0%,100%,.85);background-clip:padding-box;border-bottom:1px solid rgba(0,0,0,.05)}.toast-body{padding:.75rem}.modal-open{overflow:hidden}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal{position:fixed;top:0;left:0;z-index:1050;display:none;width:100%;height:100%;overflow:hidden;outline:0}.modal-dialog{position:relative;width:auto;margin:.5rem;pointer-events:none}.modal.fade .modal-dialog{transition:transform .3s ease-out;transform:translateY(-50px)}@media(prefers-reduced-motion:reduce){.modal.fade .modal-dialog{transition:none}}.modal.show .modal-dialog{transform:none}.modal.modal-static .modal-dialog{transform:scale(1.02)}.modal-dialog-scrollable{display:flex;max-height:calc(100% - 1rem)}.modal-dialog-scrollable .modal-content{max-height:calc(100vh - 1rem);overflow:hidden}.modal-dialog-scrollable .modal-footer,.modal-dialog-scrollable .modal-header{flex-shrink:0}.modal-dialog-scrollable
.modal-body{overflow-y:auto}.modal-dialog-centered{display:flex;align-items:center;min-height:calc(100% - 1rem)}.modal-dialog-centered:before{display:block;height:calc(100vh - 1rem);content:""}.modal-dialog-centered.modal-dialog-scrollable{flex-direction:column;justify-content:center;height:100%}.modal-dialog-centered.modal-dialog-scrollable .modal-content{max-height:none}.modal-dialog-centered.modal-dialog-scrollable:before{content:none}.modal-content{position:relative;display:flex;flex-direction:column;width:100%;pointer-events:auto;background-color:#fff;background-clip:padding-box;border:1px solid rgba(33,37,41,.2);border-radius:8px;outline:0}.modal-backdrop{position:fixed;top:0;left:0;z-index:1040;width:100vw;height:100vh;background-color:#212529}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:.5}.modal-header{display:flex;align-items:flex-start;justify-content:space-between;padding:1rem;border-bottom:1px solid #d6dbdf;border-top-left-radius:7px;border-top-right-radius:7px}.modal-header .close{padding:1rem;margin:-1rem -1rem -1rem auto}.modal-title{margin-bottom:0;line-height:1.5}.modal-body{position:relative;flex:1 1 auto;padding:1rem}.modal-footer{display:flex;flex-wrap:wrap;align-items:center;justify-content:flex-end;padding:.75rem;border-top:1px solid #d6dbdf;border-bottom-right-radius:7px;border-bottom-left-radius:7px}.modal-footer>*{margin:.25rem}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media(min-width:616px){.modal-dialog{max-width:500px;margin:1.75rem auto}.modal-dialog-scrollable{max-height:calc(100% - 3.5rem)}.modal-dialog-scrollable .modal-content{max-height:calc(100vh - 3.5rem)}.modal-dialog-centered{min-height:calc(100% - 3.5rem)}.modal-dialog-centered:before{height:calc(100vh - 3.5rem)}.modal-sm{max-width:300px}}@media(min-width:980px){.modal-lg,.modal-xl{max-width:800px}}@media(min-width:1240px){.modal-xl{max-width:1140px}}.tooltip{position:absolute;z-index:1070;display:block;margin:0;font-family:Noto Sans,sans-serif;font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;opacity:0}.tooltip.show{opacity:.9}.tooltip .arrow{position:absolute;display:block;width:.8rem;height:.4rem}.tooltip .arrow:before{position:absolute;content:"";border-color:transparent;border-style:solid}.bs-tooltip-auto[x-placement^=top],.bs-tooltip-top{padding:.4rem 0}.bs-tooltip-auto[x-placement^=top] .arrow,.bs-tooltip-top .arrow{bottom:0}.bs-tooltip-auto[x-placement^=top] .arrow:before,.bs-tooltip-top .arrow:before{top:0;border-width:.4rem .4rem 0;border-top-color:#212529}.bs-tooltip-auto[x-placement^=right],.bs-tooltip-right{padding:0 .4rem}.bs-tooltip-auto[x-placement^=right] .arrow,.bs-tooltip-right .arrow{left:0;width:.4rem;height:.8rem}.bs-tooltip-auto[x-placement^=right] .arrow:before,.bs-tooltip-right .arrow:before{right:0;border-width:.4rem .4rem .4rem 0;border-right-color:#212529}.bs-tooltip-auto[x-placement^=bottom],.bs-tooltip-bottom{padding:.4rem 0}.bs-tooltip-auto[x-placement^=bottom] .arrow,.bs-tooltip-bottom .arrow{top:0}.bs-tooltip-auto[x-placement^=bottom] .arrow:before,.bs-tooltip-bottom .arrow:before{bottom:0;border-width:0 .4rem .4rem;border-bottom-color:#212529}.bs-tooltip-auto[x-placement^=left],.bs-tooltip-left{padding:0 .4rem}.bs-tooltip-auto[x-placement^=left] .arrow,.bs-tooltip-left 
.arrow{right:0;width:.4rem;height:.8rem}.bs-tooltip-auto[x-placement^=left] .arrow:before,.bs-tooltip-left .arrow:before{left:0;border-width:.4rem 0 .4rem .4rem;border-left-color:#212529}.tooltip-inner{max-width:200px;padding:.25rem .5rem;color:#fff;text-align:center;background-color:#212529;border-radius:8px}.popover{top:0;left:0;z-index:1060;max-width:276px;font-family:Noto Sans,sans-serif;font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;background-color:#fff;background-clip:padding-box;border:1px solid rgba(33,37,41,.2);border-radius:8px}.popover,.popover .arrow{position:absolute;display:block}.popover .arrow{width:1rem;height:.5rem;margin:0 8px}.popover .arrow:after,.popover .arrow:before{position:absolute;display:block;content:"";border-color:transparent;border-style:solid}.bs-popover-auto[x-placement^=top],.bs-popover-top{margin-bottom:.5rem}.bs-popover-auto[x-placement^=top]>.arrow,.bs-popover-top>.arrow{bottom:calc(-.5rem - 1px)}.bs-popover-auto[x-placement^=top]>.arrow:before,.bs-popover-top>.arrow:before{bottom:0;border-width:.5rem .5rem 0;border-top-color:rgba(33,37,41,.25)}.bs-popover-auto[x-placement^=top]>.arrow:after,.bs-popover-top>.arrow:after{bottom:1px;border-width:.5rem .5rem 0;border-top-color:#fff}.bs-popover-auto[x-placement^=right],.bs-popover-right{margin-left:.5rem}.bs-popover-auto[x-placement^=right]>.arrow,.bs-popover-right>.arrow{left:calc(-.5rem - 1px);width:.5rem;height:1rem;margin:8px 0}.bs-popover-auto[x-placement^=right]>.arrow:before,.bs-popover-right>.arrow:before{left:0;border-width:.5rem .5rem .5rem 0;border-right-color:rgba(33,37,41,.25)}.bs-popover-auto[x-placement^=right]>.arrow:after,.bs-popover-right>.arrow:after{left:1px;border-width:.5rem .5rem .5rem 0;border-right-color:#fff}.bs-popover-auto[x-placement^=bottom],.bs-popover-bottom{margin-top:.5rem}.bs-popover-auto[x-placement^=bottom]>.arrow,.bs-popover-bottom>.arrow{top:calc(-.5rem - 1px)}.bs-popover-auto[x-placement^=bottom]>.arrow:before,.bs-popover-bottom>.arrow:before{top:0;border-width:0 .5rem .5rem;border-bottom-color:rgba(33,37,41,.25)}.bs-popover-auto[x-placement^=bottom]>.arrow:after,.bs-popover-bottom>.arrow:after{top:1px;border-width:0 .5rem .5rem;border-bottom-color:#fff}.bs-popover-auto[x-placement^=bottom] .popover-header:before,.bs-popover-bottom .popover-header:before{position:absolute;top:0;left:50%;display:block;width:1rem;margin-left:-.5rem;content:"";border-bottom:1px solid #f7f7f7}.bs-popover-auto[x-placement^=left],.bs-popover-left{margin-right:.5rem}.bs-popover-auto[x-placement^=left]>.arrow,.bs-popover-left>.arrow{right:calc(-.5rem - 1px);width:.5rem;height:1rem;margin:8px 0}.bs-popover-auto[x-placement^=left]>.arrow:before,.bs-popover-left>.arrow:before{right:0;border-width:.5rem 0 .5rem .5rem;border-left-color:rgba(33,37,41,.25)}.bs-popover-auto[x-placement^=left]>.arrow:after,.bs-popover-left>.arrow:after{right:1px;border-width:.5rem 0 .5rem .5rem;border-left-color:#fff}.popover-header{padding:.5rem .75rem;margin-bottom:0;font-size:1rem;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-top-left-radius:7px;border-top-right-radius:7px}.popover-header:empty{display:none}.popover-body{padding:.5rem 
.75rem;color:#212529}.carousel{position:relative}.carousel.pointer-event{touch-action:pan-y}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner:after{display:block;clear:both;content:""}.carousel-item{position:relative;display:none;float:left;width:100%;margin-right:-100%;-webkit-backface-visibility:hidden;backface-visibility:hidden;transition:transform .6s ease-in-out}@media(prefers-reduced-motion:reduce){.carousel-item{transition:none}}.carousel-item-next,.carousel-item-prev,.carousel-item.active{display:block}.active.carousel-item-right,.carousel-item-next:not(.carousel-item-left){transform:translateX(100%)}.active.carousel-item-left,.carousel-item-prev:not(.carousel-item-right){transform:translateX(-100%)}.carousel-fade .carousel-item{opacity:0;transition-property:opacity;transform:none}.carousel-fade .carousel-item-next.carousel-item-left,.carousel-fade .carousel-item-prev.carousel-item-right,.carousel-fade .carousel-item.active{z-index:1;opacity:1}.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-right{z-index:0;opacity:0;transition:opacity 0s .6s}@media(prefers-reduced-motion:reduce){.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-right{transition:none}}.carousel-control-next,.carousel-control-prev{position:absolute;top:0;bottom:0;z-index:1;display:flex;align-items:center;justify-content:center;width:15%;color:#fff;text-align:center;opacity:.5;transition:opacity .15s ease}@media(prefers-reduced-motion:reduce){.carousel-control-next,.carousel-control-prev{transition:none}}.carousel-control-next:focus,.carousel-control-next:hover,.carousel-control-prev:focus,.carousel-control-prev:hover{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-next-icon,.carousel-control-prev-icon{display:inline-block;width:20px;height:20px;background:no-repeat 50%/100% 100%}.carousel-control-prev-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' width='8' height='8'%3E%3Cpath d='M5.25 0l-4 4 4 4 1.5-1.5L4.25 4l2.5-2.5L5.25 0z'/%3E%3C/svg%3E")}.carousel-control-next-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' width='8' height='8'%3E%3Cpath d='M2.75 0l-1.5 1.5L3.75 4l-2.5 2.5L2.75 8l4-4-4-4z'/%3E%3C/svg%3E")}.carousel-indicators{position:absolute;right:0;bottom:0;left:0;z-index:15;display:flex;justify-content:center;padding-left:0;margin-right:15%;margin-left:15%;list-style:none}.carousel-indicators li{box-sizing:content-box;flex:0 1 auto;width:30px;height:3px;margin-right:3px;margin-left:3px;text-indent:-999px;cursor:pointer;background-color:#fff;background-clip:padding-box;border-top:10px solid transparent;border-bottom:10px solid transparent;opacity:.5;transition:opacity .6s ease}@media(prefers-reduced-motion:reduce){.carousel-indicators li{transition:none}}.carousel-indicators .active{opacity:1}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center}@-webkit-keyframes spinner-border{to{transform:rotate(1turn)}}@keyframes spinner-border{to{transform:rotate(1turn)}}.spinner-border{display:inline-block;width:2rem;height:2rem;vertical-align:text-bottom;border:.25em solid;border-right:.25em solid transparent;border-radius:50%;-webkit-animation:spinner-border .75s linear infinite;animation:spinner-border .75s linear 
infinite}.spinner-border-sm{width:1rem;height:1rem;border-width:.2em}@-webkit-keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1}}@keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1}}.spinner-grow{display:inline-block;width:2rem;height:2rem;vertical-align:text-bottom;background-color:currentColor;border-radius:50%;opacity:0;-webkit-animation:spinner-grow .75s linear infinite;animation:spinner-grow .75s linear infinite}.spinner-grow-sm{width:1rem;height:1rem}.align-baseline{vertical-align:baseline!important}.align-top{vertical-align:top!important}.align-middle{vertical-align:middle!important}.align-bottom{vertical-align:bottom!important}.align-text-bottom{vertical-align:text-bottom!important}.align-text-top{vertical-align:text-top!important}.bg-primary{background-color:#fc0!important}a.bg-primary:focus,a.bg-primary:hover,button.bg-primary:focus,button.bg-primary:hover{background-color:#cca300!important}.bg-secondary{background-color:#212529!important}a.bg-secondary:focus,a.bg-secondary:hover,button.bg-secondary:focus,button.bg-secondary:hover{background-color:#0a0c0d!important}.bg-success{background-color:#28a745!important}a.bg-success:focus,a.bg-success:hover,button.bg-success:focus,button.bg-success:hover{background-color:#1e7e34!important}.bg-info{background-color:#17a2b8!important}a.bg-info:focus,a.bg-info:hover,button.bg-info:focus,button.bg-info:hover{background-color:#117a8b!important}.bg-warning{background-color:#ffc107!important}a.bg-warning:focus,a.bg-warning:hover,button.bg-warning:focus,button.bg-warning:hover{background-color:#d39e00!important}.bg-danger{background-color:#dc3545!important}a.bg-danger:focus,a.bg-danger:hover,button.bg-danger:focus,button.bg-danger:hover{background-color:#bd2130!important}.bg-light{background-color:#f1f6f9!important}a.bg-light:focus,a.bg-light:hover,button.bg-light:focus,button.bg-light:hover{background-color:#cddfea!important}.bg-dark{background-color:#495057!important}a.bg-dark:focus,a.bg-dark:hover,button.bg-dark:focus,button.bg-dark:hover{background-color:#32373b!important}.bg-primary-light{background-color:#fffaf0!important}a.bg-primary-light:focus,a.bg-primary-light:hover,button.bg-primary-light:focus,button.bg-primary-light:hover{background-color:#ffe9bd!important}.bg-secondary-light{background-color:#fff!important}a.bg-secondary-light:focus,a.bg-secondary-light:hover,button.bg-secondary-light:focus,button.bg-secondary-light:hover{background-color:#e6e6e6!important}.bg-tertiary{background-color:#257af4!important}a.bg-tertiary:focus,a.bg-tertiary:hover,button.bg-tertiary:focus,button.bg-tertiary:hover{background-color:#0b60db!important}.bg-tertiary-light{background-color:#e3f1fe!important}a.bg-tertiary-light:focus,a.bg-tertiary-light:hover,button.bg-tertiary-light:focus,button.bg-tertiary-light:hover{background-color:#b2d8fc!important}a.bg-white:focus,a.bg-white:hover,button.bg-white:focus,button.bg-white:hover{background-color:#e6e6e6!important}.bg-black{background-color:#212529!important}a.bg-black:focus,a.bg-black:hover,button.bg-black:focus,button.bg-black:hover{background-color:#0a0c0d!important}.bg-blue{background-color:#257af4!important}a.bg-blue:focus,a.bg-blue:hover,button.bg-blue:focus,button.bg-blue:hover{background-color:#0b60db!important}.bg-light-blue{background-color:#e3f1fe!important}a.bg-light-blue:focus,a.bg-light-blue:hover,button.bg-light-blue:focus,button.bg-light-blue:hover{background-color:#b2d8fc!important}.bg-yellow{background-color:#fc0!important}
a.bg-yellow:focus,a.bg-yellow:hover,button.bg-yellow:focus,button.bg-yellow:hover{background-color:#cca300!important}.bg-light-yellow{background-color:#fffaf0!important}a.bg-light-yellow:focus,a.bg-light-yellow:hover,button.bg-light-yellow:focus,button.bg-light-yellow:hover{background-color:#ffe9bd!important}.bg-orange{background-color:#ff8c00!important}a.bg-orange:focus,a.bg-orange:hover,button.bg-orange:focus,button.bg-orange:hover{background-color:#cc7000!important}.bg-light-orange{background-color:#ffe4b5!important}a.bg-light-orange:focus,a.bg-light-orange:hover,button.bg-light-orange:focus,button.bg-light-orange:hover{background-color:#ffd182!important}.bg-red{background-color:#ff3939!important}a.bg-red:focus,a.bg-red:hover,button.bg-red:focus,button.bg-red:hover{background-color:#ff0606!important}.bg-light-red{background-color:#ffe4e1!important}a.bg-light-red:focus,a.bg-light-red:hover,button.bg-light-red:focus,button.bg-light-red:hover{background-color:#ffb6ae!important}.bg-medium{background-color:#d6dbdf!important}a.bg-medium:focus,a.bg-medium:hover,button.bg-medium:focus,button.bg-medium:hover{background-color:#b9c2c9!important}.bg-white{background-color:#fff!important}.bg-transparent{background-color:transparent!important}.border{border:1px solid #d6dbdf!important}.border-top{border-top:1px solid #d6dbdf!important}.border-right{border-right:1px solid #d6dbdf!important}.border-bottom{border-bottom:1px solid #d6dbdf!important}.border-left{border-left:1px solid #d6dbdf!important}.border-0{border:0!important}.border-top-0{border-top:0!important}.border-right-0{border-right:0!important}.border-bottom-0{border-bottom:0!important}.border-left-0{border-left:0!important}.border-primary{border-color:#fc0!important}.border-secondary{border-color:#212529!important}.border-success{border-color:#28a745!important}.border-info{border-color:#17a2b8!important}.border-warning{border-color:#ffc107!important}.border-danger{border-color:#dc3545!important}.border-light{border-color:#f1f6f9!important}.border-dark{border-color:#495057!important}.border-primary-light{border-color:#fffaf0!important}.border-secondary-light{border-color:#fff!important}.border-tertiary{border-color:#257af4!important}.border-tertiary-light{border-color:#e3f1fe!important}.border-black{border-color:#212529!important}.border-blue{border-color:#257af4!important}.border-light-blue{border-color:#e3f1fe!important}.border-yellow{border-color:#fc0!important}.border-light-yellow{border-color:#fffaf0!important}.border-orange{border-color:#ff8c00!important}.border-light-orange{border-color:#ffe4b5!important}.border-red{border-color:#ff3939!important}.border-light-red{border-color:#ffe4e1!important}.border-medium{border-color:#d6dbdf!important}.border-white{border-color:#fff!important}.rounded-sm{border-radius:4px!important}.rounded{border-radius:8px!important}.rounded-top{border-top-left-radius:8px!important}.rounded-right,.rounded-top{border-top-right-radius:8px!important}.rounded-bottom,.rounded-right{border-bottom-right-radius:8px!important}.rounded-bottom,.rounded-left{border-bottom-left-radius:8px!important}.rounded-left{border-top-left-radius:8px!important}.rounded-lg{border-radius:8px!important}.rounded-circle{border-radius:50%!important}.rounded-pill{border-radius:50rem!important}.rounded-0{border-radius:0!important}.clearfix:after{display:block;clear:both;content:""}.d-none{display:none!important}.d-inline{display:inline!important}.d-inline-block{display:inline-block!important}.d-block{display:block!important}.d-table{display:table!important}.d-table-row{display:table-row!important}
.d-table-cell{display:table-cell!important}.d-flex{display:flex!important}.d-inline-flex{display:inline-flex!important}@media(min-width:400px){.d-xs-none{display:none!important}.d-xs-inline{display:inline!important}.d-xs-inline-block{display:inline-block!important}.d-xs-block{display:block!important}.d-xs-table{display:table!important}.d-xs-table-row{display:table-row!important}.d-xs-table-cell{display:table-cell!important}.d-xs-flex{display:flex!important}.d-xs-inline-flex{display:inline-flex!important}}@media(min-width:616px){.d-sm-none{display:none!important}.d-sm-inline{display:inline!important}.d-sm-inline-block{display:inline-block!important}.d-sm-block{display:block!important}.d-sm-table{display:table!important}.d-sm-table-row{display:table-row!important}.d-sm-table-cell{display:table-cell!important}.d-sm-flex{display:flex!important}.d-sm-inline-flex{display:inline-flex!important}}@media(min-width:768px){.d-md-none{display:none!important}.d-md-inline{display:inline!important}.d-md-inline-block{display:inline-block!important}.d-md-block{display:block!important}.d-md-table{display:table!important}.d-md-table-row{display:table-row!important}.d-md-table-cell{display:table-cell!important}.d-md-flex{display:flex!important}.d-md-inline-flex{display:inline-flex!important}}@media(min-width:980px){.d-lg-none{display:none!important}.d-lg-inline{display:inline!important}.d-lg-inline-block{display:inline-block!important}.d-lg-block{display:block!important}.d-lg-table{display:table!important}.d-lg-table-row{display:table-row!important}.d-lg-table-cell{display:table-cell!important}.d-lg-flex{display:flex!important}.d-lg-inline-flex{display:inline-flex!important}}@media(min-width:1240px){.d-xl-none{display:none!important}.d-xl-inline{display:inline!important}.d-xl-inline-block{display:inline-block!important}.d-xl-block{display:block!important}.d-xl-table{display:table!important}.d-xl-table-row{display:table-row!important}.d-xl-table-cell{display:table-cell!important}.d-xl-flex{display:flex!important}.d-xl-inline-flex{display:inline-flex!important}}@media print{.d-print-none{display:none!important}.d-print-inline{display:inline!important}.d-print-inline-block{display:inline-block!important}.d-print-block{display:block!important}.d-print-table{display:table!important}.d-print-table-row{display:table-row!important}.d-print-table-cell{display:table-cell!important}.d-print-flex{display:flex!important}.d-print-inline-flex{display:inline-flex!important}}.embed-responsive{position:relative;display:block;width:100%;padding:0;overflow:hidden}.embed-responsive:before{display:block;content:""}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-21by9:before{padding-top:42.8571428571%}.embed-responsive-16by9:before{padding-top:56.25%}.embed-responsive-4by3:before{padding-top:75%}.embed-responsive-1by1:before{padding-top:100%}.flex-row{flex-direction:row!important}.flex-column{flex-direction:column!important}.flex-row-reverse{flex-direction:row-reverse!important}.flex-column-reverse{flex-direction:column-reverse!important}.flex-wrap{flex-wrap:wrap!important}.flex-nowrap{flex-wrap:nowrap!important}.flex-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-fill{flex:1 1
auto!important}.flex-grow-0{flex-grow:0!important}.flex-grow-1{flex-grow:1!important}.flex-shrink-0{flex-shrink:0!important}.flex-shrink-1{flex-shrink:1!important}.justify-content-start{justify-content:flex-start!important}.justify-content-end{justify-content:flex-end!important}.justify-content-center{justify-content:center!important}.justify-content-between{justify-content:space-between!important}.justify-content-around{justify-content:space-around!important}.align-items-start{align-items:flex-start!important}.align-items-end{align-items:flex-end!important}.align-items-center{align-items:center!important}.align-items-baseline{align-items:baseline!important}.align-items-stretch{align-items:stretch!important}.align-content-start{align-content:flex-start!important}.align-content-end{align-content:flex-end!important}.align-content-center{align-content:center!important}.align-content-between{align-content:space-between!important}.align-content-around{align-content:space-around!important}.align-content-stretch{align-content:stretch!important}.align-self-auto{align-self:auto!important}.align-self-start{align-self:flex-start!important}.align-self-end{align-self:flex-end!important}.align-self-center{align-self:center!important}.align-self-baseline{align-self:baseline!important}.align-self-stretch{align-self:stretch!important}@media(min-width:400px){.flex-xs-row{flex-direction:row!important}.flex-xs-column{flex-direction:column!important}.flex-xs-row-reverse{flex-direction:row-reverse!important}.flex-xs-column-reverse{flex-direction:column-reverse!important}.flex-xs-wrap{flex-wrap:wrap!important}.flex-xs-nowrap{flex-wrap:nowrap!important}.flex-xs-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-xs-fill{flex:1 1 auto!important}.flex-xs-grow-0{flex-grow:0!important}.flex-xs-grow-1{flex-grow:1!important}.flex-xs-shrink-0{flex-shrink:0!important}.flex-xs-shrink-1{flex-shrink:1!important}.justify-content-xs-start{justify-content:flex-start!important}.justify-content-xs-end{justify-content:flex-end!important}.justify-content-xs-center{justify-content:center!important}.justify-content-xs-between{justify-content:space-between!important}.justify-content-xs-around{justify-content:space-around!important}.align-items-xs-start{align-items:flex-start!important}.align-items-xs-end{align-items:flex-end!important}.align-items-xs-center{align-items:center!important}.align-items-xs-baseline{align-items:baseline!important}.align-items-xs-stretch{align-items:stretch!important}.align-content-xs-start{align-content:flex-start!important}.align-content-xs-end{align-content:flex-end!important}.align-content-xs-center{align-content:center!important}.align-content-xs-between{align-content:space-between!important}.align-content-xs-around{align-content:space-around!important}.align-content-xs-stretch{align-content:stretch!important}.align-self-xs-auto{align-self:auto!important}.align-self-xs-start{align-self:flex-start!important}.align-self-xs-end{align-self:flex-end!important}.align-self-xs-center{align-self:center!important}.align-self-xs-baseline{align-self:baseline!important}.align-self-xs-stretch{align-self:stretch!important}}@media(min-width:616px){.flex-sm-row{flex-direction:row!important}.flex-sm-column{flex-direction:column!important}.flex-sm-row-reverse{flex-direction:row-reverse!important}.flex-sm-column-reverse{flex-direction:column-reverse!important}.flex-sm-wrap{flex-wrap:wrap!important}.flex-sm-nowrap{flex-wrap:nowrap!important}.flex-sm-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-sm-fill{flex:1 1 
auto!important}.flex-sm-grow-0{flex-grow:0!important}.flex-sm-grow-1{flex-grow:1!important}.flex-sm-shrink-0{flex-shrink:0!important}.flex-sm-shrink-1{flex-shrink:1!important}.justify-content-sm-start{justify-content:flex-start!important}.justify-content-sm-end{justify-content:flex-end!important}.justify-content-sm-center{justify-content:center!important}.justify-content-sm-between{justify-content:space-between!important}.justify-content-sm-around{justify-content:space-around!important}.align-items-sm-start{align-items:flex-start!important}.align-items-sm-end{align-items:flex-end!important}.align-items-sm-center{align-items:center!important}.align-items-sm-baseline{align-items:baseline!important}.align-items-sm-stretch{align-items:stretch!important}.align-content-sm-start{align-content:flex-start!important}.align-content-sm-end{align-content:flex-end!important}.align-content-sm-center{align-content:center!important}.align-content-sm-between{align-content:space-between!important}.align-content-sm-around{align-content:space-around!important}.align-content-sm-stretch{align-content:stretch!important}.align-self-sm-auto{align-self:auto!important}.align-self-sm-start{align-self:flex-start!important}.align-self-sm-end{align-self:flex-end!important}.align-self-sm-center{align-self:center!important}.align-self-sm-baseline{align-self:baseline!important}.align-self-sm-stretch{align-self:stretch!important}}@media(min-width:768px){.flex-md-row{flex-direction:row!important}.flex-md-column{flex-direction:column!important}.flex-md-row-reverse{flex-direction:row-reverse!important}.flex-md-column-reverse{flex-direction:column-reverse!important}.flex-md-wrap{flex-wrap:wrap!important}.flex-md-nowrap{flex-wrap:nowrap!important}.flex-md-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-md-fill{flex:1 1 
auto!important}.flex-md-grow-0{flex-grow:0!important}.flex-md-grow-1{flex-grow:1!important}.flex-md-shrink-0{flex-shrink:0!important}.flex-md-shrink-1{flex-shrink:1!important}.justify-content-md-start{justify-content:flex-start!important}.justify-content-md-end{justify-content:flex-end!important}.justify-content-md-center{justify-content:center!important}.justify-content-md-between{justify-content:space-between!important}.justify-content-md-around{justify-content:space-around!important}.align-items-md-start{align-items:flex-start!important}.align-items-md-end{align-items:flex-end!important}.align-items-md-center{align-items:center!important}.align-items-md-baseline{align-items:baseline!important}.align-items-md-stretch{align-items:stretch!important}.align-content-md-start{align-content:flex-start!important}.align-content-md-end{align-content:flex-end!important}.align-content-md-center{align-content:center!important}.align-content-md-between{align-content:space-between!important}.align-content-md-around{align-content:space-around!important}.align-content-md-stretch{align-content:stretch!important}.align-self-md-auto{align-self:auto!important}.align-self-md-start{align-self:flex-start!important}.align-self-md-end{align-self:flex-end!important}.align-self-md-center{align-self:center!important}.align-self-md-baseline{align-self:baseline!important}.align-self-md-stretch{align-self:stretch!important}}@media(min-width:980px){.flex-lg-row{flex-direction:row!important}.flex-lg-column{flex-direction:column!important}.flex-lg-row-reverse{flex-direction:row-reverse!important}.flex-lg-column-reverse{flex-direction:column-reverse!important}.flex-lg-wrap{flex-wrap:wrap!important}.flex-lg-nowrap{flex-wrap:nowrap!important}.flex-lg-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-lg-fill{flex:1 1 
auto!important}.flex-lg-grow-0{flex-grow:0!important}.flex-lg-grow-1{flex-grow:1!important}.flex-lg-shrink-0{flex-shrink:0!important}.flex-lg-shrink-1{flex-shrink:1!important}.justify-content-lg-start{justify-content:flex-start!important}.justify-content-lg-end{justify-content:flex-end!important}.justify-content-lg-center{justify-content:center!important}.justify-content-lg-between{justify-content:space-between!important}.justify-content-lg-around{justify-content:space-around!important}.align-items-lg-start{align-items:flex-start!important}.align-items-lg-end{align-items:flex-end!important}.align-items-lg-center{align-items:center!important}.align-items-lg-baseline{align-items:baseline!important}.align-items-lg-stretch{align-items:stretch!important}.align-content-lg-start{align-content:flex-start!important}.align-content-lg-end{align-content:flex-end!important}.align-content-lg-center{align-content:center!important}.align-content-lg-between{align-content:space-between!important}.align-content-lg-around{align-content:space-around!important}.align-content-lg-stretch{align-content:stretch!important}.align-self-lg-auto{align-self:auto!important}.align-self-lg-start{align-self:flex-start!important}.align-self-lg-end{align-self:flex-end!important}.align-self-lg-center{align-self:center!important}.align-self-lg-baseline{align-self:baseline!important}.align-self-lg-stretch{align-self:stretch!important}}@media(min-width:1240px){.flex-xl-row{flex-direction:row!important}.flex-xl-column{flex-direction:column!important}.flex-xl-row-reverse{flex-direction:row-reverse!important}.flex-xl-column-reverse{flex-direction:column-reverse!important}.flex-xl-wrap{flex-wrap:wrap!important}.flex-xl-nowrap{flex-wrap:nowrap!important}.flex-xl-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-xl-fill{flex:1 1 
auto!important}.flex-xl-grow-0{flex-grow:0!important}.flex-xl-grow-1{flex-grow:1!important}.flex-xl-shrink-0{flex-shrink:0!important}.flex-xl-shrink-1{flex-shrink:1!important}.justify-content-xl-start{justify-content:flex-start!important}.justify-content-xl-end{justify-content:flex-end!important}.justify-content-xl-center{justify-content:center!important}.justify-content-xl-between{justify-content:space-between!important}.justify-content-xl-around{justify-content:space-around!important}.align-items-xl-start{align-items:flex-start!important}.align-items-xl-end{align-items:flex-end!important}.align-items-xl-center{align-items:center!important}.align-items-xl-baseline{align-items:baseline!important}.align-items-xl-stretch{align-items:stretch!important}.align-content-xl-start{align-content:flex-start!important}.align-content-xl-end{align-content:flex-end!important}.align-content-xl-center{align-content:center!important}.align-content-xl-between{align-content:space-between!important}.align-content-xl-around{align-content:space-around!important}.align-content-xl-stretch{align-content:stretch!important}.align-self-xl-auto{align-self:auto!important}.align-self-xl-start{align-self:flex-start!important}.align-self-xl-end{align-self:flex-end!important}.align-self-xl-center{align-self:center!important}.align-self-xl-baseline{align-self:baseline!important}.align-self-xl-stretch{align-self:stretch!important}}.float-left{float:left!important}.float-right{float:right!important}.float-none{float:none!important}@media(min-width:400px){.float-xs-left{float:left!important}.float-xs-right{float:right!important}.float-xs-none{float:none!important}}@media(min-width:616px){.float-sm-left{float:left!important}.float-sm-right{float:right!important}.float-sm-none{float:none!important}}@media(min-width:768px){.float-md-left{float:left!important}.float-md-right{float:right!important}.float-md-none{float:none!important}}@media(min-width:980px){.float-lg-left{float:left!important}.float-lg-right{float:right!important}.float-lg-none{float:none!important}}@media(min-width:1240px){.float-xl-left{float:left!important}.float-xl-right{float:right!important}.float-xl-none{float:none!important}}.overflow-auto{overflow:auto!important}.overflow-hidden{overflow:hidden!important}.position-static{position:static!important}.position-relative{position:relative!important}.position-absolute{position:absolute!important}.position-fixed{position:fixed!important}.position-sticky{position:sticky!important}.fixed-top{top:0}.fixed-bottom,.fixed-top{position:fixed;right:0;left:0;z-index:1030}.fixed-bottom{bottom:0}@supports(position:sticky){.sticky-top{position:sticky;top:0;z-index:1020}}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;overflow:visible;clip:auto;white-space:normal}.shadow-sm{box-shadow:0 2px 14px rgba(108,117,125,.2)!important}.shadow{box-shadow:0 8px 20px rgba(108,117,125,.2)!important}.shadow-lg{box-shadow:0 12px 32px 
rgba(108,117,125,.2)!important}.shadow-none{box-shadow:none!important}.w-25{width:25%!important}.w-50{width:50%!important}.w-75{width:75%!important}.w-100{width:100%!important}.w-auto{width:auto!important}.h-25{height:25%!important}.h-50{height:50%!important}.h-75{height:75%!important}.h-100{height:100%!important}.h-auto{height:auto!important}.mw-100{max-width:100%!important}.mh-100{max-height:100%!important}.min-vw-100{min-width:100vw!important}.min-vh-100{min-height:100vh!important}.vw-100{width:100vw!important}.vh-100{height:100vh!important}.stretched-link:after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;pointer-events:auto;content:"";background-color:transparent}.m-0{margin:0!important}.mt-0,.my-0{margin-top:0!important}.mr-0,.mx-0{margin-right:0!important}.mb-0,.my-0{margin-bottom:0!important}.ml-0,.mx-0{margin-left:0!important}.m-1{margin:8px!important}.mt-1,.my-1{margin-top:8px!important}.mr-1,.mx-1{margin-right:8px!important}.mb-1,.my-1{margin-bottom:8px!important}.ml-1,.mx-1{margin-left:8px!important}.m-2{margin:16px!important}.mt-2,.my-2{margin-top:16px!important}.mr-2,.mx-2{margin-right:16px!important}.mb-2,.my-2{margin-bottom:16px!important}.ml-2,.mx-2{margin-left:16px!important}.m-3{margin:24px!important}.mt-3,.my-3{margin-top:24px!important}.mr-3,.mx-3{margin-right:24px!important}.mb-3,.my-3{margin-bottom:24px!important}.ml-3,.mx-3{margin-left:24px!important}.m-4{margin:32px!important}.mt-4,.my-4{margin-top:32px!important}.mr-4,.mx-4{margin-right:32px!important}.mb-4,.my-4{margin-bottom:32px!important}.ml-4,.mx-4{margin-left:32px!important}.m-5{margin:40px!important}.mt-5,.my-5{margin-top:40px!important}.mr-5,.mx-5{margin-right:40px!important}.mb-5,.my-5{margin-bottom:40px!important}.ml-5,.mx-5{margin-left:40px!important}.m-6{margin:48px!important}.mt-6,.my-6{margin-top:48px!important}.mr-6,.mx-6{margin-right:48px!important}.mb-6,.my-6{margin-bottom:48px!important}.ml-6,.mx-6{margin-left:48px!important}.m-7{margin:56px!important}.mt-7,.my-7{margin-top:56px!important}.mr-7,.mx-7{margin-right:56px!important}.mb-7,.my-7{margin-bottom:56px!important}.ml-7,.mx-7{margin-left:56px!important}.m-8{margin:64px!important}.mt-8,.my-8{margin-top:64px!important}.mr-8,.mx-8{margin-right:64px!important}.mb-8,.my-8{margin-bottom:64px!important}.ml-8,.mx-8{margin-left:64px!important}.m-9{margin:72px!important}.mt-9,.my-9{margin-top:72px!important}.mr-9,.mx-9{margin-right:72px!important}.mb-9,.my-9{margin-bottom:72px!important}.ml-9,.mx-9{margin-left:72px!important}.m-10{margin:80px!important}.mt-10,.my-10{margin-top:80px!important}.mr-10,.mx-10{margin-right:80px!important}.mb-10,.my-10{margin-bottom:80px!important}.ml-10,.mx-10{margin-left:80px!important}.m-12{margin:96px!important}.mt-12,.my-12{margin-top:96px!important}.mr-12,.mx-12{margin-right:96px!important}.mb-12,.my-12{margin-bottom:96px!important}.ml-12,.mx-12{margin-left:96px!important}.m-15{margin:120px!important}.mt-15,.my-15{margin-top:120px!important}.mr-15,.mx-15{margin-right:120px!important}.mb-15,.my-15{margin-bottom:120px!important}.ml-15,.mx-15{margin-left:120px!important}.p-0{padding:0!important}.pt-0,.py-0{padding-top:0!important}.pr-0,.px-0{padding-right:0!important}.pb-0,.py-0{padding-bottom:0!important}.pl-0,.px-0{padding-left:0!important}.p-1{padding:8px!important}.pt-1,.py-1{padding-top:8px!important}.pr-1,.px-1{padding-right:8px!important}.pb-1,.py-1{padding-bottom:8px!important}.pl-1,.px-1{padding-left:8px!important}.p-2{padding:16px!important}.pt-2,.py-2{padding-top:16px!important}
.pr-2,.px-2{padding-right:16px!important}.pb-2,.py-2{padding-bottom:16px!important}.pl-2,.px-2{padding-left:16px!important}.p-3{padding:24px!important}.pt-3,.py-3{padding-top:24px!important}.pr-3,.px-3{padding-right:24px!important}.pb-3,.py-3{padding-bottom:24px!important}.pl-3,.px-3{padding-left:24px!important}.p-4{padding:32px!important}.pt-4,.py-4{padding-top:32px!important}.pr-4,.px-4{padding-right:32px!important}.pb-4,.py-4{padding-bottom:32px!important}.pl-4,.px-4{padding-left:32px!important}.p-5{padding:40px!important}.pt-5,.py-5{padding-top:40px!important}.pr-5,.px-5{padding-right:40px!important}.pb-5,.py-5{padding-bottom:40px!important}.pl-5,.px-5{padding-left:40px!important}.p-6{padding:48px!important}.pt-6,.py-6{padding-top:48px!important}.pr-6,.px-6{padding-right:48px!important}.pb-6,.py-6{padding-bottom:48px!important}.pl-6,.px-6{padding-left:48px!important}.p-7{padding:56px!important}.pt-7,.py-7{padding-top:56px!important}.pr-7,.px-7{padding-right:56px!important}.pb-7,.py-7{padding-bottom:56px!important}.pl-7,.px-7{padding-left:56px!important}.p-8{padding:64px!important}.pt-8,.py-8{padding-top:64px!important}.pr-8,.px-8{padding-right:64px!important}.pb-8,.py-8{padding-bottom:64px!important}.pl-8,.px-8{padding-left:64px!important}.p-9{padding:72px!important}.pt-9,.py-9{padding-top:72px!important}.pr-9,.px-9{padding-right:72px!important}.pb-9,.py-9{padding-bottom:72px!important}.pl-9,.px-9{padding-left:72px!important}.p-10{padding:80px!important}.pt-10,.py-10{padding-top:80px!important}.pr-10,.px-10{padding-right:80px!important}.pb-10,.py-10{padding-bottom:80px!important}.pl-10,.px-10{padding-left:80px!important}.p-12{padding:96px!important}.pt-12,.py-12{padding-top:96px!important}.pr-12,.px-12{padding-right:96px!important}.pb-12,.py-12{padding-bottom:96px!important}.pl-12,.px-12{padding-left:96px!important}.p-15{padding:120px!important}.pt-15,.py-15{padding-top:120px!important}.pr-15,.px-15{padding-right:120px!important}.pb-15,.py-15{padding-bottom:120px!important}.pl-15,.px-15{padding-left:120px!important}.m-n1{margin:-8px!important}.mt-n1,.my-n1{margin-top:-8px!important}.mr-n1,.mx-n1{margin-right:-8px!important}.mb-n1,.my-n1{margin-bottom:-8px!important}.ml-n1,.mx-n1{margin-left:-8px!important}.m-n2{margin:-16px!important}.mt-n2,.my-n2{margin-top:-16px!important}.mr-n2,.mx-n2{margin-right:-16px!important}.mb-n2,.my-n2{margin-bottom:-16px!important}.ml-n2,.mx-n2{margin-left:-16px!important}.m-n3{margin:-24px!important}.mt-n3,.my-n3{margin-top:-24px!important}.mr-n3,.mx-n3{margin-right:-24px!important}.mb-n3,.my-n3{margin-bottom:-24px!important}.ml-n3,.mx-n3{margin-left:-24px!important}.m-n4{margin:-32px!important}.mt-n4,.my-n4{margin-top:-32px!important}.mr-n4,.mx-n4{margin-right:-32px!important}.mb-n4,.my-n4{margin-bottom:-32px!important}.ml-n4,.mx-n4{margin-left:-32px!important}.m-n5{margin:-40px!important}.mt-n5,.my-n5{margin-top:-40px!important}.mr-n5,.mx-n5{margin-right:-40px!important}.mb-n5,.my-n5{margin-bottom:-40px!important}.ml-n5,.mx-n5{margin-left:-40px!important}.m-n6{margin:-48px!important}.mt-n6,.my-n6{margin-top:-48px!important}.mr-n6,.mx-n6{margin-right:-48px!important}.mb-n6,.my-n6{margin-bottom:-48px!important}.ml-n6,.mx-n6{margin-left:-48px!important}.m-n7{margin:-56px!important}.mt-n7,.my-n7{margin-top:-56px!important}.mr-n7,.mx-n7{margin-right:-56px!important}.mb-n7,.my-n7{margin-bottom:-56px!important}.ml-n7,.mx-n7{margin-left:-56px!important}.m-n8{margin:-64px!important}.mt-n8,.my-n8{margin-top:-64px!important}.mr-n8,.mx-n8{margin-right:-64px!important}
.mb-n8,.my-n8{margin-bottom:-64px!important}.ml-n8,.mx-n8{margin-left:-64px!important}.m-n9{margin:-72px!important}.mt-n9,.my-n9{margin-top:-72px!important}.mr-n9,.mx-n9{margin-right:-72px!important}.mb-n9,.my-n9{margin-bottom:-72px!important}.ml-n9,.mx-n9{margin-left:-72px!important}.m-n10{margin:-80px!important}.mt-n10,.my-n10{margin-top:-80px!important}.mr-n10,.mx-n10{margin-right:-80px!important}.mb-n10,.my-n10{margin-bottom:-80px!important}.ml-n10,.mx-n10{margin-left:-80px!important}.m-n12{margin:-96px!important}.mt-n12,.my-n12{margin-top:-96px!important}.mr-n12,.mx-n12{margin-right:-96px!important}.mb-n12,.my-n12{margin-bottom:-96px!important}.ml-n12,.mx-n12{margin-left:-96px!important}.m-n15{margin:-120px!important}.mt-n15,.my-n15{margin-top:-120px!important}.mr-n15,.mx-n15{margin-right:-120px!important}.mb-n15,.my-n15{margin-bottom:-120px!important}.ml-n15,.mx-n15{margin-left:-120px!important}.m-auto{margin:auto!important}.mt-auto,.my-auto{margin-top:auto!important}.mr-auto,.mx-auto{margin-right:auto!important}.mb-auto,.my-auto{margin-bottom:auto!important}.ml-auto,.mx-auto{margin-left:auto!important}@media(min-width:400px){.m-xs-0{margin:0!important}.mt-xs-0,.my-xs-0{margin-top:0!important}.mr-xs-0,.mx-xs-0{margin-right:0!important}.mb-xs-0,.my-xs-0{margin-bottom:0!important}.ml-xs-0,.mx-xs-0{margin-left:0!important}.m-xs-1{margin:8px!important}.mt-xs-1,.my-xs-1{margin-top:8px!important}.mr-xs-1,.mx-xs-1{margin-right:8px!important}.mb-xs-1,.my-xs-1{margin-bottom:8px!important}.ml-xs-1,.mx-xs-1{margin-left:8px!important}.m-xs-2{margin:16px!important}.mt-xs-2,.my-xs-2{margin-top:16px!important}.mr-xs-2,.mx-xs-2{margin-right:16px!important}.mb-xs-2,.my-xs-2{margin-bottom:16px!important}.ml-xs-2,.mx-xs-2{margin-left:16px!important}.m-xs-3{margin:24px!important}.mt-xs-3,.my-xs-3{margin-top:24px!important}.mr-xs-3,.mx-xs-3{margin-right:24px!important}.mb-xs-3,.my-xs-3{margin-bottom:24px!important}.ml-xs-3,.mx-xs-3{margin-left:24px!important}.m-xs-4{margin:32px!important}.mt-xs-4,.my-xs-4{margin-top:32px!important}.mr-xs-4,.mx-xs-4{margin-right:32px!important}.mb-xs-4,.my-xs-4{margin-bottom:32px!important}.ml-xs-4,.mx-xs-4{margin-left:32px!important}.m-xs-5{margin:40px!important}.mt-xs-5,.my-xs-5{margin-top:40px!important}.mr-xs-5,.mx-xs-5{margin-right:40px!important}.mb-xs-5,.my-xs-5{margin-bottom:40px!important}.ml-xs-5,.mx-xs-5{margin-left:40px!important}.m-xs-6{margin:48px!important}.mt-xs-6,.my-xs-6{margin-top:48px!important}.mr-xs-6,.mx-xs-6{margin-right:48px!important}.mb-xs-6,.my-xs-6{margin-bottom:48px!important}.ml-xs-6,.mx-xs-6{margin-left:48px!important}.m-xs-7{margin:56px!important}.mt-xs-7,.my-xs-7{margin-top:56px!important}.mr-xs-7,.mx-xs-7{margin-right:56px!important}.mb-xs-7,.my-xs-7{margin-bottom:56px!important}.ml-xs-7,.mx-xs-7{margin-left:56px!important}.m-xs-8{margin:64px!important}.mt-xs-8,.my-xs-8{margin-top:64px!important}.mr-xs-8,.mx-xs-8{margin-right:64px!important}.mb-xs-8,.my-xs-8{margin-bottom:64px!important}.ml-xs-8,.mx-xs-8{margin-left:64px!important}.m-xs-9{margin:72px!important}.mt-xs-9,.my-xs-9{margin-top:72px!important}.mr-xs-9,.mx-xs-9{margin-right:72px!important}.mb-xs-9,.my-xs-9{margin-bottom:72px!important}.ml-xs-9,.mx-xs-9{margin-left:72px!important}.m-xs-10{margin:80px!important}.mt-xs-10,.my-xs-10{margin-top:80px!important}.mr-xs-10,.mx-xs-10{margin-right:80px!important}.mb-xs-10,.my-xs-10{margin-bottom:80px!important}.ml-xs-10,.mx-xs-10{margin-left:80px!important}.m-xs-12{margin:96px!important}.mt-xs-12,.my-xs-12{margin-top:96px!important}
.mr-xs-12,.mx-xs-12{margin-right:96px!important}.mb-xs-12,.my-xs-12{margin-bottom:96px!important}.ml-xs-12,.mx-xs-12{margin-left:96px!important}.m-xs-15{margin:120px!important}.mt-xs-15,.my-xs-15{margin-top:120px!important}.mr-xs-15,.mx-xs-15{margin-right:120px!important}.mb-xs-15,.my-xs-15{margin-bottom:120px!important}.ml-xs-15,.mx-xs-15{margin-left:120px!important}.p-xs-0{padding:0!important}.pt-xs-0,.py-xs-0{padding-top:0!important}.pr-xs-0,.px-xs-0{padding-right:0!important}.pb-xs-0,.py-xs-0{padding-bottom:0!important}.pl-xs-0,.px-xs-0{padding-left:0!important}.p-xs-1{padding:8px!important}.pt-xs-1,.py-xs-1{padding-top:8px!important}.pr-xs-1,.px-xs-1{padding-right:8px!important}.pb-xs-1,.py-xs-1{padding-bottom:8px!important}.pl-xs-1,.px-xs-1{padding-left:8px!important}.p-xs-2{padding:16px!important}.pt-xs-2,.py-xs-2{padding-top:16px!important}.pr-xs-2,.px-xs-2{padding-right:16px!important}.pb-xs-2,.py-xs-2{padding-bottom:16px!important}.pl-xs-2,.px-xs-2{padding-left:16px!important}.p-xs-3{padding:24px!important}.pt-xs-3,.py-xs-3{padding-top:24px!important}.pr-xs-3,.px-xs-3{padding-right:24px!important}.pb-xs-3,.py-xs-3{padding-bottom:24px!important}.pl-xs-3,.px-xs-3{padding-left:24px!important}.p-xs-4{padding:32px!important}.pt-xs-4,.py-xs-4{padding-top:32px!important}.pr-xs-4,.px-xs-4{padding-right:32px!important}.pb-xs-4,.py-xs-4{padding-bottom:32px!important}.pl-xs-4,.px-xs-4{padding-left:32px!important}.p-xs-5{padding:40px!important}.pt-xs-5,.py-xs-5{padding-top:40px!important}.pr-xs-5,.px-xs-5{padding-right:40px!important}.pb-xs-5,.py-xs-5{padding-bottom:40px!important}.pl-xs-5,.px-xs-5{padding-left:40px!important}.p-xs-6{padding:48px!important}.pt-xs-6,.py-xs-6{padding-top:48px!important}.pr-xs-6,.px-xs-6{padding-right:48px!important}.pb-xs-6,.py-xs-6{padding-bottom:48px!important}.pl-xs-6,.px-xs-6{padding-left:48px!important}.p-xs-7{padding:56px!important}.pt-xs-7,.py-xs-7{padding-top:56px!important}.pr-xs-7,.px-xs-7{padding-right:56px!important}.pb-xs-7,.py-xs-7{padding-bottom:56px!important}.pl-xs-7,.px-xs-7{padding-left:56px!important}.p-xs-8{padding:64px!important}.pt-xs-8,.py-xs-8{padding-top:64px!important}.pr-xs-8,.px-xs-8{padding-right:64px!important}.pb-xs-8,.py-xs-8{padding-bottom:64px!important}.pl-xs-8,.px-xs-8{padding-left:64px!important}.p-xs-9{padding:72px!important}.pt-xs-9,.py-xs-9{padding-top:72px!important}.pr-xs-9,.px-xs-9{padding-right:72px!important}.pb-xs-9,.py-xs-9{padding-bottom:72px!important}.pl-xs-9,.px-xs-9{padding-left:72px!important}.p-xs-10{padding:80px!important}.pt-xs-10,.py-xs-10{padding-top:80px!important}.pr-xs-10,.px-xs-10{padding-right:80px!important}.pb-xs-10,.py-xs-10{padding-bottom:80px!important}.pl-xs-10,.px-xs-10{padding-left:80px!important}.p-xs-12{padding:96px!important}.pt-xs-12,.py-xs-12{padding-top:96px!important}.pr-xs-12,.px-xs-12{padding-right:96px!important}.pb-xs-12,.py-xs-12{padding-bottom:96px!important}.pl-xs-12,.px-xs-12{padding-left:96px!important}.p-xs-15{padding:120px!important}.pt-xs-15,.py-xs-15{padding-top:120px!important}.pr-xs-15,.px-xs-15{padding-right:120px!important}.pb-xs-15,.py-xs-15{padding-bottom:120px!important}.pl-xs-15,.px-xs-15{padding-left:120px!important}.m-xs-n1{margin:-8px!important}.mt-xs-n1,.my-xs-n1{margin-top:-8px!important}.mr-xs-n1,.mx-xs-n1{margin-right:-8px!important}.mb-xs-n1,.my-xs-n1{margin-bottom:-8px!important}.ml-xs-n1,.mx-xs-n1{margin-left:-8px!important}.m-xs-n2{margin:-16px!important}.mt-xs-n2,.my-xs-n2{margin-top:-16px!important}.mr-xs-n2,.mx-xs-n2{margin-right:-16px!important}.mb-xs-n2,.my-xs-n2{margin-bot
tom:-16px!important}.ml-xs-n2,.mx-xs-n2{margin-left:-16px!important}.m-xs-n3{margin:-24px!important}.mt-xs-n3,.my-xs-n3{margin-top:-24px!important}.mr-xs-n3,.mx-xs-n3{margin-right:-24px!important}.mb-xs-n3,.my-xs-n3{margin-bottom:-24px!important}.ml-xs-n3,.mx-xs-n3{margin-left:-24px!important}.m-xs-n4{margin:-32px!important}.mt-xs-n4,.my-xs-n4{margin-top:-32px!important}.mr-xs-n4,.mx-xs-n4{margin-right:-32px!important}.mb-xs-n4,.my-xs-n4{margin-bottom:-32px!important}.ml-xs-n4,.mx-xs-n4{margin-left:-32px!important}.m-xs-n5{margin:-40px!important}.mt-xs-n5,.my-xs-n5{margin-top:-40px!important}.mr-xs-n5,.mx-xs-n5{margin-right:-40px!important}.mb-xs-n5,.my-xs-n5{margin-bottom:-40px!important}.ml-xs-n5,.mx-xs-n5{margin-left:-40px!important}.m-xs-n6{margin:-48px!important}.mt-xs-n6,.my-xs-n6{margin-top:-48px!important}.mr-xs-n6,.mx-xs-n6{margin-right:-48px!important}.mb-xs-n6,.my-xs-n6{margin-bottom:-48px!important}.ml-xs-n6,.mx-xs-n6{margin-left:-48px!important}.m-xs-n7{margin:-56px!important}.mt-xs-n7,.my-xs-n7{margin-top:-56px!important}.mr-xs-n7,.mx-xs-n7{margin-right:-56px!important}.mb-xs-n7,.my-xs-n7{margin-bottom:-56px!important}.ml-xs-n7,.mx-xs-n7{margin-left:-56px!important}.m-xs-n8{margin:-64px!important}.mt-xs-n8,.my-xs-n8{margin-top:-64px!important}.mr-xs-n8,.mx-xs-n8{margin-right:-64px!important}.mb-xs-n8,.my-xs-n8{margin-bottom:-64px!important}.ml-xs-n8,.mx-xs-n8{margin-left:-64px!important}.m-xs-n9{margin:-72px!important}.mt-xs-n9,.my-xs-n9{margin-top:-72px!important}.mr-xs-n9,.mx-xs-n9{margin-right:-72px!important}.mb-xs-n9,.my-xs-n9{margin-bottom:-72px!important}.ml-xs-n9,.mx-xs-n9{margin-left:-72px!important}.m-xs-n10{margin:-80px!important}.mt-xs-n10,.my-xs-n10{margin-top:-80px!important}.mr-xs-n10,.mx-xs-n10{margin-right:-80px!important}.mb-xs-n10,.my-xs-n10{margin-bottom:-80px!important}.ml-xs-n10,.mx-xs-n10{margin-left:-80px!important}.m-xs-n12{margin:-96px!important}.mt-xs-n12,.my-xs-n12{margin-top:-96px!important}.mr-xs-n12,.mx-xs-n12{margin-right:-96px!important}.mb-xs-n12,.my-xs-n12{margin-bottom:-96px!important}.ml-xs-n12,.mx-xs-n12{margin-left:-96px!important}.m-xs-n15{margin:-120px!important}.mt-xs-n15,.my-xs-n15{margin-top:-120px!important}.mr-xs-n15,.mx-xs-n15{margin-right:-120px!important}.mb-xs-n15,.my-xs-n15{margin-bottom:-120px!important}.ml-xs-n15,.mx-xs-n15{margin-left:-120px!important}.m-xs-auto{margin:auto!important}.mt-xs-auto,.my-xs-auto{margin-top:auto!important}.mr-xs-auto,.mx-xs-auto{margin-right:auto!important}.mb-xs-auto,.my-xs-auto{margin-bottom:auto!important}.ml-xs-auto,.mx-xs-auto{margin-left:auto!important}}@media(min-width:616px){.m-sm-0{margin:0!important}.mt-sm-0,.my-sm-0{margin-top:0!important}.mr-sm-0,.mx-sm-0{margin-right:0!important}.mb-sm-0,.my-sm-0{margin-bottom:0!important}.ml-sm-0,.mx-sm-0{margin-left:0!important}.m-sm-1{margin:8px!important}.mt-sm-1,.my-sm-1{margin-top:8px!important}.mr-sm-1,.mx-sm-1{margin-right:8px!important}.mb-sm-1,.my-sm-1{margin-bottom:8px!important}.ml-sm-1,.mx-sm-1{margin-left:8px!important}.m-sm-2{margin:16px!important}.mt-sm-2,.my-sm-2{margin-top:16px!important}.mr-sm-2,.mx-sm-2{margin-right:16px!important}.mb-sm-2,.my-sm-2{margin-bottom:16px!important}.ml-sm-2,.mx-sm-2{margin-left:16px!important}.m-sm-3{margin:24px!important}.mt-sm-3,.my-sm-3{margin-top:24px!important}.mr-sm-3,.mx-sm-3{margin-right:24px!important}.mb-sm-3,.my-sm-3{margin-bottom:24px!important}.ml-sm-3,.mx-sm-3{margin-left:24px!important}.m-sm-4{margin:32px!important}.mt-sm-4,.my-sm-4{margin-top:32px!important}.mr-sm-4,.mx-sm-4{margin-righ
t:32px!important}.mb-sm-4,.my-sm-4{margin-bottom:32px!important}.ml-sm-4,.mx-sm-4{margin-left:32px!important}.m-sm-5{margin:40px!important}.mt-sm-5,.my-sm-5{margin-top:40px!important}.mr-sm-5,.mx-sm-5{margin-right:40px!important}.mb-sm-5,.my-sm-5{margin-bottom:40px!important}.ml-sm-5,.mx-sm-5{margin-left:40px!important}.m-sm-6{margin:48px!important}.mt-sm-6,.my-sm-6{margin-top:48px!important}.mr-sm-6,.mx-sm-6{margin-right:48px!important}.mb-sm-6,.my-sm-6{margin-bottom:48px!important}.ml-sm-6,.mx-sm-6{margin-left:48px!important}.m-sm-7{margin:56px!important}.mt-sm-7,.my-sm-7{margin-top:56px!important}.mr-sm-7,.mx-sm-7{margin-right:56px!important}.mb-sm-7,.my-sm-7{margin-bottom:56px!important}.ml-sm-7,.mx-sm-7{margin-left:56px!important}.m-sm-8{margin:64px!important}.mt-sm-8,.my-sm-8{margin-top:64px!important}.mr-sm-8,.mx-sm-8{margin-right:64px!important}.mb-sm-8,.my-sm-8{margin-bottom:64px!important}.ml-sm-8,.mx-sm-8{margin-left:64px!important}.m-sm-9{margin:72px!important}.mt-sm-9,.my-sm-9{margin-top:72px!important}.mr-sm-9,.mx-sm-9{margin-right:72px!important}.mb-sm-9,.my-sm-9{margin-bottom:72px!important}.ml-sm-9,.mx-sm-9{margin-left:72px!important}.m-sm-10{margin:80px!important}.mt-sm-10,.my-sm-10{margin-top:80px!important}.mr-sm-10,.mx-sm-10{margin-right:80px!important}.mb-sm-10,.my-sm-10{margin-bottom:80px!important}.ml-sm-10,.mx-sm-10{margin-left:80px!important}.m-sm-12{margin:96px!important}.mt-sm-12,.my-sm-12{margin-top:96px!important}.mr-sm-12,.mx-sm-12{margin-right:96px!important}.mb-sm-12,.my-sm-12{margin-bottom:96px!important}.ml-sm-12,.mx-sm-12{margin-left:96px!important}.m-sm-15{margin:120px!important}.mt-sm-15,.my-sm-15{margin-top:120px!important}.mr-sm-15,.mx-sm-15{margin-right:120px!important}.mb-sm-15,.my-sm-15{margin-bottom:120px!important}.ml-sm-15,.mx-sm-15{margin-left:120px!important}.p-sm-0{padding:0!important}.pt-sm-0,.py-sm-0{padding-top:0!important}.pr-sm-0,.px-sm-0{padding-right:0!important}.pb-sm-0,.py-sm-0{padding-bottom:0!important}.pl-sm-0,.px-sm-0{padding-left:0!important}.p-sm-1{padding:8px!important}.pt-sm-1,.py-sm-1{padding-top:8px!important}.pr-sm-1,.px-sm-1{padding-right:8px!important}.pb-sm-1,.py-sm-1{padding-bottom:8px!important}.pl-sm-1,.px-sm-1{padding-left:8px!important}.p-sm-2{padding:16px!important}.pt-sm-2,.py-sm-2{padding-top:16px!important}.pr-sm-2,.px-sm-2{padding-right:16px!important}.pb-sm-2,.py-sm-2{padding-bottom:16px!important}.pl-sm-2,.px-sm-2{padding-left:16px!important}.p-sm-3{padding:24px!important}.pt-sm-3,.py-sm-3{padding-top:24px!important}.pr-sm-3,.px-sm-3{padding-right:24px!important}.pb-sm-3,.py-sm-3{padding-bottom:24px!important}.pl-sm-3,.px-sm-3{padding-left:24px!important}.p-sm-4{padding:32px!important}.pt-sm-4,.py-sm-4{padding-top:32px!important}.pr-sm-4,.px-sm-4{padding-right:32px!important}.pb-sm-4,.py-sm-4{padding-bottom:32px!important}.pl-sm-4,.px-sm-4{padding-left:32px!important}.p-sm-5{padding:40px!important}.pt-sm-5,.py-sm-5{padding-top:40px!important}.pr-sm-5,.px-sm-5{padding-right:40px!important}.pb-sm-5,.py-sm-5{padding-bottom:40px!important}.pl-sm-5,.px-sm-5{padding-left:40px!important}.p-sm-6{padding:48px!important}.pt-sm-6,.py-sm-6{padding-top:48px!important}.pr-sm-6,.px-sm-6{padding-right:48px!important}.pb-sm-6,.py-sm-6{padding-bottom:48px!important}.pl-sm-6,.px-sm-6{padding-left:48px!important}.p-sm-7{padding:56px!important}.pt-sm-7,.py-sm-7{padding-top:56px!important}.pr-sm-7,.px-sm-7{padding-right:56px!important}.pb-sm-7,.py-sm-7{padding-bottom:56px!important}.pl-sm-7,.px-sm-7{padding-left:56px!important}.p
-sm-8{padding:64px!important}.pt-sm-8,.py-sm-8{padding-top:64px!important}.pr-sm-8,.px-sm-8{padding-right:64px!important}.pb-sm-8,.py-sm-8{padding-bottom:64px!important}.pl-sm-8,.px-sm-8{padding-left:64px!important}.p-sm-9{padding:72px!important}.pt-sm-9,.py-sm-9{padding-top:72px!important}.pr-sm-9,.px-sm-9{padding-right:72px!important}.pb-sm-9,.py-sm-9{padding-bottom:72px!important}.pl-sm-9,.px-sm-9{padding-left:72px!important}.p-sm-10{padding:80px!important}.pt-sm-10,.py-sm-10{padding-top:80px!important}.pr-sm-10,.px-sm-10{padding-right:80px!important}.pb-sm-10,.py-sm-10{padding-bottom:80px!important}.pl-sm-10,.px-sm-10{padding-left:80px!important}.p-sm-12{padding:96px!important}.pt-sm-12,.py-sm-12{padding-top:96px!important}.pr-sm-12,.px-sm-12{padding-right:96px!important}.pb-sm-12,.py-sm-12{padding-bottom:96px!important}.pl-sm-12,.px-sm-12{padding-left:96px!important}.p-sm-15{padding:120px!important}.pt-sm-15,.py-sm-15{padding-top:120px!important}.pr-sm-15,.px-sm-15{padding-right:120px!important}.pb-sm-15,.py-sm-15{padding-bottom:120px!important}.pl-sm-15,.px-sm-15{padding-left:120px!important}.m-sm-n1{margin:-8px!important}.mt-sm-n1,.my-sm-n1{margin-top:-8px!important}.mr-sm-n1,.mx-sm-n1{margin-right:-8px!important}.mb-sm-n1,.my-sm-n1{margin-bottom:-8px!important}.ml-sm-n1,.mx-sm-n1{margin-left:-8px!important}.m-sm-n2{margin:-16px!important}.mt-sm-n2,.my-sm-n2{margin-top:-16px!important}.mr-sm-n2,.mx-sm-n2{margin-right:-16px!important}.mb-sm-n2,.my-sm-n2{margin-bottom:-16px!important}.ml-sm-n2,.mx-sm-n2{margin-left:-16px!important}.m-sm-n3{margin:-24px!important}.mt-sm-n3,.my-sm-n3{margin-top:-24px!important}.mr-sm-n3,.mx-sm-n3{margin-right:-24px!important}.mb-sm-n3,.my-sm-n3{margin-bottom:-24px!important}.ml-sm-n3,.mx-sm-n3{margin-left:-24px!important}.m-sm-n4{margin:-32px!important}.mt-sm-n4,.my-sm-n4{margin-top:-32px!important}.mr-sm-n4,.mx-sm-n4{margin-right:-32px!important}.mb-sm-n4,.my-sm-n4{margin-bottom:-32px!important}.ml-sm-n4,.mx-sm-n4{margin-left:-32px!important}.m-sm-n5{margin:-40px!important}.mt-sm-n5,.my-sm-n5{margin-top:-40px!important}.mr-sm-n5,.mx-sm-n5{margin-right:-40px!important}.mb-sm-n5,.my-sm-n5{margin-bottom:-40px!important}.ml-sm-n5,.mx-sm-n5{margin-left:-40px!important}.m-sm-n6{margin:-48px!important}.mt-sm-n6,.my-sm-n6{margin-top:-48px!important}.mr-sm-n6,.mx-sm-n6{margin-right:-48px!important}.mb-sm-n6,.my-sm-n6{margin-bottom:-48px!important}.ml-sm-n6,.mx-sm-n6{margin-left:-48px!important}.m-sm-n7{margin:-56px!important}.mt-sm-n7,.my-sm-n7{margin-top:-56px!important}.mr-sm-n7,.mx-sm-n7{margin-right:-56px!important}.mb-sm-n7,.my-sm-n7{margin-bottom:-56px!important}.ml-sm-n7,.mx-sm-n7{margin-left:-56px!important}.m-sm-n8{margin:-64px!important}.mt-sm-n8,.my-sm-n8{margin-top:-64px!important}.mr-sm-n8,.mx-sm-n8{margin-right:-64px!important}.mb-sm-n8,.my-sm-n8{margin-bottom:-64px!important}.ml-sm-n8,.mx-sm-n8{margin-left:-64px!important}.m-sm-n9{margin:-72px!important}.mt-sm-n9,.my-sm-n9{margin-top:-72px!important}.mr-sm-n9,.mx-sm-n9{margin-right:-72px!important}.mb-sm-n9,.my-sm-n9{margin-bottom:-72px!important}.ml-sm-n9,.mx-sm-n9{margin-left:-72px!important}.m-sm-n10{margin:-80px!important}.mt-sm-n10,.my-sm-n10{margin-top:-80px!important}.mr-sm-n10,.mx-sm-n10{margin-right:-80px!important}.mb-sm-n10,.my-sm-n10{margin-bottom:-80px!important}.ml-sm-n10,.mx-sm-n10{margin-left:-80px!important}.m-sm-n12{margin:-96px!important}.mt-sm-n12,.my-sm-n12{margin-top:-96px!important}.mr-sm-n12,.mx-sm-n12{margin-right:-96px!important}.mb-sm-n12,.my-sm-n12{margin-bottom:-96px!i
mportant}.ml-sm-n12,.mx-sm-n12{margin-left:-96px!important}.m-sm-n15{margin:-120px!important}.mt-sm-n15,.my-sm-n15{margin-top:-120px!important}.mr-sm-n15,.mx-sm-n15{margin-right:-120px!important}.mb-sm-n15,.my-sm-n15{margin-bottom:-120px!important}.ml-sm-n15,.mx-sm-n15{margin-left:-120px!important}.m-sm-auto{margin:auto!important}.mt-sm-auto,.my-sm-auto{margin-top:auto!important}.mr-sm-auto,.mx-sm-auto{margin-right:auto!important}.mb-sm-auto,.my-sm-auto{margin-bottom:auto!important}.ml-sm-auto,.mx-sm-auto{margin-left:auto!important}}@media(min-width:768px){.m-md-0{margin:0!important}.mt-md-0,.my-md-0{margin-top:0!important}.mr-md-0,.mx-md-0{margin-right:0!important}.mb-md-0,.my-md-0{margin-bottom:0!important}.ml-md-0,.mx-md-0{margin-left:0!important}.m-md-1{margin:8px!important}.mt-md-1,.my-md-1{margin-top:8px!important}.mr-md-1,.mx-md-1{margin-right:8px!important}.mb-md-1,.my-md-1{margin-bottom:8px!important}.ml-md-1,.mx-md-1{margin-left:8px!important}.m-md-2{margin:16px!important}.mt-md-2,.my-md-2{margin-top:16px!important}.mr-md-2,.mx-md-2{margin-right:16px!important}.mb-md-2,.my-md-2{margin-bottom:16px!important}.ml-md-2,.mx-md-2{margin-left:16px!important}.m-md-3{margin:24px!important}.mt-md-3,.my-md-3{margin-top:24px!important}.mr-md-3,.mx-md-3{margin-right:24px!important}.mb-md-3,.my-md-3{margin-bottom:24px!important}.ml-md-3,.mx-md-3{margin-left:24px!important}.m-md-4{margin:32px!important}.mt-md-4,.my-md-4{margin-top:32px!important}.mr-md-4,.mx-md-4{margin-right:32px!important}.mb-md-4,.my-md-4{margin-bottom:32px!important}.ml-md-4,.mx-md-4{margin-left:32px!important}.m-md-5{margin:40px!important}.mt-md-5,.my-md-5{margin-top:40px!important}.mr-md-5,.mx-md-5{margin-right:40px!important}.mb-md-5,.my-md-5{margin-bottom:40px!important}.ml-md-5,.mx-md-5{margin-left:40px!important}.m-md-6{margin:48px!important}.mt-md-6,.my-md-6{margin-top:48px!important}.mr-md-6,.mx-md-6{margin-right:48px!important}.mb-md-6,.my-md-6{margin-bottom:48px!important}.ml-md-6,.mx-md-6{margin-left:48px!important}.m-md-7{margin:56px!important}.mt-md-7,.my-md-7{margin-top:56px!important}.mr-md-7,.mx-md-7{margin-right:56px!important}.mb-md-7,.my-md-7{margin-bottom:56px!important}.ml-md-7,.mx-md-7{margin-left:56px!important}.m-md-8{margin:64px!important}.mt-md-8,.my-md-8{margin-top:64px!important}.mr-md-8,.mx-md-8{margin-right:64px!important}.mb-md-8,.my-md-8{margin-bottom:64px!important}.ml-md-8,.mx-md-8{margin-left:64px!important}.m-md-9{margin:72px!important}.mt-md-9,.my-md-9{margin-top:72px!important}.mr-md-9,.mx-md-9{margin-right:72px!important}.mb-md-9,.my-md-9{margin-bottom:72px!important}.ml-md-9,.mx-md-9{margin-left:72px!important}.m-md-10{margin:80px!important}.mt-md-10,.my-md-10{margin-top:80px!important}.mr-md-10,.mx-md-10{margin-right:80px!important}.mb-md-10,.my-md-10{margin-bottom:80px!important}.ml-md-10,.mx-md-10{margin-left:80px!important}.m-md-12{margin:96px!important}.mt-md-12,.my-md-12{margin-top:96px!important}.mr-md-12,.mx-md-12{margin-right:96px!important}.mb-md-12,.my-md-12{margin-bottom:96px!important}.ml-md-12,.mx-md-12{margin-left:96px!important}.m-md-15{margin:120px!important}.mt-md-15,.my-md-15{margin-top:120px!important}.mr-md-15,.mx-md-15{margin-right:120px!important}.mb-md-15,.my-md-15{margin-bottom:120px!important}.ml-md-15,.mx-md-15{margin-left:120px!important}.p-md-0{padding:0!important}.pt-md-0,.py-md-0{padding-top:0!important}.pr-md-0,.px-md-0{padding-right:0!important}.pb-md-0,.py-md-0{padding-bottom:0!important}.pl-md-0,.px-md-0{padding-left:0!important}.p-md-1{padding:8px!im
portant}.pt-md-1,.py-md-1{padding-top:8px!important}.pr-md-1,.px-md-1{padding-right:8px!important}.pb-md-1,.py-md-1{padding-bottom:8px!important}.pl-md-1,.px-md-1{padding-left:8px!important}.p-md-2{padding:16px!important}.pt-md-2,.py-md-2{padding-top:16px!important}.pr-md-2,.px-md-2{padding-right:16px!important}.pb-md-2,.py-md-2{padding-bottom:16px!important}.pl-md-2,.px-md-2{padding-left:16px!important}.p-md-3{padding:24px!important}.pt-md-3,.py-md-3{padding-top:24px!important}.pr-md-3,.px-md-3{padding-right:24px!important}.pb-md-3,.py-md-3{padding-bottom:24px!important}.pl-md-3,.px-md-3{padding-left:24px!important}.p-md-4{padding:32px!important}.pt-md-4,.py-md-4{padding-top:32px!important}.pr-md-4,.px-md-4{padding-right:32px!important}.pb-md-4,.py-md-4{padding-bottom:32px!important}.pl-md-4,.px-md-4{padding-left:32px!important}.p-md-5{padding:40px!important}.pt-md-5,.py-md-5{padding-top:40px!important}.pr-md-5,.px-md-5{padding-right:40px!important}.pb-md-5,.py-md-5{padding-bottom:40px!important}.pl-md-5,.px-md-5{padding-left:40px!important}.p-md-6{padding:48px!important}.pt-md-6,.py-md-6{padding-top:48px!important}.pr-md-6,.px-md-6{padding-right:48px!important}.pb-md-6,.py-md-6{padding-bottom:48px!important}.pl-md-6,.px-md-6{padding-left:48px!important}.p-md-7{padding:56px!important}.pt-md-7,.py-md-7{padding-top:56px!important}.pr-md-7,.px-md-7{padding-right:56px!important}.pb-md-7,.py-md-7{padding-bottom:56px!important}.pl-md-7,.px-md-7{padding-left:56px!important}.p-md-8{padding:64px!important}.pt-md-8,.py-md-8{padding-top:64px!important}.pr-md-8,.px-md-8{padding-right:64px!important}.pb-md-8,.py-md-8{padding-bottom:64px!important}.pl-md-8,.px-md-8{padding-left:64px!important}.p-md-9{padding:72px!important}.pt-md-9,.py-md-9{padding-top:72px!important}.pr-md-9,.px-md-9{padding-right:72px!important}.pb-md-9,.py-md-9{padding-bottom:72px!important}.pl-md-9,.px-md-9{padding-left:72px!important}.p-md-10{padding:80px!important}.pt-md-10,.py-md-10{padding-top:80px!important}.pr-md-10,.px-md-10{padding-right:80px!important}.pb-md-10,.py-md-10{padding-bottom:80px!important}.pl-md-10,.px-md-10{padding-left:80px!important}.p-md-12{padding:96px!important}.pt-md-12,.py-md-12{padding-top:96px!important}.pr-md-12,.px-md-12{padding-right:96px!important}.pb-md-12,.py-md-12{padding-bottom:96px!important}.pl-md-12,.px-md-12{padding-left:96px!important}.p-md-15{padding:120px!important}.pt-md-15,.py-md-15{padding-top:120px!important}.pr-md-15,.px-md-15{padding-right:120px!important}.pb-md-15,.py-md-15{padding-bottom:120px!important}.pl-md-15,.px-md-15{padding-left:120px!important}.m-md-n1{margin:-8px!important}.mt-md-n1,.my-md-n1{margin-top:-8px!important}.mr-md-n1,.mx-md-n1{margin-right:-8px!important}.mb-md-n1,.my-md-n1{margin-bottom:-8px!important}.ml-md-n1,.mx-md-n1{margin-left:-8px!important}.m-md-n2{margin:-16px!important}.mt-md-n2,.my-md-n2{margin-top:-16px!important}.mr-md-n2,.mx-md-n2{margin-right:-16px!important}.mb-md-n2,.my-md-n2{margin-bottom:-16px!important}.ml-md-n2,.mx-md-n2{margin-left:-16px!important}.m-md-n3{margin:-24px!important}.mt-md-n3,.my-md-n3{margin-top:-24px!important}.mr-md-n3,.mx-md-n3{margin-right:-24px!important}.mb-md-n3,.my-md-n3{margin-bottom:-24px!important}.ml-md-n3,.mx-md-n3{margin-left:-24px!important}.m-md-n4{margin:-32px!important}.mt-md-n4,.my-md-n4{margin-top:-32px!important}.mr-md-n4,.mx-md-n4{margin-right:-32px!important}.mb-md-n4,.my-md-n4{margin-bottom:-32px!important}.ml-md-n4,.mx-md-n4{margin-left:-32px!important}.m-md-n5{margin:-40px!important}.mt-md-n5,.my-m
d-n5{margin-top:-40px!important}.mr-md-n5,.mx-md-n5{margin-right:-40px!important}.mb-md-n5,.my-md-n5{margin-bottom:-40px!important}.ml-md-n5,.mx-md-n5{margin-left:-40px!important}.m-md-n6{margin:-48px!important}.mt-md-n6,.my-md-n6{margin-top:-48px!important}.mr-md-n6,.mx-md-n6{margin-right:-48px!important}.mb-md-n6,.my-md-n6{margin-bottom:-48px!important}.ml-md-n6,.mx-md-n6{margin-left:-48px!important}.m-md-n7{margin:-56px!important}.mt-md-n7,.my-md-n7{margin-top:-56px!important}.mr-md-n7,.mx-md-n7{margin-right:-56px!important}.mb-md-n7,.my-md-n7{margin-bottom:-56px!important}.ml-md-n7,.mx-md-n7{margin-left:-56px!important}.m-md-n8{margin:-64px!important}.mt-md-n8,.my-md-n8{margin-top:-64px!important}.mr-md-n8,.mx-md-n8{margin-right:-64px!important}.mb-md-n8,.my-md-n8{margin-bottom:-64px!important}.ml-md-n8,.mx-md-n8{margin-left:-64px!important}.m-md-n9{margin:-72px!important}.mt-md-n9,.my-md-n9{margin-top:-72px!important}.mr-md-n9,.mx-md-n9{margin-right:-72px!important}.mb-md-n9,.my-md-n9{margin-bottom:-72px!important}.ml-md-n9,.mx-md-n9{margin-left:-72px!important}.m-md-n10{margin:-80px!important}.mt-md-n10,.my-md-n10{margin-top:-80px!important}.mr-md-n10,.mx-md-n10{margin-right:-80px!important}.mb-md-n10,.my-md-n10{margin-bottom:-80px!important}.ml-md-n10,.mx-md-n10{margin-left:-80px!important}.m-md-n12{margin:-96px!important}.mt-md-n12,.my-md-n12{margin-top:-96px!important}.mr-md-n12,.mx-md-n12{margin-right:-96px!important}.mb-md-n12,.my-md-n12{margin-bottom:-96px!important}.ml-md-n12,.mx-md-n12{margin-left:-96px!important}.m-md-n15{margin:-120px!important}.mt-md-n15,.my-md-n15{margin-top:-120px!important}.mr-md-n15,.mx-md-n15{margin-right:-120px!important}.mb-md-n15,.my-md-n15{margin-bottom:-120px!important}.ml-md-n15,.mx-md-n15{margin-left:-120px!important}.m-md-auto{margin:auto!important}.mt-md-auto,.my-md-auto{margin-top:auto!important}.mr-md-auto,.mx-md-auto{margin-right:auto!important}.mb-md-auto,.my-md-auto{margin-bottom:auto!important}.ml-md-auto,.mx-md-auto{margin-left:auto!important}}@media(min-width:980px){.m-lg-0{margin:0!important}.mt-lg-0,.my-lg-0{margin-top:0!important}.mr-lg-0,.mx-lg-0{margin-right:0!important}.mb-lg-0,.my-lg-0{margin-bottom:0!important}.ml-lg-0,.mx-lg-0{margin-left:0!important}.m-lg-1{margin:8px!important}.mt-lg-1,.my-lg-1{margin-top:8px!important}.mr-lg-1,.mx-lg-1{margin-right:8px!important}.mb-lg-1,.my-lg-1{margin-bottom:8px!important}.ml-lg-1,.mx-lg-1{margin-left:8px!important}.m-lg-2{margin:16px!important}.mt-lg-2,.my-lg-2{margin-top:16px!important}.mr-lg-2,.mx-lg-2{margin-right:16px!important}.mb-lg-2,.my-lg-2{margin-bottom:16px!important}.ml-lg-2,.mx-lg-2{margin-left:16px!important}.m-lg-3{margin:24px!important}.mt-lg-3,.my-lg-3{margin-top:24px!important}.mr-lg-3,.mx-lg-3{margin-right:24px!important}.mb-lg-3,.my-lg-3{margin-bottom:24px!important}.ml-lg-3,.mx-lg-3{margin-left:24px!important}.m-lg-4{margin:32px!important}.mt-lg-4,.my-lg-4{margin-top:32px!important}.mr-lg-4,.mx-lg-4{margin-right:32px!important}.mb-lg-4,.my-lg-4{margin-bottom:32px!important}.ml-lg-4,.mx-lg-4{margin-left:32px!important}.m-lg-5{margin:40px!important}.mt-lg-5,.my-lg-5{margin-top:40px!important}.mr-lg-5,.mx-lg-5{margin-right:40px!important}.mb-lg-5,.my-lg-5{margin-bottom:40px!important}.ml-lg-5,.mx-lg-5{margin-left:40px!important}.m-lg-6{margin:48px!important}.mt-lg-6,.my-lg-6{margin-top:48px!important}.mr-lg-6,.mx-lg-6{margin-right:48px!important}.mb-lg-6,.my-lg-6{margin-bottom:48px!important}.ml-lg-6,.mx-lg-6{margin-left:48px!important}.m-lg-7{margin:56px!important}.mt-
lg-7,.my-lg-7{margin-top:56px!important}.mr-lg-7,.mx-lg-7{margin-right:56px!important}.mb-lg-7,.my-lg-7{margin-bottom:56px!important}.ml-lg-7,.mx-lg-7{margin-left:56px!important}.m-lg-8{margin:64px!important}.mt-lg-8,.my-lg-8{margin-top:64px!important}.mr-lg-8,.mx-lg-8{margin-right:64px!important}.mb-lg-8,.my-lg-8{margin-bottom:64px!important}.ml-lg-8,.mx-lg-8{margin-left:64px!important}.m-lg-9{margin:72px!important}.mt-lg-9,.my-lg-9{margin-top:72px!important}.mr-lg-9,.mx-lg-9{margin-right:72px!important}.mb-lg-9,.my-lg-9{margin-bottom:72px!important}.ml-lg-9,.mx-lg-9{margin-left:72px!important}.m-lg-10{margin:80px!important}.mt-lg-10,.my-lg-10{margin-top:80px!important}.mr-lg-10,.mx-lg-10{margin-right:80px!important}.mb-lg-10,.my-lg-10{margin-bottom:80px!important}.ml-lg-10,.mx-lg-10{margin-left:80px!important}.m-lg-12{margin:96px!important}.mt-lg-12,.my-lg-12{margin-top:96px!important}.mr-lg-12,.mx-lg-12{margin-right:96px!important}.mb-lg-12,.my-lg-12{margin-bottom:96px!important}.ml-lg-12,.mx-lg-12{margin-left:96px!important}.m-lg-15{margin:120px!important}.mt-lg-15,.my-lg-15{margin-top:120px!important}.mr-lg-15,.mx-lg-15{margin-right:120px!important}.mb-lg-15,.my-lg-15{margin-bottom:120px!important}.ml-lg-15,.mx-lg-15{margin-left:120px!important}.p-lg-0{padding:0!important}.pt-lg-0,.py-lg-0{padding-top:0!important}.pr-lg-0,.px-lg-0{padding-right:0!important}.pb-lg-0,.py-lg-0{padding-bottom:0!important}.pl-lg-0,.px-lg-0{padding-left:0!important}.p-lg-1{padding:8px!important}.pt-lg-1,.py-lg-1{padding-top:8px!important}.pr-lg-1,.px-lg-1{padding-right:8px!important}.pb-lg-1,.py-lg-1{padding-bottom:8px!important}.pl-lg-1,.px-lg-1{padding-left:8px!important}.p-lg-2{padding:16px!important}.pt-lg-2,.py-lg-2{padding-top:16px!important}.pr-lg-2,.px-lg-2{padding-right:16px!important}.pb-lg-2,.py-lg-2{padding-bottom:16px!important}.pl-lg-2,.px-lg-2{padding-left:16px!important}.p-lg-3{padding:24px!important}.pt-lg-3,.py-lg-3{padding-top:24px!important}.pr-lg-3,.px-lg-3{padding-right:24px!important}.pb-lg-3,.py-lg-3{padding-bottom:24px!important}.pl-lg-3,.px-lg-3{padding-left:24px!important}.p-lg-4{padding:32px!important}.pt-lg-4,.py-lg-4{padding-top:32px!important}.pr-lg-4,.px-lg-4{padding-right:32px!important}.pb-lg-4,.py-lg-4{padding-bottom:32px!important}.pl-lg-4,.px-lg-4{padding-left:32px!important}.p-lg-5{padding:40px!important}.pt-lg-5,.py-lg-5{padding-top:40px!important}.pr-lg-5,.px-lg-5{padding-right:40px!important}.pb-lg-5,.py-lg-5{padding-bottom:40px!important}.pl-lg-5,.px-lg-5{padding-left:40px!important}.p-lg-6{padding:48px!important}.pt-lg-6,.py-lg-6{padding-top:48px!important}.pr-lg-6,.px-lg-6{padding-right:48px!important}.pb-lg-6,.py-lg-6{padding-bottom:48px!important}.pl-lg-6,.px-lg-6{padding-left:48px!important}.p-lg-7{padding:56px!important}.pt-lg-7,.py-lg-7{padding-top:56px!important}.pr-lg-7,.px-lg-7{padding-right:56px!important}.pb-lg-7,.py-lg-7{padding-bottom:56px!important}.pl-lg-7,.px-lg-7{padding-left:56px!important}.p-lg-8{padding:64px!important}.pt-lg-8,.py-lg-8{padding-top:64px!important}.pr-lg-8,.px-lg-8{padding-right:64px!important}.pb-lg-8,.py-lg-8{padding-bottom:64px!important}.pl-lg-8,.px-lg-8{padding-left:64px!important}.p-lg-9{padding:72px!important}.pt-lg-9,.py-lg-9{padding-top:72px!important}.pr-lg-9,.px-lg-9{padding-right:72px!important}.pb-lg-9,.py-lg-9{padding-bottom:72px!important}.pl-lg-9,.px-lg-9{padding-left:72px!important}.p-lg-10{padding:80px!important}.pt-lg-10,.py-lg-10{padding-top:80px!important}.pr-lg-10,.px-lg-10{padding-right:80px!important}.pb-lg-
10,.py-lg-10{padding-bottom:80px!important}.pl-lg-10,.px-lg-10{padding-left:80px!important}.p-lg-12{padding:96px!important}.pt-lg-12,.py-lg-12{padding-top:96px!important}.pr-lg-12,.px-lg-12{padding-right:96px!important}.pb-lg-12,.py-lg-12{padding-bottom:96px!important}.pl-lg-12,.px-lg-12{padding-left:96px!important}.p-lg-15{padding:120px!important}.pt-lg-15,.py-lg-15{padding-top:120px!important}.pr-lg-15,.px-lg-15{padding-right:120px!important}.pb-lg-15,.py-lg-15{padding-bottom:120px!important}.pl-lg-15,.px-lg-15{padding-left:120px!important}.m-lg-n1{margin:-8px!important}.mt-lg-n1,.my-lg-n1{margin-top:-8px!important}.mr-lg-n1,.mx-lg-n1{margin-right:-8px!important}.mb-lg-n1,.my-lg-n1{margin-bottom:-8px!important}.ml-lg-n1,.mx-lg-n1{margin-left:-8px!important}.m-lg-n2{margin:-16px!important}.mt-lg-n2,.my-lg-n2{margin-top:-16px!important}.mr-lg-n2,.mx-lg-n2{margin-right:-16px!important}.mb-lg-n2,.my-lg-n2{margin-bottom:-16px!important}.ml-lg-n2,.mx-lg-n2{margin-left:-16px!important}.m-lg-n3{margin:-24px!important}.mt-lg-n3,.my-lg-n3{margin-top:-24px!important}.mr-lg-n3,.mx-lg-n3{margin-right:-24px!important}.mb-lg-n3,.my-lg-n3{margin-bottom:-24px!important}.ml-lg-n3,.mx-lg-n3{margin-left:-24px!important}.m-lg-n4{margin:-32px!important}.mt-lg-n4,.my-lg-n4{margin-top:-32px!important}.mr-lg-n4,.mx-lg-n4{margin-right:-32px!important}.mb-lg-n4,.my-lg-n4{margin-bottom:-32px!important}.ml-lg-n4,.mx-lg-n4{margin-left:-32px!important}.m-lg-n5{margin:-40px!important}.mt-lg-n5,.my-lg-n5{margin-top:-40px!important}.mr-lg-n5,.mx-lg-n5{margin-right:-40px!important}.mb-lg-n5,.my-lg-n5{margin-bottom:-40px!important}.ml-lg-n5,.mx-lg-n5{margin-left:-40px!important}.m-lg-n6{margin:-48px!important}.mt-lg-n6,.my-lg-n6{margin-top:-48px!important}.mr-lg-n6,.mx-lg-n6{margin-right:-48px!important}.mb-lg-n6,.my-lg-n6{margin-bottom:-48px!important}.ml-lg-n6,.mx-lg-n6{margin-left:-48px!important}.m-lg-n7{margin:-56px!important}.mt-lg-n7,.my-lg-n7{margin-top:-56px!important}.mr-lg-n7,.mx-lg-n7{margin-right:-56px!important}.mb-lg-n7,.my-lg-n7{margin-bottom:-56px!important}.ml-lg-n7,.mx-lg-n7{margin-left:-56px!important}.m-lg-n8{margin:-64px!important}.mt-lg-n8,.my-lg-n8{margin-top:-64px!important}.mr-lg-n8,.mx-lg-n8{margin-right:-64px!important}.mb-lg-n8,.my-lg-n8{margin-bottom:-64px!important}.ml-lg-n8,.mx-lg-n8{margin-left:-64px!important}.m-lg-n9{margin:-72px!important}.mt-lg-n9,.my-lg-n9{margin-top:-72px!important}.mr-lg-n9,.mx-lg-n9{margin-right:-72px!important}.mb-lg-n9,.my-lg-n9{margin-bottom:-72px!important}.ml-lg-n9,.mx-lg-n9{margin-left:-72px!important}.m-lg-n10{margin:-80px!important}.mt-lg-n10,.my-lg-n10{margin-top:-80px!important}.mr-lg-n10,.mx-lg-n10{margin-right:-80px!important}.mb-lg-n10,.my-lg-n10{margin-bottom:-80px!important}.ml-lg-n10,.mx-lg-n10{margin-left:-80px!important}.m-lg-n12{margin:-96px!important}.mt-lg-n12,.my-lg-n12{margin-top:-96px!important}.mr-lg-n12,.mx-lg-n12{margin-right:-96px!important}.mb-lg-n12,.my-lg-n12{margin-bottom:-96px!important}.ml-lg-n12,.mx-lg-n12{margin-left:-96px!important}.m-lg-n15{margin:-120px!important}.mt-lg-n15,.my-lg-n15{margin-top:-120px!important}.mr-lg-n15,.mx-lg-n15{margin-right:-120px!important}.mb-lg-n15,.my-lg-n15{margin-bottom:-120px!important}.ml-lg-n15,.mx-lg-n15{margin-left:-120px!important}.m-lg-auto{margin:auto!important}.mt-lg-auto,.my-lg-auto{margin-top:auto!important}.mr-lg-auto,.mx-lg-auto{margin-right:auto!important}.mb-lg-auto,.my-lg-auto{margin-bottom:auto!important}.ml-lg-auto,.mx-lg-auto{margin-left:auto!important}}@media(min-width:1240px){.m-
xl-0{margin:0!important}.mt-xl-0,.my-xl-0{margin-top:0!important}.mr-xl-0,.mx-xl-0{margin-right:0!important}.mb-xl-0,.my-xl-0{margin-bottom:0!important}.ml-xl-0,.mx-xl-0{margin-left:0!important}.m-xl-1{margin:8px!important}.mt-xl-1,.my-xl-1{margin-top:8px!important}.mr-xl-1,.mx-xl-1{margin-right:8px!important}.mb-xl-1,.my-xl-1{margin-bottom:8px!important}.ml-xl-1,.mx-xl-1{margin-left:8px!important}.m-xl-2{margin:16px!important}.mt-xl-2,.my-xl-2{margin-top:16px!important}.mr-xl-2,.mx-xl-2{margin-right:16px!important}.mb-xl-2,.my-xl-2{margin-bottom:16px!important}.ml-xl-2,.mx-xl-2{margin-left:16px!important}.m-xl-3{margin:24px!important}.mt-xl-3,.my-xl-3{margin-top:24px!important}.mr-xl-3,.mx-xl-3{margin-right:24px!important}.mb-xl-3,.my-xl-3{margin-bottom:24px!important}.ml-xl-3,.mx-xl-3{margin-left:24px!important}.m-xl-4{margin:32px!important}.mt-xl-4,.my-xl-4{margin-top:32px!important}.mr-xl-4,.mx-xl-4{margin-right:32px!important}.mb-xl-4,.my-xl-4{margin-bottom:32px!important}.ml-xl-4,.mx-xl-4{margin-left:32px!important}.m-xl-5{margin:40px!important}.mt-xl-5,.my-xl-5{margin-top:40px!important}.mr-xl-5,.mx-xl-5{margin-right:40px!important}.mb-xl-5,.my-xl-5{margin-bottom:40px!important}.ml-xl-5,.mx-xl-5{margin-left:40px!important}.m-xl-6{margin:48px!important}.mt-xl-6,.my-xl-6{margin-top:48px!important}.mr-xl-6,.mx-xl-6{margin-right:48px!important}.mb-xl-6,.my-xl-6{margin-bottom:48px!important}.ml-xl-6,.mx-xl-6{margin-left:48px!important}.m-xl-7{margin:56px!important}.mt-xl-7,.my-xl-7{margin-top:56px!important}.mr-xl-7,.mx-xl-7{margin-right:56px!important}.mb-xl-7,.my-xl-7{margin-bottom:56px!important}.ml-xl-7,.mx-xl-7{margin-left:56px!important}.m-xl-8{margin:64px!important}.mt-xl-8,.my-xl-8{margin-top:64px!important}.mr-xl-8,.mx-xl-8{margin-right:64px!important}.mb-xl-8,.my-xl-8{margin-bottom:64px!important}.ml-xl-8,.mx-xl-8{margin-left:64px!important}.m-xl-9{margin:72px!important}.mt-xl-9,.my-xl-9{margin-top:72px!important}.mr-xl-9,.mx-xl-9{margin-right:72px!important}.mb-xl-9,.my-xl-9{margin-bottom:72px!important}.ml-xl-9,.mx-xl-9{margin-left:72px!important}.m-xl-10{margin:80px!important}.mt-xl-10,.my-xl-10{margin-top:80px!important}.mr-xl-10,.mx-xl-10{margin-right:80px!important}.mb-xl-10,.my-xl-10{margin-bottom:80px!important}.ml-xl-10,.mx-xl-10{margin-left:80px!important}.m-xl-12{margin:96px!important}.mt-xl-12,.my-xl-12{margin-top:96px!important}.mr-xl-12,.mx-xl-12{margin-right:96px!important}.mb-xl-12,.my-xl-12{margin-bottom:96px!important}.ml-xl-12,.mx-xl-12{margin-left:96px!important}.m-xl-15{margin:120px!important}.mt-xl-15,.my-xl-15{margin-top:120px!important}.mr-xl-15,.mx-xl-15{margin-right:120px!important}.mb-xl-15,.my-xl-15{margin-bottom:120px!important}.ml-xl-15,.mx-xl-15{margin-left:120px!important}.p-xl-0{padding:0!important}.pt-xl-0,.py-xl-0{padding-top:0!important}.pr-xl-0,.px-xl-0{padding-right:0!important}.pb-xl-0,.py-xl-0{padding-bottom:0!important}.pl-xl-0,.px-xl-0{padding-left:0!important}.p-xl-1{padding:8px!important}.pt-xl-1,.py-xl-1{padding-top:8px!important}.pr-xl-1,.px-xl-1{padding-right:8px!important}.pb-xl-1,.py-xl-1{padding-bottom:8px!important}.pl-xl-1,.px-xl-1{padding-left:8px!important}.p-xl-2{padding:16px!important}.pt-xl-2,.py-xl-2{padding-top:16px!important}.pr-xl-2,.px-xl-2{padding-right:16px!important}.pb-xl-2,.py-xl-2{padding-bottom:16px!important}.pl-xl-2,.px-xl-2{padding-left:16px!important}.p-xl-3{padding:24px!important}.pt-xl-3,.py-xl-3{padding-top:24px!important}.pr-xl-3,.px-xl-3{padding-right:24px!important}.pb-xl-3,.py-xl-3{padding-bottom:24p
x!important}.pl-xl-3,.px-xl-3{padding-left:24px!important}.p-xl-4{padding:32px!important}.pt-xl-4,.py-xl-4{padding-top:32px!important}.pr-xl-4,.px-xl-4{padding-right:32px!important}.pb-xl-4,.py-xl-4{padding-bottom:32px!important}.pl-xl-4,.px-xl-4{padding-left:32px!important}.p-xl-5{padding:40px!important}.pt-xl-5,.py-xl-5{padding-top:40px!important}.pr-xl-5,.px-xl-5{padding-right:40px!important}.pb-xl-5,.py-xl-5{padding-bottom:40px!important}.pl-xl-5,.px-xl-5{padding-left:40px!important}.p-xl-6{padding:48px!important}.pt-xl-6,.py-xl-6{padding-top:48px!important}.pr-xl-6,.px-xl-6{padding-right:48px!important}.pb-xl-6,.py-xl-6{padding-bottom:48px!important}.pl-xl-6,.px-xl-6{padding-left:48px!important}.p-xl-7{padding:56px!important}.pt-xl-7,.py-xl-7{padding-top:56px!important}.pr-xl-7,.px-xl-7{padding-right:56px!important}.pb-xl-7,.py-xl-7{padding-bottom:56px!important}.pl-xl-7,.px-xl-7{padding-left:56px!important}.p-xl-8{padding:64px!important}.pt-xl-8,.py-xl-8{padding-top:64px!important}.pr-xl-8,.px-xl-8{padding-right:64px!important}.pb-xl-8,.py-xl-8{padding-bottom:64px!important}.pl-xl-8,.px-xl-8{padding-left:64px!important}.p-xl-9{padding:72px!important}.pt-xl-9,.py-xl-9{padding-top:72px!important}.pr-xl-9,.px-xl-9{padding-right:72px!important}.pb-xl-9,.py-xl-9{padding-bottom:72px!important}.pl-xl-9,.px-xl-9{padding-left:72px!important}.p-xl-10{padding:80px!important}.pt-xl-10,.py-xl-10{padding-top:80px!important}.pr-xl-10,.px-xl-10{padding-right:80px!important}.pb-xl-10,.py-xl-10{padding-bottom:80px!important}.pl-xl-10,.px-xl-10{padding-left:80px!important}.p-xl-12{padding:96px!important}.pt-xl-12,.py-xl-12{padding-top:96px!important}.pr-xl-12,.px-xl-12{padding-right:96px!important}.pb-xl-12,.py-xl-12{padding-bottom:96px!important}.pl-xl-12,.px-xl-12{padding-left:96px!important}.p-xl-15{padding:120px!important}.pt-xl-15,.py-xl-15{padding-top:120px!important}.pr-xl-15,.px-xl-15{padding-right:120px!important}.pb-xl-15,.py-xl-15{padding-bottom:120px!important}.pl-xl-15,.px-xl-15{padding-left:120px!important}.m-xl-n1{margin:-8px!important}.mt-xl-n1,.my-xl-n1{margin-top:-8px!important}.mr-xl-n1,.mx-xl-n1{margin-right:-8px!important}.mb-xl-n1,.my-xl-n1{margin-bottom:-8px!important}.ml-xl-n1,.mx-xl-n1{margin-left:-8px!important}.m-xl-n2{margin:-16px!important}.mt-xl-n2,.my-xl-n2{margin-top:-16px!important}.mr-xl-n2,.mx-xl-n2{margin-right:-16px!important}.mb-xl-n2,.my-xl-n2{margin-bottom:-16px!important}.ml-xl-n2,.mx-xl-n2{margin-left:-16px!important}.m-xl-n3{margin:-24px!important}.mt-xl-n3,.my-xl-n3{margin-top:-24px!important}.mr-xl-n3,.mx-xl-n3{margin-right:-24px!important}.mb-xl-n3,.my-xl-n3{margin-bottom:-24px!important}.ml-xl-n3,.mx-xl-n3{margin-left:-24px!important}.m-xl-n4{margin:-32px!important}.mt-xl-n4,.my-xl-n4{margin-top:-32px!important}.mr-xl-n4,.mx-xl-n4{margin-right:-32px!important}.mb-xl-n4,.my-xl-n4{margin-bottom:-32px!important}.ml-xl-n4,.mx-xl-n4{margin-left:-32px!important}.m-xl-n5{margin:-40px!important}.mt-xl-n5,.my-xl-n5{margin-top:-40px!important}.mr-xl-n5,.mx-xl-n5{margin-right:-40px!important}.mb-xl-n5,.my-xl-n5{margin-bottom:-40px!important}.ml-xl-n5,.mx-xl-n5{margin-left:-40px!important}.m-xl-n6{margin:-48px!important}.mt-xl-n6,.my-xl-n6{margin-top:-48px!important}.mr-xl-n6,.mx-xl-n6{margin-right:-48px!important}.mb-xl-n6,.my-xl-n6{margin-bottom:-48px!important}.ml-xl-n6,.mx-xl-n6{margin-left:-48px!important}.m-xl-n7{margin:-56px!important}.mt-xl-n7,.my-xl-n7{margin-top:-56px!important}.mr-xl-n7,.mx-xl-n7{margin-right:-56px!important}.mb-xl-n7,.my-xl-n7{margin-bottom
:-56px!important}.ml-xl-n7,.mx-xl-n7{margin-left:-56px!important}.m-xl-n8{margin:-64px!important}.mt-xl-n8,.my-xl-n8{margin-top:-64px!important}.mr-xl-n8,.mx-xl-n8{margin-right:-64px!important}.mb-xl-n8,.my-xl-n8{margin-bottom:-64px!important}.ml-xl-n8,.mx-xl-n8{margin-left:-64px!important}.m-xl-n9{margin:-72px!important}.mt-xl-n9,.my-xl-n9{margin-top:-72px!important}.mr-xl-n9,.mx-xl-n9{margin-right:-72px!important}.mb-xl-n9,.my-xl-n9{margin-bottom:-72px!important}.ml-xl-n9,.mx-xl-n9{margin-left:-72px!important}.m-xl-n10{margin:-80px!important}.mt-xl-n10,.my-xl-n10{margin-top:-80px!important}.mr-xl-n10,.mx-xl-n10{margin-right:-80px!important}.mb-xl-n10,.my-xl-n10{margin-bottom:-80px!important}.ml-xl-n10,.mx-xl-n10{margin-left:-80px!important}.m-xl-n12{margin:-96px!important}.mt-xl-n12,.my-xl-n12{margin-top:-96px!important}.mr-xl-n12,.mx-xl-n12{margin-right:-96px!important}.mb-xl-n12,.my-xl-n12{margin-bottom:-96px!important}.ml-xl-n12,.mx-xl-n12{margin-left:-96px!important}.m-xl-n15{margin:-120px!important}.mt-xl-n15,.my-xl-n15{margin-top:-120px!important}.mr-xl-n15,.mx-xl-n15{margin-right:-120px!important}.mb-xl-n15,.my-xl-n15{margin-bottom:-120px!important}.ml-xl-n15,.mx-xl-n15{margin-left:-120px!important}.m-xl-auto{margin:auto!important}.mt-xl-auto,.my-xl-auto{margin-top:auto!important}.mr-xl-auto,.mx-xl-auto{margin-right:auto!important}.mb-xl-auto,.my-xl-auto{margin-bottom:auto!important}.ml-xl-auto,.mx-xl-auto{margin-left:auto!important}}.text-monospace{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace!important}.text-justify{text-align:justify!important}.text-wrap{white-space:normal!important}.text-nowrap{white-space:nowrap!important}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.text-left{text-align:left!important}.text-right{text-align:right!important}.text-center{text-align:center!important}@media(min-width:400px){.text-xs-left{text-align:left!important}.text-xs-right{text-align:right!important}.text-xs-center{text-align:center!important}}@media(min-width:616px){.text-sm-left{text-align:left!important}.text-sm-right{text-align:right!important}.text-sm-center{text-align:center!important}}@media(min-width:768px){.text-md-left{text-align:left!important}.text-md-right{text-align:right!important}.text-md-center{text-align:center!important}}@media(min-width:980px){.text-lg-left{text-align:left!important}.text-lg-right{text-align:right!important}.text-lg-center{text-align:center!important}}@media(min-width:1240px){.text-xl-left{text-align:left!important}.text-xl-right{text-align:right!important}.text-xl-center{text-align:center!important}}.text-lowercase{text-transform:lowercase!important}.text-uppercase{text-transform:uppercase!important}.text-capitalize{text-transform:capitalize!important}.font-weight-light{font-weight:300!important}.font-weight-lighter{font-weight:lighter!important}.font-weight-normal{font-weight:400!important}.font-weight-bold{font-weight:700!important}.font-weight-bolder{font-weight:bolder!important}.font-italic{font-style:italic!important}.text-primary{color:#fc0!important}a.text-primary:focus,a.text-primary:hover{color:#b38f00!important}.text-secondary{color:#212529!important}a.text-secondary:focus,a.text-secondary:hover{color:#000!important}.text-success{color:#28a745!important}a.text-success:focus,a.text-success:hover{color:#19692c!important}.text-info{color:#17a2b8!important}a.text-info:focus,a.text-info:hover{color:#0f6674!important}.text-warning{color:#ffc107!important}a.text-warning:focus,a.text-
warning:hover{color:#ba8b00!important}.text-danger{color:#dc3545!important}a.text-danger:focus,a.text-danger:hover{color:#a71d2a!important}.text-light{color:#f1f6f9!important}a.text-light:focus,a.text-light:hover{color:#bbd4e2!important}.text-dark{color:#495057!important}a.text-dark:focus,a.text-dark:hover{color:#262a2d!important}.text-primary-light{color:#fffaf0!important}a.text-primary-light:focus,a.text-primary-light:hover{color:#ffe1a4!important}.text-secondary-light{color:#fff!important}a.text-secondary-light:focus,a.text-secondary-light:hover{color:#d9d9d9!important}.text-tertiary{color:#257af4!important}a.text-tertiary:focus,a.text-tertiary:hover{color:#0a56c3!important}.text-tertiary-light{color:#e3f1fe!important}a.text-tertiary-light:focus,a.text-tertiary-light:hover{color:#99ccfb!important}.text-white{color:#fff!important}a.text-white:focus,a.text-white:hover{color:#d9d9d9!important}.text-black{color:#212529!important}a.text-black:focus,a.text-black:hover{color:#000!important}.text-blue{color:#257af4!important}a.text-blue:focus,a.text-blue:hover{color:#0a56c3!important}.text-light-blue{color:#e3f1fe!important}a.text-light-blue:focus,a.text-light-blue:hover{color:#99ccfb!important}.text-yellow{color:#fc0!important}a.text-yellow:focus,a.text-yellow:hover{color:#b38f00!important}.text-light-yellow{color:#fffaf0!important}a.text-light-yellow:focus,a.text-light-yellow:hover{color:#ffe1a4!important}.text-orange{color:#ff8c00!important}a.text-orange:focus,a.text-orange:hover{color:#b36200!important}.text-light-orange{color:#ffe4b5!important}a.text-light-orange:focus,a.text-light-orange:hover{color:#ffc869!important}.text-red{color:#ff3939!important}a.text-red:focus,a.text-red:hover{color:#ec0000!important}.text-light-red{color:#ffe4e1!important}a.text-light-red:focus,a.text-light-red:hover{color:#ff9f95!important}.text-medium{color:#d6dbdf!important}a.text-medium:focus,a.text-medium:hover{color:#abb5bd!important}.text-body{color:#212529!important}.text-muted{color:#6c757d!important}.text-black-50{color:rgba(33,37,41,.5)!important}.text-white-50{color:hsla(0,0%,100%,.5)!important}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.text-decoration-none{text-decoration:none!important}.text-break{word-break:break-word!important;overflow-wrap:break-word!important}.text-reset{color:inherit!important}.visible{visibility:visible!important}.invisible{visibility:hidden!important}@media print{*,:after,:before{text-shadow:none!important;box-shadow:none!important}a:not(.btn){text-decoration:underline}abbr[title]:after{content:" (" attr(title) ")"}pre{white-space:pre-wrap!important}blockquote,pre{border:1px solid #d6dbdf;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}@page{size:a3}.container,body{min-width:980px!important}.navbar{display:none}.badge{border:1px solid #212529}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #dee2e6!important}.table-dark{color:inherit}.table-dark tbody+tbody,.table-dark td,.table-dark th,.table-dark thead th{border-color:#d6dbdf}.table .thead-dark th{color:inherit;border-color:#d6dbdf}}
\ No newline at end of file
diff --git a/website/css/main.css b/website/css/main.css
index 73ff758625f..a505beb20bb 100644
--- a/website/css/main.css
+++ b/website/css/main.css
@@ -1 +1 @@
-@media screen and (max-width:978.98px){.btn{padding:8px
16px}}@media screen and (max-width:978.98px){.btn-lg{padding:12px 24px}}.btn-primary,.btn-primary:active,.btn-primary:hover{color:#212529}.btn-outline-primary{background:#fffaf0;border-color:#fc0;color:#212529}.btn-outline-primary:active,.btn-outline-primary:hover{background:#fc0;border-color:#fc0;color:#212529}.btn-secondary{border-color:#212529;color:#fff}.btn-outline-secondary,.btn-secondary:active,.btn-secondary:hover{background:#fff;border-color:#212529;color:#212529}.btn-outline-secondary:active,.btn-outline-secondary:hover{background:#212529;border-color:#212529;color:#fff}.btn-tertiary{border-color:#257af4;color:#fff}.btn-tertiary:active,.btn-tertiary:hover{background:#257af4;border-color:#257af4;color:#fff}.btn-outline-tertiary{background:#e3f1fe;color:#257af4}.btn-outline-tertiary:active,.btn-outline-tertiary:hover{background:#257af4;color:#fff}.btns{align-items:center;display:grid;-moz-column-gap:24px;column-gap:24px;row-gap:16px;grid-auto-flow:column;justify-content:center}@media screen and (max-width:767.98px){.btns{grid-auto-flow:row}}.btns.btns-lg{-moz-column-gap:40px;column-gap:40px}.btns.is-2{grid-template-columns:1fr 1fr}@media screen and (max-width:767.98px){.btns.is-2{grid-template-columns:1fr}}.btns.is-3{grid-template-columns:1fr 1fr 1fr}@media screen and (max-width:767.98px){.btns.is-3{grid-template-columns:1fr}}.card{box-shadow:0 8px 20px rgba(108,117,125,.2);overflow:hidden;transition:box-shadow .2s,transform .2s;width:100%}.card,.card-body{position:relative}.card-body{z-index:10}.card.is-large .card-body{padding:40px}.card.bg-primary-light{border-color:#fc0}.card.has-dark-border{border-color:#6c757d}.card.has-pattern:after,.card.has-pattern:before{background-repeat:no-repeat;background-size:auto 100%;bottom:0;content:"";display:block;position:absolute;top:0;width:72px}.card.has-pattern:before{background-image:url(../images/backgrounds/bg-card-pattern-blue-1.png);background-position:0 0;left:0}.card.has-pattern:after{background-image:url(../images/backgrounds/bg-card-pattern-blue-2.png);background-position:100% 0;right:0}.card.has-hover:active,.card.has-hover:hover,a.card:active,a.card:hover{box-shadow:0 12px 32px rgba(108,117,125,.2);transform:translateY(-8px)}.card.has-highlight:after,.card.has-hover:after,a.card:after{content:"";display:block;height:8px;margin-top:auto;transition:background .2s;width:100%}.card.has-highlight:after,.card.has-hover:active:after,.card.has-hover:hover:after,a.card:active:after,a.card:hover:after{background:#e3f1fe}.case-study-cards{-moz-column-gap:40px;column-gap:40px;display:grid;grid-template-columns:1fr;row-gap:40px;padding-bottom:40px;position:relative}.case-study-cards>div{align-items:stretch;display:flex}.case-study-cards:before{background:#d6dbdf;bottom:0;content:"";display:block;left:20px;position:absolute;top:40px;width:100vw}@media screen and (min-width:980px){.case-study-cards{grid-template-columns:repeat(2,minmax(0,1fr));row-gap:80px;padding-bottom:120px}.case-study-cards:before{left:-40px;top:120px}}.case-study-card{align-items:stretch;flex-direction:row;flex-shrink:0;left:0;transition:box-shadow .2s,left .4s,width .4s,z-index 0s;transition-delay:0s,.6s,.6s,0s;width:100%;z-index:2}@media screen and (max-width:979.98px){.case-study-card .row{min-height:0!important}}@media screen and (min-width:980px){.case-study-card:active,.case-study-card:hover{box-shadow:0 12px 32px 
rgba(108,117,125,.2)}.case-study-card:not(.is-open){cursor:pointer}.case-study-card.is-open{transform:none!important;transition-delay:0s,0s,0s,0s;width:calc(200% + 40px);z-index:10}.case-study-card.is-closing{z-index:10}.case-study-card.open-left.is-open{left:calc(-100% - 40px)}.case-study-card:before{background:no-repeat url(../images/backgrounds/bg-card-pattern-red.png);background-position:100%;background-size:contain;content:"";display:block;height:calc(100% - 80px);max-height:224px;max-width:234px;position:absolute;right:0;top:40px;transform:translateX(30%);transition:transform .4s;transition-delay:.6s;width:100%;z-index:1}}@media screen and (min-width:980px)and (min-width:1240px){.case-study-card:before{transform:translateX(50%)}}@media screen and (min-width:980px){.case-study-card.is-open:before{transform:translateX(70%);transition-delay:0s}}@media screen and (min-width:980px){.case-study-card-wrap{align-items:stretch;display:flex;flex-shrink:0;min-height:304px;position:relative;transition:width .4s;transition-delay:.6s;width:calc(200% + 42px);z-index:2}}@media screen and (min-width:980px){.case-study-card.is-open .case-study-card-wrap{transition-delay:0s;width:100%}}@media screen and (min-width:980px){.case-study-card-body{display:flex;flex-direction:column;padding-right:80px!important}.case-study-card-body>.row{align-self:stretch}}@media screen and (min-width:980px){.case-study-card-toggle{background:#fff;box-shadow:0 8px 20px rgba(108,117,125,.2);border-radius:100%;cursor:pointer;height:56px;position:relative;width:56px}.case-study-card-toggle:after,.case-study-card-toggle:before{background:#257af4;content:"";display:block;height:4px;left:calc(50% - 15px);position:absolute;top:calc(50% - 2px);transition:opacity .2s,transform .2s;width:30px}.case-study-card-toggle:after{transform:rotate(90deg)}}@media screen and (min-width:980px){.case-study-card.is-open .case-study-card-toggle:before{opacity:0;transform:rotate(-90deg)}}@media screen and (min-width:980px){.case-study-card.is-open .case-study-card-toggle:after{transform:rotate(0)}}@media screen and (min-width:980px){.case-study-card .col-lg-3{left:-60%;position:relative;transition:left .4s;transition-delay:.6s}}@media screen and (min-width:980px)and (min-width:980px){.case-study-card .col-lg-3{flex:0 0 250px;max-width:250px;width:250px}}@media screen and (min-width:980px){.case-study-card.is-open .col-lg-3{left:0;transition-delay:0s}}@media screen and (min-width:980px){.case-study-card .col-lg-auto{opacity:0;transform:translateX(24px);transition:opacity .4s,transform .4s;transition-delay:.2s}}@media screen and (min-width:980px)and (min-width:980px){.case-study-card .col-lg-auto{max-width:605px;width:calc(100% - 319px)}}@media screen and (min-width:980px){.case-study-card.is-open .col-lg-auto{opacity:1;transform:none;transition-delay:.2s}}.footer-copy{white-space:nowrap}form .form-control{border:1px solid #6c757d;border-radius:6px;height:auto;line-height:20px;min-height:44px;padding:12px 16px;width:100%}form .form-control,form .form-control:focus{box-shadow:0 8px 20px rgba(108,117,125,.2);color:#212529}form .form-control:focus{border-color:#212529}form .form-control::-moz-placeholder{color:#6c757d}form .form-control:-ms-input-placeholder{color:#6c757d}form .form-control::placeholder{color:#6c757d}form select.form-control{-webkit-appearance:none;-moz-appearance:none;appearance:none}form select.form-control:not([data-chosen]){color:#6c757d}form .btn-secondary:active,form 
.btn-secondary:hover{color:#212529;background:#fc0;border-color:#fc0}.hero{overflow:visible;position:relative}.hero,.hero-bg{background-repeat:no-repeat;background-position:50%;background-size:cover}.hero-bg{display:block;height:100%;left:50%;position:absolute;top:0;transform:translateX(-50%);z-index:1}.hero>.container{position:relative;z-index:2}.hero.has-offset{margin-bottom:-160px;padding-bottom:160px}.base-hero{height:22.5vw;max-height:324px;min-height:280px}.index-hero{background-image:url(/images/backgrounds/bg-hero-home.svg);height:68vw;max-height:980px}.index-hero,.other-hero{max-width:2448px;width:170vw}.other-hero{background-image:url(/images/backgrounds/bg-hero.svg)}.bg-footer-cta{background-image:url(/images/backgrounds/bg-footer-cta.svg);width:2448px}.quickstart-bg{background-image:url(/images/backgrounds/bg-quick-start.svg);height:40vw;top:220px;width:170vw}hr{background:#f1f6f9;border:0;display:block;height:4px;margin:0;width:100%}hr.is-small{height:2px}hr.is-large{height:8px}hr.is-medium{background:#d6dbdf}hr.is-dark{background:#495057}hr.is-yellow{background:linear-gradient(90deg,#ff8c00,#ff8c00 8px,#fc0 16px,rgba(255,204,0,0));-webkit-clip-path:polygon(8px 100%,0 100%,0 0,8px 0,8px 100%,16px 100%,16px 0,100% 0,100% 100%);clip-path:polygon(8px 100%,0 100%,0 0,8px 0,8px 100%,16px 100%,16px 0,100% 0,100% 100%);height:8px}.icon{display:block;height:48px;margin-bottom:24px;-o-object-fit:contain;object-fit:contain;-o-object-position:center;object-position:center}@media screen and (min-width:576px){.icon{height:64px}}@media screen and (min-width:980px){.icon{height:80px}}img{max-width:100%}.kicker{color:#6c757d;font-family:Hind Siliguri,sans-serif;font-size:.875rem;font-weight:600;letter-spacing:1px;margin:0}@media screen and (max-width:978.98px){.lead{font-size:1.125rem}}.logo{display:block;height:36px;max-width:220px;-o-object-fit:contain;object-fit:contain;-o-object-position:center;object-position:center;width:100%}.navbar-clickhouse{border-bottom:4px solid #f1f6f9;height:142px}.navbar-clickhouse>.container{flex-wrap:wrap}.navbar-super{flex-shrink:0;width:100%}.navbar-super ul{list-style:none}.navbar-super li:not(:last-child){margin-bottom:0;margin-right:24px}.navbar-super a{align-items:center;color:#212529;display:flex;font-size:.875rem}.navbar-super a:active,.navbar-super a:hover{color:#257af4;text-decoration:none}.navbar-super img{flex-shrink:0;margin-right:4px}.navbar-brand-clickhouse{background:no-repeat url(../images/logo-clickhouse.svg);background-size:contain;flex-shrink:0;height:28px;margin-right:48px;padding:0;width:180px}.navbar-nav{align-items:center;height:46px}.navbar .nav-item:not(:last-child){margin-bottom:0;margin-right:24px}.navbar .nav-link{color:#212529}.navbar .nav-link:active,.navbar .nav-link:hover{color:#257af4}.navbar .navbar-nav{flex-direction:row}@media screen and (max-width:978.98px){.navbar>.container{padding-left:20px;padding-right:20px}.navbar .navbar-toggler{height:24px;padding:0;width:24px}.navbar .navbar-toggler:focus{outline:none}.navbar .navbar-toggler-icon{background:no-repeat url(../images/icons/icon-menu.svg);background-position:50%;background-size:contain;height:24px;width:24px}.navbar .navbar-collapse{background:#fff;border-bottom:4px solid #f1f6f9;height:56px;left:0;padding:0 20px 16px;position:absolute;right:0;top:100%}.navbar .nav-link{font-size:.875rem;white-space:nowrap}}@media screen and (max-width:615.98px){.navbar .navbar-collapse{height:auto}.navbar .navbar-nav{flex-direction:column;height:auto}.navbar 
.nav-item:not(:last-child){margin-bottom:16px;margin-right:0}}@media screen and (max-width:399.98px){.navbar{height:80px}}.page,.photo-frame{overflow:hidden;width:100%}.photo-frame{background:hsla(0,0%,100%,.6);border-radius:100%;box-shadow:0 8px 20px rgba(108,117,125,.2);display:block;margin-bottom:24px;max-width:160px;position:relative}.photo-frame:before{content:"";display:block;padding-bottom:100%;width:100%}.photo-frame img{display:block;height:100%;left:0;-o-object-fit:contain;object-fit:contain;-o-object-position:center;object-position:center;position:absolute;top:0;width:100%}.pullquote{position:relative;width:70%}.pullquote:before{background:no-repeat url(../images/backgrounds/bg-quotes.svg);background-position:50%;background-size:100%;content:"";mix-blend-mode:multiply;right:56px;width:calc(100% - 16px);z-index:2}.pullquote-bg,.pullquote:before{bottom:0;display:block;position:absolute;top:0}.pullquote-bg{right:0;width:calc(50vw + 28.57143%);z-index:1}.pullquote-body{padding:64px 40px 64px 0;position:relative;z-index:3}.pullquote-quote{font-family:Hind Siliguri,sans-serif;font-size:32px;font-weight:700}.pullquote-citation{font-size:1.125rem}.section{overflow:visible;position:relative}.section,.section-bg{background-repeat:no-repeat;background-position:50%;background-size:cover}.section-bg{display:block;height:100%;left:50%;position:absolute;top:0;transform:translateX(-50%);z-index:1}.section>.container{position:relative;z-index:2}.social-icons{align-items:center;display:flex}.social-icons>a{aspect-ratio:24/24;background:#6c757d;display:block;height:24px;width:24px;-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;transition:background .2s}.social-icons>a:active,.social-icons>a:hover{background:#212529}.social-icons>a+a{margin-left:32px}.social-icons-facebook{-webkit-mask-image:url(/images/icons/icon-facebook-gray.svg);mask-image:url(/images/icons/icon-facebook-gray.svg)}.social-icons-twitter{-webkit-mask-image:url(/images/icons/icon-twitter-gray.svg);mask-image:url(/images/icons/icon-twitter-gray.svg);width:31px}.social-icons-linkedin{-webkit-mask-image:url(/images/icons/icon-linkedin-gray.svg);mask-image:url(/images/icons/icon-linkedin-gray.svg)}.social-icons-linkedin-alt{-webkit-mask-image:url(/images/icons/icon-linkedin-alt-gray.svg);mask-image:url(/images/icons/icon-linkedin-alt-gray.svg)}.social-icons.size-small>a{height:20px;width:20px}.social-icons.size-small>a:active,.social-icons.size-small>a:hover{background:#212529}.social-icons.size-small>a+a{margin-left:16px}.tabs{position:relative}.tabs:before{background:#fff;border-radius:7px 7px 0 0;content:"";display:block;height:8px;left:1px;position:absolute;right:1px;top:68px;z-index:10}@media screen and (min-width:1240px){.tabs:before{top:76px}}.tabs-body{background:#fff;border-radius:8px;border:1px solid #6c757d;box-shadow:0 8px 20px rgba(108,117,125,.2);padding:24px}@media screen and (min-width:980px){.tabs-body{padding:32px}}@media screen and (min-width:1240px){.tabs-body{padding:40px}}.tabs .nav-tabs{border-bottom:0;flex-wrap:nowrap;height:76px;margin:-20px -20px -9px;-webkit-mask-image:linear-gradient(90deg,transparent,#000 20px,#000 calc(100% - 20px),transparent);mask-image:linear-gradient(90deg,transparent,#000 20px,#000 calc(100% - 20px),transparent);overflow:scroll;overflow-x:scroll;overflow-y:visible;padding:20px 20px 0;position:relative}@media screen and (min-width:940px){.tabs .nav-tabs{overflow:visible}}@media screen 
and (min-width:1240px){.tabs .nav-tabs{height:84px}}.tabs .nav-link{align-items:center;border-bottom:0;color:#6c757d;display:flex;font-size:.875rem;flex-shrink:0;height:56px;justify-content:center;padding:0 12px 8px;text-align:center;white-space:nowrap}@media screen and (min-width:1240px){.tabs .nav-link{height:64px;padding:0 16px 8px}}.tabs .nav-link.active{background:#fff;box-shadow:0 -4px 8px rgba(108,117,125,.1);font-weight:700;padding:0 16px 8px}@media screen and (min-width:980px){.tabs .nav-link.active{padding:0 24px 8px}}@media screen and (min-width:1240px){.tabs .nav-link.active{padding:0 32px 8px}}.tab-pane pre{background:#212529;border-radius:16px;color:#fff;padding:24px 16px}@media screen and (min-width:1240px){.tab-pane pre{padding:32px 24px}}.trailing-link{align-items:center;color:#212529;display:flex;font-size:.875rem;font-weight:700}.trailing-link:after{background:no-repeat url(../images/icons/icon-arrow.svg);background-position:100%;background-size:contain;content:"";display:block;height:12px;transition:transform .2s;width:20px}.trailing-link:active,.trailing-link:hover{color:#212529;text-decoration:none}.trailing-link:active:after,.trailing-link:hover:after{transform:translateX(8px)}.trailing-link.span-full:after{margin-left:auto}ul{color:#495057;list-style-type:square;padding-left:1.25em}ul li:not(:last-child){margin-bottom:16px}ul li::marker{color:#ff3939}ul.has-separators{list-style:none;padding:0}ul.has-separators li:not(:last-child){border-bottom:4px solid #f1f6f9;margin-bottom:24px;padding-bottom:24px}.bg-gradient-secondary{background-image:linear-gradient(58deg,#ff6443 3%,#fe561d 24%,#e32f0d 93%)}.bg-gradient-light-orange{background-image:linear-gradient(90deg,rgba(255,203,128,0),#ffcb80)}.bg-offset-right{bottom:0;left:-24px;position:absolute;top:0;width:calc(100vw + 24px);z-index:-1}@media screen and (min-width:1240px){.bg-offset-right{left:-96px;width:calc(100vw + 96px)}}.bg-inset-right{bottom:0;left:40px;position:absolute;top:0;width:calc(100vw - 40px);z-index:-1}@media screen and (min-width:980px){.bg-inset-right{left:96px;width:calc(100vw - 96px)}}.has-border-left{border-left:8px solid #f1f6f9;padding-left:16px}.font-xl{font-size:1.25rem}.font-lg{font-size:1.125rem}.font-sm{font-size:.875rem}.font-xs{font-size:.625rem}.font-weight-semibold{font-weight:600}.display-5{color:#212529;font-size:20px;font-weight:500}.display-6{color:#212529;font-size:14px;font-weight:700}.overflow-auto{overflow:auto}.text-decoration-underline{text-decoration:underline}.text-upper{text-transform:uppercase}
\ No newline at end of file
+@media screen and (max-width:978.98px){.btn{padding:8px 16px}}@media screen and (max-width:978.98px){.btn-lg{padding:12px
24px}}.btn-primary,.btn-primary:active,.btn-primary:hover{color:#212529}.btn-outline-primary{background:#fffaf0;border-color:#fc0;color:#212529}.btn-outline-primary:active,.btn-outline-primary:hover{background:#fc0;border-color:#fc0;color:#212529}.btn-secondary{border-color:#212529;color:#fff}.btn-outline-secondary,.btn-secondary:active,.btn-secondary:hover{background:#fff;border-color:#212529;color:#212529}.btn-outline-secondary:active,.btn-outline-secondary:hover{background:#212529;border-color:#212529;color:#fff}.btn-tertiary{border-color:#257af4;color:#fff}.btn-tertiary:active,.btn-tertiary:hover{background:#257af4;border-color:#257af4;color:#fff}.btn-outline-tertiary{background:#e3f1fe;color:#257af4}.btn-outline-tertiary:active,.btn-outline-tertiary:hover{background:#257af4;color:#fff}.btns{align-items:center;display:grid;-moz-column-gap:24px;column-gap:24px;row-gap:16px;grid-auto-flow:column;justify-content:center}@media screen and (max-width:767.98px){.btns{grid-auto-flow:row}}.btns.btns-lg{-moz-column-gap:40px;column-gap:40px}.btns.is-2{grid-template-columns:1fr 1fr}@media screen and (max-width:767.98px){.btns.is-2{grid-template-columns:1fr}}.btns.is-3{grid-template-columns:1fr 1fr 1fr}@media screen and (max-width:767.98px){.btns.is-3{grid-template-columns:1fr}}.card{box-shadow:0 8px 20px rgba(108,117,125,.2);overflow:hidden;transition:box-shadow .2s,transform .2s;width:100%}.card,.card-body{position:relative}.card-body{z-index:10}.card.is-large .card-body{padding:40px}.card.bg-primary-light{border-color:#fc0}.card.has-dark-border{border-color:#6c757d}.card.has-pattern:after,.card.has-pattern:before{background-repeat:no-repeat;background-size:auto 100%;bottom:0;content:"";display:block;position:absolute;top:0;width:72px}.card.has-pattern:before{background-image:url(../images/backgrounds/bg-card-pattern-blue-1.png);background-position:0 0;left:0}.card.has-pattern:after{background-image:url(../images/backgrounds/bg-card-pattern-blue-2.png);background-position:100% 0;right:0}.card.has-hover:active,.card.has-hover:hover,a.card:active,a.card:hover{box-shadow:0 12px 32px rgba(108,117,125,.2);transform:translateY(-8px)}.card.has-highlight:after,.card.has-hover:after,a.card:after{content:"";display:block;height:8px;margin-top:auto;transition:background .2s;width:100%}.card.has-highlight:after,.card.has-hover:active:after,.card.has-hover:hover:after,a.card:active:after,a.card:hover:after{background:#e3f1fe}.case-study-cards{-moz-column-gap:40px;column-gap:40px;display:grid;grid-template-columns:1fr;row-gap:40px;padding-bottom:40px;position:relative}.case-study-cards>div{align-items:stretch;display:flex}.case-study-cards:before{background:#d6dbdf;bottom:0;content:"";display:block;left:20px;position:absolute;top:40px;width:100vw}@media screen and (min-width:980px){.case-study-cards{grid-template-columns:repeat(2,minmax(0,1fr));row-gap:80px;padding-bottom:120px}.case-study-cards:before{left:-40px;top:120px}}.case-study-card{align-items:stretch;flex-direction:row;flex-shrink:0;left:0;transition:box-shadow .2s,left .4s,width .4s,z-index 0s;transition-delay:0s,.6s,.6s,0s;width:100%;z-index:2}@media screen and (max-width:979.98px){.case-study-card .row{min-height:0!important}}@media screen and (min-width:980px){.case-study-card:active,.case-study-card:hover{box-shadow:0 12px 32px rgba(108,117,125,.2)}.case-study-card:not(.is-open){cursor:pointer}.case-study-card.is-open{transform:none!important;transition-delay:0s,0s,0s,0s;width:calc(200% + 
40px);z-index:10}.case-study-card.is-closing{z-index:10}.case-study-card.open-left.is-open{left:calc(-100% - 40px)}.case-study-card:before{background:no-repeat url(../images/backgrounds/bg-card-pattern-red.png);background-position:100%;background-size:contain;content:"";display:block;height:calc(100% - 80px);max-height:224px;max-width:234px;position:absolute;right:0;top:40px;transform:translateX(30%);transition:transform .4s;transition-delay:.6s;width:100%;z-index:1}}@media screen and (min-width:980px)and (min-width:1240px){.case-study-card:before{transform:translateX(50%)}}@media screen and (min-width:980px){.case-study-card.is-open:before{transform:translateX(70%);transition-delay:0s}}@media screen and (min-width:980px){.case-study-card-wrap{align-items:stretch;display:flex;flex-shrink:0;min-height:304px;position:relative;transition:width .4s;transition-delay:.6s;width:calc(200% + 42px);z-index:2}}@media screen and (min-width:980px){.case-study-card.is-open .case-study-card-wrap{transition-delay:0s;width:100%}}@media screen and (min-width:980px){.case-study-card-body{display:flex;flex-direction:column;padding-right:80px!important}.case-study-card-body>.row{align-self:stretch}}@media screen and (min-width:980px){.case-study-card-toggle{background:#fff;box-shadow:0 8px 20px rgba(108,117,125,.2);border-radius:100%;cursor:pointer;height:56px;position:relative;width:56px}.case-study-card-toggle:after,.case-study-card-toggle:before{background:#257af4;content:"";display:block;height:4px;left:calc(50% - 15px);position:absolute;top:calc(50% - 2px);transition:opacity .2s,transform .2s;width:30px}.case-study-card-toggle:after{transform:rotate(90deg)}}@media screen and (min-width:980px){.case-study-card.is-open .case-study-card-toggle:before{opacity:0;transform:rotate(-90deg)}}@media screen and (min-width:980px){.case-study-card.is-open .case-study-card-toggle:after{transform:rotate(0)}}@media screen and (min-width:980px){.case-study-card .col-lg-3{left:-60%;position:relative;transition:left .4s;transition-delay:.6s}}@media screen and (min-width:980px)and (min-width:980px){.case-study-card .col-lg-3{flex:0 0 250px;max-width:250px;width:250px}}@media screen and (min-width:980px){.case-study-card.is-open .col-lg-3{left:0;transition-delay:0s}}@media screen and (min-width:980px){.case-study-card .col-lg-auto{opacity:0;transform:translateX(24px);transition:opacity .4s,transform .4s;transition-delay:.2s}}@media screen and (min-width:980px)and (min-width:980px){.case-study-card .col-lg-auto{max-width:605px;width:calc(100% - 319px)}}@media screen and (min-width:980px){.case-study-card.is-open .col-lg-auto{opacity:1;transform:none;transition-delay:.2s}}.footer-copy{white-space:nowrap}form .form-control{border:1px solid #6c757d;border-radius:6px;height:auto;line-height:20px;min-height:44px;padding:12px 16px;width:100%}form .form-control,form .form-control:focus{box-shadow:0 8px 20px rgba(108,117,125,.2);color:#212529}form .form-control:focus{border-color:#212529}form .form-control::-moz-placeholder{color:#6c757d}form .form-control:-ms-input-placeholder{color:#6c757d}form .form-control::placeholder{color:#6c757d}form select.form-control{-webkit-appearance:none;-moz-appearance:none;appearance:none}form select.form-control:not([data-chosen]){color:#6c757d}form .btn-secondary:active,form 
.btn-secondary:hover{color:#212529;background:#fc0;border-color:#fc0}.hero{overflow:visible;position:relative}.hero,.hero-bg{background-repeat:no-repeat;background-position:50%;background-size:cover}.hero-bg{display:block;height:100%;left:50%;position:absolute;top:0;transform:translateX(-50%);z-index:1}.hero>.container{position:relative;z-index:2}.hero.has-offset{margin-bottom:-160px;padding-bottom:160px}.base-hero{height:22.5vw;max-height:324px;min-height:280px}.index-hero{background-image:url(/images/backgrounds/bg-hero-home.svg);height:68vw;max-height:980px}.index-hero,.other-hero{max-width:2448px;width:170vw}.other-hero{background-image:url(/images/backgrounds/bg-hero.svg)}.bg-footer-cta{background-image:url(/images/backgrounds/bg-footer-cta.svg);width:2448px}.quickstart-bg{background-image:url(/images/backgrounds/bg-quick-start.svg);height:40vw;top:220px;width:170vw}hr{background:#f1f6f9;border:0;display:block;height:4px;margin:0;width:100%}hr.is-small{height:2px}hr.is-large{height:8px}hr.is-medium{background:#d6dbdf}hr.is-dark{background:#495057}hr.is-yellow{background:linear-gradient(90deg,#ff8c00,#ff8c00 8px,#fc0 16px,rgba(255,204,0,0));-webkit-clip-path:polygon(8px 100%,0 100%,0 0,8px 0,8px 100%,16px 100%,16px 0,100% 0,100% 100%);clip-path:polygon(8px 100%,0 100%,0 0,8px 0,8px 100%,16px 100%,16px 0,100% 0,100% 100%);height:8px}.icon{display:block;height:48px;margin-bottom:24px;-o-object-fit:contain;object-fit:contain;-o-object-position:center;object-position:center}@media screen and (min-width:576px){.icon{height:64px}}@media screen and (min-width:980px){.icon{height:80px}}img{max-width:100%}.kicker{color:#6c757d;font-family:Hind Siliguri,sans-serif;font-size:.875rem;font-weight:600;letter-spacing:1px;margin:0}@media screen and (max-width:978.98px){.lead{font-size:1.125rem}}.logo{display:block;height:36px;max-width:220px;-o-object-fit:contain;object-fit:contain;-o-object-position:center;object-position:center;width:100%}.navbar-clickhouse{border-bottom:4px solid #f1f6f9;height:142px}.navbar-clickhouse>.container{flex-wrap:wrap}.navbar-super{flex-shrink:0;width:100%}.navbar-super ul{list-style:none}.navbar-super li:not(:last-child){margin-bottom:0;margin-right:24px}.navbar-super a{align-items:center;color:#212529;display:flex;font-size:.875rem}.navbar-super a:active,.navbar-super a:hover{color:#257af4;text-decoration:none}.navbar-super img{flex-shrink:0;margin-right:4px}.navbar-brand-clickhouse{background:no-repeat url(../images/logo-clickhouse.svg);background-size:contain;flex-shrink:0;height:28px;margin-right:48px;padding:0;width:180px}.navbar-nav{align-items:center;height:46px}.navbar .nav-item:not(:last-child){margin-bottom:0;margin-right:24px}.navbar .nav-link{color:#212529}.navbar .nav-link:active,.navbar .nav-link:hover{color:#257af4}.navbar .navbar-nav{flex-direction:row}@media screen and (max-width:978.98px){.navbar>.container{padding-left:20px;padding-right:20px}.navbar .navbar-toggler{height:24px;padding:0;width:24px}.navbar .navbar-toggler:focus{outline:none}.navbar .navbar-toggler-icon{background:no-repeat url(../images/icons/icon-menu.svg);background-position:50%;background-size:contain;height:24px;width:24px}.navbar .navbar-collapse{background:#fff;border-bottom:4px solid #f1f6f9;height:56px;left:0;padding:0 20px 16px;position:absolute;right:0;top:100%}.navbar .nav-link{font-size:.875rem;white-space:nowrap}}@media screen and (max-width:615.98px){.navbar .navbar-collapse{height:auto}.navbar .navbar-nav{flex-direction:column;height:auto}.navbar 
.nav-item:not(:last-child){margin-bottom:16px;margin-right:0}}@media screen and (max-width:399.98px){.navbar{height:80px}}.page,.photo-frame{overflow:hidden;width:100%}.photo-frame{background:hsla(0,0%,100%,.6);border-radius:100%;box-shadow:0 8px 20px rgba(108,117,125,.2);display:block;margin-bottom:24px;max-width:160px;position:relative}.photo-frame:before{content:"";display:block;padding-bottom:100%;width:100%}.photo-frame img{display:block;height:100%;left:0;-o-object-fit:contain;object-fit:contain;-o-object-position:center;object-position:center;position:absolute;top:0;width:100%}.pullquote{position:relative;width:70%}.pullquote:before{background:no-repeat url(../images/backgrounds/bg-quotes.svg);background-position:50%;background-size:100%;content:"";mix-blend-mode:multiply;right:56px;width:calc(100% - 16px);z-index:2}.pullquote-bg,.pullquote:before{bottom:0;display:block;position:absolute;top:0}.pullquote-bg{right:0;width:calc(50vw + 28.57143%);z-index:1}.pullquote-body{padding:64px 40px 64px 0;position:relative;z-index:3}.pullquote-quote{font-family:Hind Siliguri,sans-serif;font-size:32px;font-weight:700}.pullquote-citation{font-size:1.125rem}.section{overflow:visible;position:relative}.section,.section-bg{background-repeat:no-repeat;background-position:50%;background-size:cover}.section-bg{display:block;height:100%;left:50%;position:absolute;top:0;transform:translateX(-50%);z-index:1}.section>.container{position:relative;z-index:2}.social-icons{align-items:center;display:flex}.social-icons>a{aspect-ratio:24/24;background:#6c757d;display:block;height:24px;width:24px;-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;transition:background .2s}.social-icons>a:active,.social-icons>a:hover{background:#212529}.social-icons>a+a{margin-left:32px}.social-icons-facebook{-webkit-mask-image:url(/images/icons/icon-facebook-gray.svg);mask-image:url(/images/icons/icon-facebook-gray.svg)}.social-icons-twitter{-webkit-mask-image:url(/images/icons/icon-twitter-gray.svg);mask-image:url(/images/icons/icon-twitter-gray.svg);width:31px}.social-icons-linkedin{-webkit-mask-image:url(/images/icons/icon-linkedin-gray.svg);mask-image:url(/images/icons/icon-linkedin-gray.svg)}.social-icons-linkedin-alt{-webkit-mask-image:url(/images/icons/icon-linkedin-alt-gray.svg);mask-image:url(/images/icons/icon-linkedin-alt-gray.svg)}.social-icons.size-small>a{height:20px;width:20px}.social-icons.size-small>a:active,.social-icons.size-small>a:hover{background:#212529}.social-icons.size-small>a+a{margin-left:16px}.tabs{position:relative}.tabs:before{background:#fff;border-radius:7px 7px 0 0;content:"";display:block;height:8px;left:1px;position:absolute;right:1px;top:68px;z-index:10}@media screen and (min-width:1240px){.tabs:before{top:76px}}.tabs-body{background:#fff;border-radius:8px;border:1px solid #6c757d;box-shadow:0 8px 20px rgba(108,117,125,.2);padding:24px}@media screen and (min-width:980px){.tabs-body{padding:32px}}@media screen and (min-width:1240px){.tabs-body{padding:40px}}.tabs .nav-tabs{border-bottom:0;flex-wrap:nowrap;height:76px;margin:-20px -20px -9px;-webkit-mask-image:linear-gradient(90deg,transparent,#000 20px,#000 calc(100% - 20px),transparent);mask-image:linear-gradient(90deg,transparent,#000 20px,#000 calc(100% - 20px),transparent);overflow:scroll;overflow-x:scroll;overflow-y:visible;padding:20px 20px 0;position:relative}@media screen and (min-width:940px){.tabs .nav-tabs{overflow:visible}}@media screen 
and (min-width:1240px){.tabs .nav-tabs{height:84px}}.tabs .nav-link{align-items:center;border-bottom:0;color:#6c757d;display:flex;font-size:.875rem;flex-shrink:0;height:56px;justify-content:center;padding:0 12px 8px;text-align:center;white-space:nowrap}@media screen and (min-width:1240px){.tabs .nav-link{height:64px;padding:0 16px 8px}}.tabs .nav-link.active{background:#fff;box-shadow:0 -4px 8px rgba(108,117,125,.1);font-weight:700;padding:0 16px 8px}@media screen and (min-width:980px){.tabs .nav-link.active{padding:0 24px 8px}}@media screen and (min-width:1240px){.tabs .nav-link.active{padding:0 32px 8px}}.tab-pane pre{background:#212529;border-radius:16px;color:#fff;padding:24px 16px}@media screen and (min-width:1240px){.tab-pane pre{padding:32px 24px}}.trailing-link{align-items:center;color:#212529;display:flex;font-size:.875rem;font-weight:700}.trailing-link:after{background:no-repeat url(../images/icons/icon-arrow.svg);background-position:100%;background-size:contain;content:"";display:block;height:12px;transition:transform .2s;width:20px}.trailing-link:active,.trailing-link:hover{color:#212529;text-decoration:none}.trailing-link:active:after,.trailing-link:hover:after{transform:translateX(8px)}.trailing-link.span-full:after{margin-left:auto}ul{list-style-type:square;padding-left:1.25em}ul li:not(:last-child){margin-bottom:16px}ul li::marker{color:#ff3939}ul.has-separators{list-style:none;padding:0}ul.has-separators li:not(:last-child){border-bottom:4px solid #f1f6f9;margin-bottom:24px;padding-bottom:24px}.bg-gradient-secondary{background-image:linear-gradient(58deg,#ff6443 3%,#fe561d 24%,#e32f0d 93%)}.bg-gradient-light-orange{background-image:linear-gradient(90deg,rgba(255,203,128,0),#ffcb80)}.bg-offset-right{bottom:0;left:-24px;position:absolute;top:0;width:calc(100vw + 24px);z-index:-1}@media screen and (min-width:1240px){.bg-offset-right{left:-96px;width:calc(100vw + 96px)}}.bg-inset-right{bottom:0;left:40px;position:absolute;top:0;width:calc(100vw - 40px);z-index:-1}@media screen and (min-width:980px){.bg-inset-right{left:96px;width:calc(100vw - 96px)}}.has-border-left{border-left:8px solid #f1f6f9;padding-left:16px}.font-xl{font-size:1.25rem}.font-lg{font-size:1.125rem}.font-sm{font-size:.875rem}.font-xs{font-size:.625rem}.font-weight-semibold{font-weight:600}.display-5{color:#212529;font-size:20px;font-weight:500}.display-6{color:#212529;font-size:14px;font-weight:700}.overflow-auto{overflow:auto}.text-decoration-underline{text-decoration:underline}.text-upper{text-transform:uppercase}
\ No newline at end of file
diff --git a/website/images/photos/christoph-wurm.jpg b/website/images/photos/christoph-wurm.jpg
new file mode 100644
index 00000000000..8a9adac2fb0
Binary files /dev/null and b/website/images/photos/christoph-wurm.jpg differ
diff --git a/website/images/photos/mikhail-shiryaev.jpg b/website/images/photos/mikhail-shiryaev.jpg
new file mode 100644
index 00000000000..3f2f2f2334a
Binary files /dev/null and b/website/images/photos/mikhail-shiryaev.jpg differ
diff --git a/website/src/scss/_variables.scss b/website/src/scss/_variables.scss
index 4d5ef4f4667..d511c757055 100644
--- a/website/src/scss/_variables.scss
+++ b/website/src/scss/_variables.scss
@@ -165,7 +165,7 @@ $border-color: $gray-500;
 
 $border-radius: 8px;
 $border-radius-lg: 8px;
-$border-radius-sm: 8px;
+$border-radius-sm: 4px;
 
 $box-shadow-sm: 0 2px 14px rgba($gray-700, .2);
 $box-shadow: 0 8px 20px rgba($gray-700, .2);
@@ -238,10 +238,10 @@
 $btn-font-size: 14px;
 $btn-line-height: 20px;
 $btn-white-space: null; // Set to `nowrap` to prevent text wrapping
-$btn-padding-y-sm: 12px;
-$btn-padding-x-sm: 32px;
-$btn-font-size-sm: 14px;
-$btn-line-height-sm: 20px;
+$btn-padding-y-sm: 6px;
+$btn-padding-x-sm: 12px;
+$btn-font-size-sm: 12px;
+$btn-line-height-sm: 16px;
 
 $btn-padding-y-lg: 16px;
 $btn-padding-x-lg: 32px;
diff --git a/website/src/scss/components/_ul.scss b/website/src/scss/components/_ul.scss
index 9cf03aba169..2dc92a0d211 100644
--- a/website/src/scss/components/_ul.scss
+++ b/website/src/scss/components/_ul.scss
@@ -1,5 +1,4 @@
 ul {
-    color: $gray-900;
     list-style-type: square;
     padding-left: 1.25em;
diff --git a/website/templates/company/team.html b/website/templates/company/team.html
index 28efdcfde06..b4ed1c26a29 100644
--- a/website/templates/company/team.html
+++ b/website/templates/company/team.html
@@ -6,7 +6,7 @@
 {{ _('ClickHouse Team') }}
+{{ _('Mikhail Shiryaev') }}
+{{ _('Site Reliability Engineer') }}
@@ -358,7 +371,7 @@
+{{ _('Christoph Wurm') }}
+{{ _('Solutions Architect') }}
diff --git a/website/templates/docs/sidebar.html b/website/templates/docs/sidebar.html
index 794984686c1..11ac4765189 100644
--- a/website/templates/docs/sidebar.html
+++ b/website/templates/docs/sidebar.html
@@ -15,9 +15,6 @@
     {% set level = 1 %}
     {% include "templates/docs/sidebar-item.html" %}
 {% endfor %}
-{{ _('PDF version') }}
 {% else %}
 {% include "templates/docs/toc.html" %}
diff --git a/website/templates/index/hero.html b/website/templates/index/hero.html
index b101e6f9831..8e604cad771 100644
--- a/website/templates/index/hero.html
+++ b/website/templates/index/hero.html
@@ -3,20 +3,21 @@
-ClickHouse v21.10 Released
+ClickHouse v21.11 Released
 {{ _('ClickHouse® is an open-source, high performance columnar OLAP database management system for real-time analytics using SQL.') }}
@@ -37,7 +38,6 @@
 Read the News
 Read the Press Release