diff --git a/.clang-tidy b/.clang-tidy index ca84a4834e5..6fd67876923 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -16,7 +16,6 @@ Checks: '-*, modernize-make-unique, modernize-raw-string-literal, modernize-redundant-void-arg, - modernize-replace-auto-ptr, modernize-replace-random-shuffle, modernize-use-bool-literals, modernize-use-nullptr, @@ -145,6 +144,7 @@ Checks: '-*, clang-analyzer-cplusplus.SelfAssignment, clang-analyzer-deadcode.DeadStores, clang-analyzer-cplusplus.Move, + clang-analyzer-optin.cplusplus.UninitializedObject, clang-analyzer-optin.cplusplus.VirtualCall, clang-analyzer-security.insecureAPI.UncheckedReturn, clang-analyzer-security.insecureAPI.bcmp, @@ -164,6 +164,8 @@ Checks: '-*, clang-analyzer-unix.cstring.NullArg, boost-use-to-string, + + alpha.security.cert.env.InvalidPtr, ' WarningsAsErrors: '*' diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 6540b60476f..2d8540b57ea 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,4 +1,4 @@ -Changelog category (leave one): +### Changelog category (leave one): - New Feature - Improvement - Bug Fix (user-visible misbehaviour in official stable or prestable release) @@ -9,7 +9,7 @@ Changelog category (leave one): - Not for changelog (changelog entry is not required) -Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md): +### Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md): ... diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index 75f8a63368d..44fe082b04d 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -9,6 +9,18 @@ on: # yamllint disable-line rule:truthy branches: - 'backport/**' jobs: + PythonUnitTests: + runs-on: [self-hosted, style-checker] + steps: + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Python unit tests + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 -m unittest discover -s . 
-p '*_test.py' DockerHubPushAarch64: runs-on: [self-hosted, style-checker-aarch64] steps: @@ -143,8 +155,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -184,8 +196,8 @@ jobs: - name: Upload build URLs to artifacts uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -229,8 +241,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -274,8 +286,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -319,8 +331,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -341,10 +353,15 @@ jobs: steps: - name: Set envs run: | + DEPENDENCIES=$(cat << 'EOF' | jq '. | length' + ${{ toJSON(needs) }} + EOF + ) + echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV" cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/report_check - REPORTS_PATH=${{runner.temp}}/reports_dir CHECK_NAME=ClickHouse build check (actions) + REPORTS_PATH=${{runner.temp}}/reports_dir + TEMP_PATH=${{runner.temp}}/report_check EOF - name: Download json reports uses: actions/download-artifact@v2 @@ -360,7 +377,7 @@ jobs: sudo rm -fr "$TEMP_PATH" mkdir -p "$TEMP_PATH" cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" + python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES" - name: Cleanup if: always() run: | diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index cfa95b84ee5..efaf1c64c05 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -219,8 +219,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -260,8 +260,8 @@ jobs: - name: Upload build URLs to artifacts uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -305,8 +305,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -350,8 +350,8 @@ 
jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -395,8 +395,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -440,8 +440,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -485,8 +485,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -530,8 +530,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -575,8 +575,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -620,8 +620,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -668,8 +668,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -713,8 +713,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -758,8 +758,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -803,8 +803,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -848,8 +848,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ 
env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -893,8 +893,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -938,8 +938,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -947,6 +947,34 @@ jobs: docker rm -f "$(docker ps -a -q)" ||: sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" ############################################################################################ +##################################### Docker images ####################################### +############################################################################################ + DockerServerImages: + needs: + - BuilderDebRelease + - BuilderDebAarch64 + runs-on: [self-hosted, style-checker] + steps: + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + with: + fetch-depth: 0 # otherwise we will have no version info + - name: Check docker clickhouse/clickhouse-server building + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 docker_server.py --release-type head + python3 docker_server.py --release-type head --no-ubuntu \ + --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper + - name: Cleanup + if: always() + run: | + docker kill "$(docker ps -q)" ||: + docker rm -f "$(docker ps -a -q)" ||: + sudo rm -fr "$TEMP_PATH" +############################################################################################ ##################################### BUILD REPORTER ####################################### ############################################################################################ BuilderReport: @@ -964,10 +992,16 @@ jobs: steps: - name: Set envs run: | + DEPENDENCIES=$(cat << 'EOF' | jq '. 
| length' + ${{ toJSON(needs) }} + EOF + ) + echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV" cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/report_check - REPORTS_PATH=${{runner.temp}}/reports_dir CHECK_NAME=ClickHouse build check (actions) + REPORTS_PATH=${{runner.temp}}/reports_dir + REPORTS_PATH=${{runner.temp}}/reports_dir + TEMP_PATH=${{runner.temp}}/report_check EOF - name: Download json reports uses: actions/download-artifact@v2 @@ -983,7 +1017,7 @@ jobs: sudo rm -fr "$TEMP_PATH" mkdir -p "$TEMP_PATH" cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" + python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES" - name: Cleanup if: always() run: | diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 5b47f94a324..bd54fd975c0 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -72,3 +72,53 @@ jobs: with: name: changed_images path: ${{ runner.temp }}/changed_images.json + BuilderCoverity: + needs: DockerHubPush + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + BUILD_NAME=coverity + CACHES_PATH=${{runner.temp}}/../ccaches + CHECK_NAME=ClickHouse build check (actions) + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + TEMP_PATH=${{runner.temp}}/build_check + EOF + echo "COVERITY_TOKEN=${{ secrets.COVERITY_TOKEN }}" >> "$GITHUB_ENV" + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + id: coverity-checkout + uses: actions/checkout@v2 + with: + submodules: 'true' + fetch-depth: 0 # otherwise we will have no info about contributors + - name: Build + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME" + - name: Upload Coverity Analysis + if: ${{ success() || failure() }} + run: | + curl --form token="${COVERITY_TOKEN}" \ + --form email='security+coverity@clickhouse.com' \ + --form file="@$TEMP_PATH/$BUILD_NAME/coverity-scan.tgz" \ + --form version="${GITHUB_REF#refs/heads/}-${GITHUB_SHA::6}" \ + --form description="Nightly Scan: $(date +'%Y-%m-%dT%H:%M:%S')" \ + https://scan.coverity.com/builds?project=ClickHouse%2FClickHouse + - name: Cleanup + if: always() + run: | + docker kill "$(docker ps -q)" ||: + docker rm -f "$(docker ps -a -q)" ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 87a31b9683c..0e68a68e8f5 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -4,7 +4,7 @@ env: # Force the stdout and stderr streams to be unbuffered PYTHONUNBUFFERED: 1 -on: # yamllint disable-line rule:truthy +on: # yamllint disable-line rule:truthy pull_request: types: - synchronize @@ -153,13 +153,19 @@ jobs: EOF - name: Clear repository run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Fast Test - run: | + sudo rm -fr "$GITHUB_WORKSPACE" + mkdir "$GITHUB_WORKSPACE" sudo rm -fr "$TEMP_PATH" mkdir -p "$TEMP_PATH" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Download changed images + uses: actions/download-artifact@v2 + with: +
name: changed_images + path: ${{ env.TEMP_PATH }} + - name: Fast Test + run: | cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" cd "$REPO_COPY/tests/ci" && python3 fast_test_check.py - name: Cleanup @@ -272,8 +278,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -317,8 +323,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -362,8 +368,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -404,8 +410,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -446,8 +452,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -491,8 +497,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -536,8 +542,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -581,8 +587,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -626,8 +632,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -671,8 +677,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -719,8 +725,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ 
env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -764,8 +770,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -809,8 +815,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -854,8 +860,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -899,8 +905,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -944,8 +950,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -989,8 +995,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -998,6 +1004,34 @@ jobs: docker rm -f "$(docker ps -a -q)" ||: sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" ############################################################################################ +##################################### Docker images ####################################### +############################################################################################ + DockerServerImages: + needs: + - BuilderDebRelease + - BuilderDebAarch64 + runs-on: [self-hosted, style-checker] + steps: + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + with: + fetch-depth: 0 # otherwise we will have no version info + - name: Check docker clickhouse/clickhouse-server building + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 docker_server.py --release-type head --no-push + python3 docker_server.py --release-type head --no-push --no-ubuntu \ + --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper + - name: Cleanup + if: always() + run: | + docker kill "$(docker ps -q)" ||: + docker rm -f "$(docker ps -a -q)" ||: + sudo rm -fr "$TEMP_PATH" +############################################################################################ ##################################### BUILD REPORTER ####################################### ############################################################################################ BuilderReport: @@ -1016,10 +1050,15 @@ jobs: steps: - name: Set envs run: | + DEPENDENCIES=$(cat << 'EOF' | jq '. 
| length' + ${{ toJSON(needs) }} + EOF + ) + echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV" cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/report_check - REPORTS_PATH=${{runner.temp}}/reports_dir CHECK_NAME=ClickHouse build check (actions) + REPORTS_PATH=${{runner.temp}}/reports_dir + TEMP_PATH=${{runner.temp}}/report_check EOF - name: Download json reports uses: actions/download-artifact@v2 @@ -1035,7 +1074,7 @@ jobs: sudo rm -fr "$TEMP_PATH" mkdir -p "$TEMP_PATH" cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" + python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES" - name: Cleanup if: always() run: | @@ -3138,6 +3177,7 @@ jobs: needs: - StyleCheck - DockerHubPush + - DockerServerImages - CheckLabels - BuilderReport - FastTest diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index bd62e64409f..ea2e1ed33fb 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -36,3 +36,28 @@ jobs: overwrite: true tag: ${{ github.ref }} file_glob: true + ############################################################################################ + ##################################### Docker images ####################################### + ############################################################################################ + DockerServerImages: + runs-on: [self-hosted, style-checker] + steps: + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + with: + fetch-depth: 0 # otherwise we will have no version info + - name: Check docker clickhouse/clickhouse-server building + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 docker_server.py --release-type auto --version "${{ github.ref }}" + python3 docker_server.py --release-type auto --version "${{ github.ref }}" --no-ubuntu \ + --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper + - name: Cleanup + if: always() + run: | + docker kill "$(docker ps -q)" ||: + docker rm -f "$(docker ps -a -q)" ||: + sudo rm -fr "$TEMP_PATH" diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index d916699acc2..91e1a224204 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -146,8 +146,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -187,8 +187,8 @@ jobs: - name: Upload build URLs to artifacts uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -232,8 +232,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -277,8 +277,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ 
env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -322,8 +322,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -367,8 +367,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -412,8 +412,8 @@ jobs: if: ${{ success() || failure() }} uses: actions/upload-artifact@v2 with: - name: ${{ env.BUILD_NAME }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - name: Cleanup if: always() run: | @@ -436,10 +436,16 @@ jobs: steps: - name: Set envs run: | + DEPENDENCIES=$(cat << 'EOF' | jq '. | length' + ${{ toJSON(needs) }} + EOF + ) + echo "DEPENDENCIES=$DEPENDENCIES" >> "$GITHUB_ENV" cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/report_check - REPORTS_PATH=${{runner.temp}}/reports_dir CHECK_NAME=ClickHouse build check (actions) + REPORTS_PATH=${{runner.temp}}/reports_dir + REPORTS_PATH=${{runner.temp}}/reports_dir + TEMP_PATH=${{runner.temp}}/report_check EOF - name: Download json reports uses: actions/download-artifact@v2 @@ -455,7 +461,7 @@ jobs: sudo rm -fr "$TEMP_PATH" mkdir -p "$TEMP_PATH" cd "$GITHUB_WORKSPACE/tests/ci" - python3 build_report_check.py "$CHECK_NAME" + python3 build_report_check.py "$CHECK_NAME" "$DEPENDENCIES" - name: Cleanup if: always() run: | diff --git a/CMakeLists.txt b/CMakeLists.txt index a9ce64b87ba..dad9a25ab26 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -222,6 +222,12 @@ else () set(NO_WHOLE_ARCHIVE --no-whole-archive) endif () +option(ENABLE_CURL_BUILD "Enable the curl, azure, and sentry builds. On by default, except on MacOS." ON) +if (OS_DARWIN) + # Disable the curl, azure, sentry build on MacOS + set (ENABLE_CURL_BUILD OFF) +endif () + # Ignored if `lld` is used option(ADD_GDB_INDEX_FOR_GOLD "Add .gdb-index to resulting binaries for gold linker.") @@ -294,14 +300,19 @@ include(cmake/cpu_features.cmake) # Enable it explicitly. set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables") -# Reproducible builds -# If turned `ON`, remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE(). -option(ENABLE_BUILD_PATH_MAPPING "Enable remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE(). It's to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ON) +# Reproducible builds. +if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") + set (ENABLE_BUILD_PATH_MAPPING_DEFAULT OFF) +else () + set (ENABLE_BUILD_PATH_MAPPING_DEFAULT ON) +endif () + +option (ENABLE_BUILD_PATH_MAPPING "Enable remapping of file source paths in debug info, predefined preprocessor macros, and __builtin_FILE(). It's used to generate reproducible builds. 
See https://reproducible-builds.org/docs/build-path" ${ENABLE_BUILD_PATH_MAPPING_DEFAULT}) if (ENABLE_BUILD_PATH_MAPPING) set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.") -endif() +endif () if (${CMAKE_VERSION} VERSION_LESS "3.12.4") # CMake < 3.12 doesn't support setting 20 as a C++ standard version. diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp index 311349a2ba7..b27a904b31a 100644 --- a/base/daemon/BaseDaemon.cpp +++ b/base/daemon/BaseDaemon.cpp @@ -828,7 +828,6 @@ void BaseDaemon::initializeTerminationAndSignalProcessing() /// Setup signal handlers. /// SIGTSTP is added for debugging purposes. To output a stack trace of any running thread at anytime. - addSignalHandler({SIGABRT, SIGSEGV, SIGILL, SIGBUS, SIGSYS, SIGFPE, SIGPIPE, SIGTSTP, SIGTRAP}, signalHandler, &handled_signals); addSignalHandler({SIGHUP}, closeLogsSignalHandler, &handled_signals); addSignalHandler({SIGINT, SIGQUIT, SIGTERM}, terminateRequestedSignalHandler, &handled_signals); diff --git a/base/loggers/Loggers.cpp b/base/loggers/Loggers.cpp index 7c627ad2272..512e44f79c7 100644 --- a/base/loggers/Loggers.cpp +++ b/base/loggers/Loggers.cpp @@ -197,7 +197,6 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log Poco::AutoPtr pf = new OwnPatternFormatter(color_enabled); Poco::AutoPtr log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel); - logger.warning("Logging " + console_log_level_string + " to console"); log->setLevel(console_log_level); split->addChannel(log, "console"); } diff --git a/cmake/strip_binary.cmake b/cmake/strip_binary.cmake index 1f24790a159..2d6a3888503 100644 --- a/cmake/strip_binary.cmake +++ b/cmake/strip_binary.cmake @@ -46,7 +46,7 @@ macro(clickhouse_make_empty_debug_info_for_nfpm) add_custom_command(TARGET ${EMPTY_DEBUG_TARGET} POST_BUILD COMMAND mkdir -p "${EMPTY_DEBUG_DESTINATION_DIR}/lib/debug" COMMAND touch "${EMPTY_DEBUG_DESTINATION_DIR}/lib/debug/${EMPTY_DEBUG_TARGET}.debug" - COMMENT "Addiding empty debug info for NFPM" VERBATIM + COMMENT "Adding empty debug info for NFPM" VERBATIM ) install(FILES "${EMPTY_DEBUG_DESTINATION_DIR}/lib/debug/${EMPTY_DEBUG_TARGET}.debug" DESTINATION "${CMAKE_INSTALL_LIBDIR}/debug/${CMAKE_INSTALL_FULL_BINDIR}" COMPONENT clickhouse) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 9cf307c473e..1f03c0fd341 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -119,9 +119,13 @@ add_contrib (fastops-cmake fastops) add_contrib (libuv-cmake libuv) add_contrib (amqpcpp-cmake AMQP-CPP) # requires: libuv add_contrib (cassandra-cmake cassandra) # requires: libuv -add_contrib (curl-cmake curl) -add_contrib (azure-cmake azure) -add_contrib (sentry-native-cmake sentry-native) # requires: curl + +if (ENABLE_CURL_BUILD) + add_contrib (curl-cmake curl) + add_contrib (azure-cmake azure) + add_contrib (sentry-native-cmake sentry-native) # requires: curl +endif() + add_contrib (fmtlib-cmake fmtlib) add_contrib (krb5-cmake krb5) add_contrib (cyrus-sasl-cmake cyrus-sasl) # for krb5 diff --git a/contrib/arrow b/contrib/arrow index 1d9cc51daa4..efdcd015cfd 160000 --- a/contrib/arrow +++ b/contrib/arrow @@ -1 +1 @@ -Subproject commit 1d9cc51daa4e7e9fc6926320ef73759818bd736e +Subproject commit efdcd015cfdee1b6aa349c9ca227ca12c3d697f5 diff --git a/contrib/curl b/contrib/curl index 3b8bbbbd160..801bd5138ce 160000 --- a/contrib/curl +++ b/contrib/curl @@ -1 +1 @@ -Subproject commit 
3b8bbbbd1609c638a3d3d0acb148a33dedb67be3 +Subproject commit 801bd5138ce31aa0d906fa4e2eabfc599d74e793 diff --git a/contrib/curl-cmake/CMakeLists.txt b/contrib/curl-cmake/CMakeLists.txt index 589f40384e3..b1e1a0ded8a 100644 --- a/contrib/curl-cmake/CMakeLists.txt +++ b/contrib/curl-cmake/CMakeLists.txt @@ -32,7 +32,6 @@ set (SRCS "${LIBRARY_DIR}/lib/transfer.c" "${LIBRARY_DIR}/lib/strcase.c" "${LIBRARY_DIR}/lib/easy.c" - "${LIBRARY_DIR}/lib/security.c" "${LIBRARY_DIR}/lib/curl_fnmatch.c" "${LIBRARY_DIR}/lib/fileinfo.c" "${LIBRARY_DIR}/lib/wildcard.c" @@ -115,6 +114,12 @@ set (SRCS "${LIBRARY_DIR}/lib/curl_get_line.c" "${LIBRARY_DIR}/lib/altsvc.c" "${LIBRARY_DIR}/lib/socketpair.c" + "${LIBRARY_DIR}/lib/bufref.c" + "${LIBRARY_DIR}/lib/dynbuf.c" + "${LIBRARY_DIR}/lib/hsts.c" + "${LIBRARY_DIR}/lib/http_aws_sigv4.c" + "${LIBRARY_DIR}/lib/mqtt.c" + "${LIBRARY_DIR}/lib/rename.c" "${LIBRARY_DIR}/lib/vauth/vauth.c" "${LIBRARY_DIR}/lib/vauth/cleartext.c" "${LIBRARY_DIR}/lib/vauth/cram.c" @@ -131,8 +136,6 @@ set (SRCS "${LIBRARY_DIR}/lib/vtls/gtls.c" "${LIBRARY_DIR}/lib/vtls/vtls.c" "${LIBRARY_DIR}/lib/vtls/nss.c" - "${LIBRARY_DIR}/lib/vtls/polarssl.c" - "${LIBRARY_DIR}/lib/vtls/polarssl_threadlock.c" "${LIBRARY_DIR}/lib/vtls/wolfssl.c" "${LIBRARY_DIR}/lib/vtls/schannel.c" "${LIBRARY_DIR}/lib/vtls/schannel_verify.c" @@ -141,6 +144,7 @@ set (SRCS "${LIBRARY_DIR}/lib/vtls/mbedtls.c" "${LIBRARY_DIR}/lib/vtls/mesalink.c" "${LIBRARY_DIR}/lib/vtls/bearssl.c" + "${LIBRARY_DIR}/lib/vtls/keylog.c" "${LIBRARY_DIR}/lib/vquic/ngtcp2.c" "${LIBRARY_DIR}/lib/vquic/quiche.c" "${LIBRARY_DIR}/lib/vssh/libssh2.c" diff --git a/contrib/krb5-cmake/CMakeLists.txt b/contrib/krb5-cmake/CMakeLists.txt index 685e8737ef0..214d23bc2a9 100644 --- a/contrib/krb5-cmake/CMakeLists.txt +++ b/contrib/krb5-cmake/CMakeLists.txt @@ -1,4 +1,4 @@ -set (ENABLE_KRB5_DEFAULT 1) +set (ENABLE_KRB5_DEFAULT ${ENABLE_LIBRARIES}) if (NOT CMAKE_SYSTEM_NAME MATCHES "Linux" AND NOT (CMAKE_SYSTEM_NAME MATCHES "Darwin" AND NOT CMAKE_CROSSCOMPILING)) message (WARNING "krb5 disabled in non-Linux and non-native-Darwin environments") set (ENABLE_KRB5_DEFAULT 0) @@ -16,6 +16,7 @@ if(NOT AWK_PROGRAM) endif() set(KRB5_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/krb5/src") +set(KRB5_ET_BIN_DIR "${CMAKE_CURRENT_BINARY_DIR}/include_private") set(ALL_SRCS "${KRB5_SOURCE_DIR}/util/et/et_name.c" @@ -90,7 +91,6 @@ set(ALL_SRCS "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/get_tkt_flags.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/set_allowable_enctypes.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/k5sealiov.c" - "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/canon_name.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/inq_cred.c" "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/export_sec_context.c" @@ -143,11 +143,12 @@ set(ALL_SRCS "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_buffer_set.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_set.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_token.c" - "${KRB5_SOURCE_DIR}/lib/gssapi/generic/gssapi_err_generic.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/disp_major_status.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_seqstate.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/util_errmap.c" "${KRB5_SOURCE_DIR}/lib/gssapi/generic/rel_buffer.c" + "${KRB5_ET_BIN_DIR}/lib/gssapi/krb5/gssapi_err_krb5.c" + "${KRB5_ET_BIN_DIR}/lib/gssapi/generic/gssapi_err_generic.c" "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/spnego_mech.c" "${KRB5_SOURCE_DIR}/lib/gssapi/spnego/negoex_util.c" @@ -256,8 +257,8 @@ set(ALL_SRCS 
"${KRB5_SOURCE_DIR}/util/profile/prof_parse.c" "${KRB5_SOURCE_DIR}/util/profile/prof_get.c" "${KRB5_SOURCE_DIR}/util/profile/prof_set.c" - "${KRB5_SOURCE_DIR}/util/profile/prof_err.c" "${KRB5_SOURCE_DIR}/util/profile/prof_init.c" + "${KRB5_ET_BIN_DIR}/util/profile/prof_err.c" "${KRB5_SOURCE_DIR}/lib/krb5/krb/fwd_tgt.c" "${KRB5_SOURCE_DIR}/lib/krb5/krb/conv_creds.c" "${KRB5_SOURCE_DIR}/lib/krb5/krb/fast.c" @@ -450,13 +451,12 @@ set(ALL_SRCS - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.c" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.c" - + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/k5e1_err.c" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kdb5_err.c" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/asn1_err.c" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb5_err.c" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb524_err.c" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kv5m_err.c" "${KRB5_SOURCE_DIR}/lib/krb5/rcache/rc_base.c" @@ -473,7 +473,7 @@ set(ALL_SRCS ) add_custom_command( - OUTPUT "${KRB5_SOURCE_DIR}/util/et/compile_et" + OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/compile_et" COMMAND /bin/sh ./config_script ./compile_et.sh @@ -481,50 +481,17 @@ add_custom_command( ${AWK_PROGRAM} sed > - compile_et + ${CMAKE_CURRENT_BINARY_DIR}/compile_et DEPENDS "${KRB5_SOURCE_DIR}/util/et/compile_et.sh" "${KRB5_SOURCE_DIR}/util/et/config_script" WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/util/et" ) -file(GLOB_RECURSE ET_FILES - "${KRB5_SOURCE_DIR}/*.et" -) - -function(preprocess_et out_var) - set(result) - foreach(in_f ${ARGN}) - string(REPLACE - .et - .c - F_C - ${in_f} - ) - string(REPLACE - .et - .h - F_H - ${in_f} - ) - - get_filename_component(ET_PATH ${in_f} DIRECTORY) - - add_custom_command(OUTPUT ${F_C} ${F_H} - COMMAND perl "${KRB5_SOURCE_DIR}/util/et/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${in_f} - DEPENDS ${in_f} "${KRB5_SOURCE_DIR}/util/et/compile_et" - WORKING_DIRECTORY ${ET_PATH} - VERBATIM - ) - list(APPEND result ${F_C}) - endforeach() - set(${out_var} "${result}" PARENT_SCOPE) -endfunction() - add_custom_command( - OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h" + OUTPUT "${KRB5_ET_BIN_DIR}/error_map.h" COMMAND perl -I../../../util ../../../util/gen-map.pl - -oerror_map.h + -o${KRB5_ET_BIN_DIR}/error_map.h NAME=gsserrmap KEY=OM_uint32 VALUE=char* @@ -536,22 +503,21 @@ add_custom_command( add_custom_target( ERROR_MAP_H - DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/krb5/error_map.h" + DEPENDS "${KRB5_ET_BIN_DIR}/error_map.h" VERBATIM ) add_custom_command( - OUTPUT "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h" - COMMAND perl -w -I../../../util ../../../util/gen.pl bimap errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp + OUTPUT "${KRB5_ET_BIN_DIR}/errmap.h" + COMMAND perl -w -I../../../util ../../../util/gen.pl bimap ${KRB5_ET_BIN_DIR}/errmap.h NAME=mecherrmap LEFT=OM_uint32 RIGHT=struct\ mecherror LEFTPRINT=print_OM_uint32 RIGHTPRINT=mecherror_print LEFTCMP=cmp_OM_uint32 RIGHTCMP=mecherror_cmp WORKING_DIRECTORY "${KRB5_SOURCE_DIR}/lib/gssapi/generic" ) add_custom_target( ERRMAP_H - DEPENDS "${KRB5_SOURCE_DIR}/lib/gssapi/generic/errmap.h" + DEPENDS "${KRB5_ET_BIN_DIR}/errmap.h" VERBATIM ) - add_custom_target( KRB_5_H DEPENDS 
"${CMAKE_CURRENT_BINARY_DIR}/include/krb5/krb5.h" @@ -567,7 +533,40 @@ add_dependencies( KRB_5_H ) -preprocess_et(processed_et_files ${ET_FILES}) +# +# Generate error tables +# +function(preprocess_et et_path) + string(REPLACE .et .c F_C ${et_path}) + string(REPLACE .et .h F_H ${et_path}) + get_filename_component(et_dir ${et_path} DIRECTORY) + get_filename_component(et_name ${et_path} NAME_WLE) + + add_custom_command(OUTPUT ${F_C} ${F_H} ${KRB5_ET_BIN_DIR}/${et_name}.h + COMMAND perl "${CMAKE_CURRENT_BINARY_DIR}/compile_et" -d "${KRB5_SOURCE_DIR}/util/et" ${et_path} + # for #include w/o path (via -iquote) + COMMAND ${CMAKE_COMMAND} -E create_symlink ${F_H} ${KRB5_ET_BIN_DIR}/${et_name}.h + DEPENDS ${et_path} "${CMAKE_CURRENT_BINARY_DIR}/compile_et" + WORKING_DIRECTORY ${et_dir} + VERBATIM + ) +endfunction() + +function(generate_error_tables) + file(GLOB_RECURSE ET_FILES "${KRB5_SOURCE_DIR}/*.et") + foreach(et_path ${ET_FILES}) + string(REPLACE ${KRB5_SOURCE_DIR} ${KRB5_ET_BIN_DIR} et_bin_path ${et_path}) + string(REPLACE / _ et_target_name ${et_path}) + get_filename_component(et_bin_dir ${et_bin_path} DIRECTORY) + add_custom_command(OUTPUT ${et_bin_path} + COMMAND ${CMAKE_COMMAND} -E make_directory ${et_bin_dir} + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${et_path} ${et_bin_path} + VERBATIM + ) + preprocess_et(${et_bin_path}) + endforeach() +endfunction() +generate_error_tables() if(CMAKE_SYSTEM_NAME MATCHES "Darwin") add_custom_command( @@ -634,12 +633,12 @@ file(MAKE_DIRECTORY SET(KRBHDEP "${KRB5_SOURCE_DIR}/include/krb5/krb5.hin" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb5_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/k5e1_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kdb5_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/kv5m_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/krb524_err.h" - "${KRB5_SOURCE_DIR}/lib/krb5/error_tables/asn1_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb5_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/k5e1_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kdb5_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/kv5m_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/krb524_err.h" + "${KRB5_ET_BIN_DIR}/lib/krb5/error_tables/asn1_err.h" ) # cmake < 3.18 does not have 'cat' command @@ -656,6 +655,11 @@ target_include_directories(_krb5 SYSTEM BEFORE PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/include" ) +target_compile_options(_krb5 PRIVATE + # For '#include "file.h"' + -iquote "${CMAKE_CURRENT_BINARY_DIR}/include_private" +) + target_include_directories(_krb5 PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/include_private" # For autoconf.h and other generated headers. ${KRB5_SOURCE_DIR} diff --git a/contrib/llvm-cmake/CMakeLists.txt b/contrib/llvm-cmake/CMakeLists.txt index 6ff07f0e016..87c8a65510f 100644 --- a/contrib/llvm-cmake/CMakeLists.txt +++ b/contrib/llvm-cmake/CMakeLists.txt @@ -1,12 +1,9 @@ -# During cross-compilation in our CI we have to use llvm-tblgen and other building tools -# tools to be build for host architecture and everything else for target architecture (e.g. AArch64) -# Possible workaround is to use llvm-tblgen from some package... 
-# But lets just enable LLVM for native builds -if (CMAKE_CROSSCOMPILING OR SANITIZE STREQUAL "undefined") - set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF) +if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined") + set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF) else() - set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON) + set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON) endif() + option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT}) if (NOT ENABLE_EMBEDDED_COMPILER) diff --git a/contrib/poco b/contrib/poco index 520a90e02e3..008b1646947 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit 520a90e02e3e5cb90afeae1846d161dbc508a6f1 +Subproject commit 008b16469471d55b176db181756c94e3f14dd2dc diff --git a/contrib/replxx b/contrib/replxx index 6f0b6f151ae..3fd0e3c9364 160000 --- a/contrib/replxx +++ b/contrib/replxx @@ -1 +1 @@ -Subproject commit 6f0b6f151ae2a044625ae93acd19ca365fcea64d +Subproject commit 3fd0e3c9364a589447453d9906d854ebd8d385c5 diff --git a/contrib/unixodbc b/contrib/unixodbc index b0ad30f7f62..a2cd5395e8c 160000 --- a/contrib/unixodbc +++ b/contrib/unixodbc @@ -1 +1 @@ -Subproject commit b0ad30f7f6289c12b76f04bfb9d466374bb32168 +Subproject commit a2cd5395e8c7f7390025ec93af5bfebef3fb5fcd diff --git a/debian/clickhouse-server.service b/debian/clickhouse-server.service index a9400b24270..028b4fbf8ab 100644 --- a/debian/clickhouse-server.service +++ b/debian/clickhouse-server.service @@ -20,7 +20,7 @@ ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml EnvironmentFile=-/etc/default/clickhouse LimitCORE=infinity LimitNOFILE=500000 -CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE +CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE [Install] # ClickHouse should not start from the rescue shell (rescue.target). diff --git a/docker/docs/builder/Dockerfile b/docker/docs/builder/Dockerfile index 906312a19a2..061251aa7f0 100644 --- a/docker/docs/builder/Dockerfile +++ b/docker/docs/builder/Dockerfile @@ -1,4 +1,3 @@ -# rebuild in #33610 # docker build -t clickhouse/docs-builder . 
FROM ubuntu:20.04 diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile new file mode 100644 index 00000000000..068377e8f8c --- /dev/null +++ b/docker/keeper/Dockerfile @@ -0,0 +1,74 @@ +FROM ubuntu:20.04 AS glibc-donor + +ARG TARGETARCH +RUN arch=${TARGETARCH:-amd64} \ + && case $arch in \ + amd64) rarch=x86_64 ;; \ + arm64) rarch=aarch64 ;; \ + esac \ + && ln -s "${rarch}-linux-gnu" /lib/linux-gnu + + +FROM alpine + +ENV LANG=en_US.UTF-8 \ + LANGUAGE=en_US:en \ + LC_ALL=en_US.UTF-8 \ + TZ=UTC \ + CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml + +COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/ +COPY --from=glibc-donor /etc/nsswitch.conf /etc/ +COPY entrypoint.sh /entrypoint.sh + +ARG TARGETARCH +RUN arch=${TARGETARCH:-amd64} \ + && case $arch in \ + amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \ + arm64) ln -sf /lib/ld-2.31.so /lib/ld-linux-aarch64.so.1 ;; \ + esac + +ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release" +ARG VERSION="22.4.1.917" +ARG PACKAGES="clickhouse-keeper" + +# user/group precreated explicitly with fixed uid/gid on purpose. +# It is especially important for rootless containers: in that case entrypoint +# can't do chown and owners of mounted volumes should be configured externally. +# We do that in advance at the beginning of Dockerfile before any packages will be +# installed to prevent picking those uid / gid by some unrelated software. +# The same uid / gid (101) is used both for alpine and ubuntu. 
+ + +ARG TARGETARCH +RUN arch=${TARGETARCH:-amd64} \ + && for package in ${PACKAGES}; do \ + { \ + { echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \ + && wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" -O "/tmp/${package}-${VERSION}-${arch}.tgz" \ + && tar xvzf "/tmp/${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / ; \ + } || \ + { echo "Fallback to ${REPOSITORY}/${package}-${VERSION}.tgz" \ + && wget -c -q "${REPOSITORY}/${package}-${VERSION}.tgz" -O "/tmp/${package}-${VERSION}.tgz" \ + && tar xvzf "/tmp/${package}-${VERSION}.tgz" --strip-components=2 -C / ; \ + } ; \ + } || exit 1 \ + ; done \ + && rm /tmp/*.tgz /install -r \ + && addgroup -S -g 101 clickhouse \ + && adduser -S -h /var/lib/clickhouse -s /bin/bash -G clickhouse -g "ClickHouse keeper" -u 101 clickhouse \ + && mkdir -p /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper \ + && chown clickhouse:clickhouse /var/lib/clickhouse \ + && chown root:clickhouse /var/log/clickhouse-keeper \ + && chmod +x /entrypoint.sh \ + && apk add --no-cache su-exec bash tzdata \ + && cp /usr/share/zoneinfo/UTC /etc/localtime \ + && echo "UTC" > /etc/timezone \ + && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper + + +EXPOSE 2181 10181 44444 + +VOLUME /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/keeper/Dockerfile.alpine b/docker/keeper/Dockerfile.alpine new file mode 120000 index 00000000000..1d1fe94df49 --- /dev/null +++ b/docker/keeper/Dockerfile.alpine @@ -0,0 +1 @@ +Dockerfile \ No newline at end of file diff --git a/docker/keeper/entrypoint.sh b/docker/keeper/entrypoint.sh new file mode 100644 index 00000000000..3aacf655c28 --- /dev/null +++ b/docker/keeper/entrypoint.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +set +x +set -eo pipefail +shopt -s nullglob + +DO_CHOWN=1 +if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then + DO_CHOWN=0 +fi + +CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}" +CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}" + +# support --user +if [ "$(id -u)" = "0" ]; then + USER=$CLICKHOUSE_UID + GROUP=$CLICKHOUSE_GID + if command -v gosu &> /dev/null; then + gosu="gosu $USER:$GROUP" + elif command -v su-exec &> /dev/null; then + gosu="su-exec $USER:$GROUP" + else + echo "No gosu/su-exec detected!" + exit 1 + fi +else + USER="$(id -u)" + GROUP="$(id -g)" + gosu="" + DO_CHOWN=0 +fi + +KEEPER_CONFIG="${KEEPER_CONFIG:-/etc/clickhouse-keeper/config.yaml}" + +if [ -f "$KEEPER_CONFIG" ] && ! $gosu test -f "$KEEPER_CONFIG" -a -r "$KEEPER_CONFIG"; then + echo "Configuration file '$KEEPER_CONFIG' isn't readable by user with id '$USER'" + exit 1 +fi + +DATA_DIR="${CLICKHOUSE_DATA_DIR:-/var/lib/clickhouse}" +LOG_DIR="${LOG_DIR:-/var/log/clickhouse-keeper}" +LOG_PATH="${LOG_DIR}/clickhouse-keeper.log" +ERROR_LOG_PATH="${LOG_DIR}/clickhouse-keeper.err.log" +COORDINATION_LOG_DIR="${DATA_DIR}/coordination/log" +COORDINATION_SNAPSHOT_DIR="${DATA_DIR}/coordination/snapshots" +CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0} + +for dir in "$DATA_DIR" \ + "$LOG_DIR" \ + "$TMP_DIR" \ + "$COORDINATION_LOG_DIR" \ + "$COORDINATION_SNAPSHOT_DIR" +do + # check if variable not empty + [ -z "$dir" ] && continue + # ensure directories exist + if ! 
mkdir -p "$dir"; then + echo "Couldn't create necessary directory: $dir" + exit 1 + fi + + if [ "$DO_CHOWN" = "1" ]; then + # ensure proper directories permissions + # but skip it for if directory already has proper premissions, cause recursive chown may be slow + if [ "$(stat -c %u "$dir")" != "$USER" ] || [ "$(stat -c %g "$dir")" != "$GROUP" ]; then + chown -R "$USER:$GROUP" "$dir" + fi + elif ! $gosu test -d "$dir" -a -w "$dir" -a -r "$dir"; then + echo "Necessary directory '$dir' isn't accessible by user with id '$USER'" + exit 1 + fi +done + +# if no args passed to `docker run` or first argument start with `--`, then the user is passing clickhouse-server arguments +if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then + # Watchdog is launched by default, but does not send SIGINT to the main process, + # so the container can't be finished by ctrl+c + export CLICKHOUSE_WATCHDOG_ENABLE + + cd /var/lib/clickhouse + + # There is a config file. It is already tested with gosu (if it is readably by keeper user) + if [ -f "$KEEPER_CONFIG" ]; then + exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@" + fi + + # There is no config file. Will use embedded one + exec $gosu /usr/bin/clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@" +fi + +# Otherwise, we assume the user want to run his own process, for example a `bash` shell to explore this image +exec "$@" diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index 31416e1a0ee..2bedb50dd40 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -25,13 +25,23 @@ read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}" env cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" .. +if [ "coverity" == "$COMBINED_OUTPUT" ] +then + mkdir -p /opt/cov-analysis + + wget --post-data "token=$COVERITY_TOKEN&project=ClickHouse%2FClickHouse" -qO- https://scan.coverity.com/download/linux64 | tar xz -C /opt/cov-analysis --strip-components 1 + export PATH=$PATH:/opt/cov-analysis/bin + cov-configure --config ./coverity.config --template --comptype clangcc --compiler "$CC" + SCAN_WRAPPER="cov-build --config ./coverity.config --dir cov-int" +fi + cache_status # clear cache stats ccache --zero-stats ||: # No quotes because I want it to expand to nothing if empty. -# shellcheck disable=SC2086 -ninja $NINJA_FLAGS clickhouse-bundle +# shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty. 
+$SCAN_WRAPPER ninja $NINJA_FLAGS clickhouse-bundle cache_status @@ -91,6 +101,12 @@ then mv "$COMBINED_OUTPUT.tgz" /output fi +if [ "coverity" == "$COMBINED_OUTPUT" ] +then + tar -cv -I pigz -f "coverity-scan.tgz" cov-int + mv "coverity-scan.tgz" /output +fi + # Also build fuzzers if any sanitizer specified # if [ -n "$SANITIZER" ] # then diff --git a/docker/packager/packager b/docker/packager/packager index be25b185dc3..98d57b7da17 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -176,6 +176,9 @@ def parse_env_variables( if package_type == "performance": result.append("COMBINED_OUTPUT=performance") cmake_flags.append("-DENABLE_TESTS=0") + elif package_type == "coverity": + result.append("COMBINED_OUTPUT=coverity") + result.append('COVERITY_TOKEN="$COVERITY_TOKEN"') elif split_binary: result.append("COMBINED_OUTPUT=shared_build") @@ -262,9 +265,8 @@ if __name__ == "__main__": # and configs to be used for performance test. parser.add_argument( "--package-type", - choices=("deb", "binary", "performance"), + choices=["deb", "binary", "performance", "coverity"], required=True, - help="a build type", ) parser.add_argument( "--clickhouse-repo-path", @@ -322,7 +324,11 @@ if __name__ == "__main__": if not os.path.isabs(args.output_dir): args.output_dir = os.path.abspath(os.path.join(os.getcwd(), args.output_dir)) - image_type = "binary" if args.package_type == "performance" else args.package_type + image_type = ( + "binary" + if args.package_type in ("performance", "coverity") + else args.package_type + ) image_name = "clickhouse/binary-builder" if not os.path.isabs(args.clickhouse_repo_path): diff --git a/docker/server/.gitignore b/docker/server/.gitignore deleted file mode 100644 index 692758d55aa..00000000000 --- a/docker/server/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -alpine-root/* -tgz-packages/* diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile deleted file mode 100644 index 5b7990ab030..00000000000 --- a/docker/server/Dockerfile +++ /dev/null @@ -1,122 +0,0 @@ -FROM ubuntu:20.04 - -# ARG for quick switch to a given ubuntu mirror -ARG apt_archive="http://archive.ubuntu.com" -RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list - -ARG repository="deb https://packages.clickhouse.com/deb stable main" -ARG version=22.1.1.* - -# set non-empty deb_location_url url to create a docker image -# from debs created by CI build, for example: -# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://clickhouse-builds.s3.yandex.net/21852/069cfbff388b3d478d1a16dc7060b48073f5d522/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/" -t filimonovq/clickhouse-server:pr21852 -ARG deb_location_url="" - -# set non-empty single_binary_location_url to create docker image -# from a single binary url (useful for non-standard builds - with sanitizers, for arm64). -# for example (run on aarch64 server): -# docker build . --network host --build-arg single_binary_location_url="https://builds.clickhouse.com/master/aarch64/clickhouse" -t altinity/clickhouse-server:master-testing-arm -# note: clickhouse-odbc-bridge is not supported there. -ARG single_binary_location_url="" - -# see https://github.com/moby/moby/issues/4032#issuecomment-192327844 -ARG DEBIAN_FRONTEND=noninteractive - -# user/group precreated explicitly with fixed uid/gid on purpose. 
-# It is especially important for rootless containers: in that case entrypoint -# can't do chown and owners of mounted volumes should be configured externally. -# We do that in advance at the begining of Dockerfile before any packages will be -# installed to prevent picking those uid / gid by some unrelated software. -# The same uid / gid (101) is used both for alpine and ubuntu. - -# To drop privileges, we need 'su' command, that simply changes uid and gid. -# In fact, the 'su' command from Linux is not so simple, due to inherent vulnerability in Linux: -# https://ruderich.org/simon/notes/su-sudo-from-root-tty-hijacking -# It has to mitigate this drawback of Linux, and to do this, 'su' command is creating it's own pseudo-terminal -# and forwarding commands. Due to some ridiculous curcumstances, it does not work in Docker (or it does) -# and for these reasons people are using alternatives to the 'su' command in Docker, -# that don't mess with the terminal, don't care about closing the opened files, etc... -# but can only be safe to drop privileges inside Docker. -# The question - what implementation of 'su' command to use. -# It should be a simple script doing about just two syscalls. -# Some people tend to use 'gosu' tool that is written in Go. -# It is not used for several reasons: -# 1. Dependency on some foreign code in yet another programming language - does not sound alright. -# 2. Anselmo D. Adams suggested not to use it due to false positive alarms in some undisclosed security scanners. - -COPY su-exec.c /su-exec.c - -RUN groupadd -r clickhouse --gid=101 \ - && useradd -r -g clickhouse --uid=101 --home-dir=/var/lib/clickhouse --shell=/bin/bash clickhouse \ - && apt-get update \ - && apt-get install --yes --no-install-recommends \ - apt-transport-https \ - ca-certificates \ - dirmngr \ - gnupg \ - locales \ - wget \ - tzdata \ - && mkdir -p /etc/apt/sources.list.d \ - && apt-key adv --keyserver keyserver.ubuntu.com --recv 8919F6BD2B48D754 \ - && echo $repository > /etc/apt/sources.list.d/clickhouse.list \ - && if [ -n "$deb_location_url" ]; then \ - echo "installing from custom url with deb packages: $deb_location_url" \ - rm -rf /tmp/clickhouse_debs \ - && mkdir -p /tmp/clickhouse_debs \ - && wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-common-static_${version}_amd64.deb" -P /tmp/clickhouse_debs \ - && wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-client_${version}_all.deb" -P /tmp/clickhouse_debs \ - && wget --progress=bar:force:noscroll "${deb_location_url}/clickhouse-server_${version}_all.deb" -P /tmp/clickhouse_debs \ - && dpkg -i /tmp/clickhouse_debs/*.deb ; \ - elif [ -n "$single_binary_location_url" ]; then \ - echo "installing from single binary url: $single_binary_location_url" \ - && rm -rf /tmp/clickhouse_binary \ - && mkdir -p /tmp/clickhouse_binary \ - && wget --progress=bar:force:noscroll "$single_binary_location_url" -O /tmp/clickhouse_binary/clickhouse \ - && chmod +x /tmp/clickhouse_binary/clickhouse \ - && /tmp/clickhouse_binary/clickhouse install --user "clickhouse" --group "clickhouse" ; \ - else \ - echo "installing from repository: $repository" \ - && apt-get update \ - && apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \ - && apt-get install --allow-unauthenticated --yes --no-install-recommends \ - clickhouse-common-static=$version \ - clickhouse-client=$version \ - clickhouse-server=$version ; \ - fi \ - && apt-get install -y --no-install-recommends tcc libc-dev && \ - 
tcc /su-exec.c -o /bin/su-exec && \ - chown root:root /bin/su-exec && \ - chmod 0755 /bin/su-exec && \ - rm /su-exec.c && \ - apt-get purge -y --auto-remove tcc libc-dev libc-dev-bin libc6-dev linux-libc-dev \ - && clickhouse-local -q 'SELECT * FROM system.build_options' \ - && rm -rf \ - /var/lib/apt/lists/* \ - /var/cache/debconf \ - /tmp/* \ - && apt-get clean \ - && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \ - && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client - -# we need to allow "others" access to clickhouse folder, because docker container -# can be started with arbitrary uid (openshift usecase) - -RUN locale-gen en_US.UTF-8 -ENV LANG en_US.UTF-8 -ENV LANGUAGE en_US:en -ENV LC_ALL en_US.UTF-8 -ENV TZ UTC - -RUN mkdir /docker-entrypoint-initdb.d - -COPY docker_related_config.xml /etc/clickhouse-server/config.d/ -COPY entrypoint.sh /entrypoint.sh -RUN chmod +x /entrypoint.sh - -EXPOSE 9000 8123 9009 -VOLUME /var/lib/clickhouse - -ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml - -ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile new file mode 120000 index 00000000000..fd45f0f7c7c --- /dev/null +++ b/docker/server/Dockerfile @@ -0,0 +1 @@ +Dockerfile.ubuntu \ No newline at end of file diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index cd192c0c9da..5aaf5dd5511 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -1,3 +1,14 @@ +FROM ubuntu:20.04 AS glibc-donor +ARG TARGETARCH + +RUN arch=${TARGETARCH:-amd64} \ + && case $arch in \ + amd64) rarch=x86_64 ;; \ + arm64) rarch=aarch64 ;; \ + esac \ + && ln -s "${rarch}-linux-gnu" /lib/linux-gnu + + FROM alpine ENV LANG=en_US.UTF-8 \ @@ -6,7 +17,24 @@ ENV LANG=en_US.UTF-8 \ TZ=UTC \ CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml -COPY alpine-root/ / +COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/ +COPY --from=glibc-donor /etc/nsswitch.conf /etc/ +COPY docker_related_config.xml /etc/clickhouse-server/config.d/ +COPY entrypoint.sh /entrypoint.sh + +ARG TARGETARCH + +RUN arch=${TARGETARCH:-amd64} \ + && case $arch in \ + amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \ + arm64) ln -sf /lib/ld-2.31.so /lib/ld-linux-aarch64.so.1 ;; \ + esac + +# lts / testing / prestable / etc +ARG REPO_CHANNEL="stable" +ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" +ARG VERSION="20.9.3.45" +ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # user/group precreated explicitly with fixed uid/gid on purpose. # It is especially important for rootless containers: in that case entrypoint @@ -15,9 +43,23 @@ COPY alpine-root/ / # installed to prevent picking those uid / gid by some unrelated software. # The same uid / gid (101) is used both for alpine and ubuntu. 
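Since the comment block above keeps referring to mounted volumes whose owners must be configured externally, here is a minimal host-side sketch of what that looks like in practice; the host path and image tag are illustrative assumptions, not part of this diff:

```bash
# Prepare a host directory whose owner matches the uid/gid (101) precreated inside the image,
# then run the container as that user so the entrypoint never has to chown anything.
mkdir -p /srv/clickhouse/data
sudo chown -R 101:101 /srv/clickhouse/data
docker run -d --user 101:101 \
    -v /srv/clickhouse/data:/var/lib/clickhouse \
    clickhouse/clickhouse-server:latest
```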
-RUN addgroup -S -g 101 clickhouse \ +RUN arch=${TARGETARCH:-amd64} \ + && for package in ${PACKAGES}; do \ + { \ + { echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \ + && wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" -O "/tmp/${package}-${VERSION}-${arch}.tgz" \ + && tar xvzf "/tmp/${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / ; \ + } || \ + { echo "Fallback to ${REPOSITORY}/${package}-${VERSION}.tgz" \ + && wget -c -q "${REPOSITORY}/${package}-${VERSION}.tgz" -O "/tmp/${package}-${VERSION}.tgz" \ + && tar xvzf "/tmp/${package}-${VERSION}.tgz" --strip-components=2 -C / ; \ + } ; \ + } || exit 1 \ + ; done \ + && rm /tmp/*.tgz /install -r \ + && addgroup -S -g 101 clickhouse \ && adduser -S -h /var/lib/clickhouse -s /bin/bash -G clickhouse -g "ClickHouse server" -u 101 clickhouse \ - && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \ + && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server/config.d /etc/clickhouse-server/users.d /etc/clickhouse-client /docker-entrypoint-initdb.d \ && chown clickhouse:clickhouse /var/lib/clickhouse \ && chown root:clickhouse /var/log/clickhouse-server \ && chmod +x /entrypoint.sh \ diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu new file mode 100644 index 00000000000..6e93bd97036 --- /dev/null +++ b/docker/server/Dockerfile.ubuntu @@ -0,0 +1,129 @@ +FROM ubuntu:20.04 + +# see https://github.com/moby/moby/issues/4032#issuecomment-192327844 +ARG DEBIAN_FRONTEND=noninteractive + +COPY su-exec.c /su-exec.c + +# ARG for quick switch to a given ubuntu mirror +ARG apt_archive="http://archive.ubuntu.com" +RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list \ + && groupadd -r clickhouse --gid=101 \ + && useradd -r -g clickhouse --uid=101 --home-dir=/var/lib/clickhouse --shell=/bin/bash clickhouse \ + && apt-get update \ + && apt-get install --yes --no-install-recommends \ + apt-transport-https \ + ca-certificates \ + dirmngr \ + gnupg \ + locales \ + wget \ + tzdata \ + && apt-get install -y --no-install-recommends tcc libc-dev && \ + tcc /su-exec.c -o /bin/su-exec && \ + chown root:root /bin/su-exec && \ + chmod 0755 /bin/su-exec && \ + rm /su-exec.c && \ + apt-get purge -y --auto-remove tcc libc-dev libc-dev-bin libc6-dev linux-libc-dev \ + && apt-get clean + +ARG REPO_CHANNEL="stable" +ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" +ARG VERSION=22.1.1.* +ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" + +# set non-empty deb_location_url url to create a docker image +# from debs created by CI build, for example: +# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://clickhouse-builds.s3.yandex.net/21852/069cfbff388b3d478d1a16dc7060b48073f5d522/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/" -t filimonovq/clickhouse-server:pr21852 +ARG deb_location_url="" + +# set non-empty single_binary_location_url to create docker image +# from a single binary url (useful for non-standard builds - with sanitizers, for arm64). +# for example (run on aarch64 server): +# docker build . --network host --build-arg single_binary_location_url="https://builds.clickhouse.com/master/aarch64/clickhouse" -t altinity/clickhouse-server:master-testing-arm +# note: clickhouse-odbc-bridge is not supported there. 
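As a reading aid for the tarball loop in the Dockerfile.alpine RUN above, the same download-with-fallback logic is sketched below as a standalone script; the repository URL and version are just the defaults from this diff, and availability of `wget` and `tar` is assumed:

```bash
#!/bin/sh
# Try the arch-suffixed tgz first, then fall back to the older arch-less archive name,
# mirroring the { ... } || { ... } chain in the Dockerfile.alpine RUN.
set -e
REPOSITORY="https://packages.clickhouse.com/tgz/stable"
VERSION="20.9.3.45"
arch="${TARGETARCH:-amd64}"
for package in clickhouse-client clickhouse-server clickhouse-common-static; do
    if wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" -O "/tmp/${package}.tgz"; then
        tar xzf "/tmp/${package}.tgz" --strip-components=1 -C /
    else
        wget -c -q "${REPOSITORY}/${package}-${VERSION}.tgz" -O "/tmp/${package}.tgz"
        tar xzf "/tmp/${package}.tgz" --strip-components=2 -C /
    fi
done
```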
+ARG single_binary_location_url="" + +# user/group precreated explicitly with fixed uid/gid on purpose. +# It is especially important for rootless containers: in that case entrypoint +# can't do chown and owners of mounted volumes should be configured externally. +# We do that in advance at the beginning of Dockerfile before any packages are +# installed to prevent picking those uid / gid by some unrelated software. +# The same uid / gid (101) is used both for alpine and ubuntu. + +# To drop privileges, we need the 'su' command, which simply changes uid and gid. +# In fact, the 'su' command from Linux is not so simple, due to an inherent vulnerability in Linux: +# https://ruderich.org/simon/notes/su-sudo-from-root-tty-hijacking +# It has to mitigate this drawback of Linux, and to do this, the 'su' command creates its own pseudo-terminal +# and forwards commands. Due to some ridiculous circumstances, it does not work in Docker (or it does) +# and for these reasons people are using alternatives to the 'su' command in Docker, +# that don't mess with the terminal, don't care about closing the opened files, etc... +# but are only safe for dropping privileges inside Docker. +# The question is which implementation of the 'su' command to use. +# It should be a simple program doing just a couple of syscalls. +# Some people tend to use the 'gosu' tool, which is written in Go. +# It is not used here for several reasons: +# 1. Dependency on some foreign code in yet another programming language - does not sound alright. +# 2. Anselmo D. Adams suggested not to use it due to false positive alarms in some undisclosed security scanners. + +ARG TARGETARCH + +RUN arch=${TARGETARCH:-amd64} \ + && if [ -n "${deb_location_url}" ]; then \ + echo "installing from custom url with deb packages: ${deb_location_url}" \ + && rm -rf /tmp/clickhouse_debs \ + && mkdir -p /tmp/clickhouse_debs \ + && for package in ${PACKAGES}; do \ + { wget --progress=bar:force:noscroll "${deb_location_url}/${package}_${VERSION}_${arch}.deb" -P /tmp/clickhouse_debs || \ + wget --progress=bar:force:noscroll "${deb_location_url}/${package}_${VERSION}_all.deb" -P /tmp/clickhouse_debs ; } \ + || exit 1 \ + ; done \ + && dpkg -i /tmp/clickhouse_debs/*.deb ; \ + elif [ -n "${single_binary_location_url}" ]; then \ + echo "installing from single binary url: ${single_binary_location_url}" \ + && rm -rf /tmp/clickhouse_binary \ + && mkdir -p /tmp/clickhouse_binary \ + && wget --progress=bar:force:noscroll "${single_binary_location_url}" -O /tmp/clickhouse_binary/clickhouse \ + && chmod +x /tmp/clickhouse_binary/clickhouse \ + && /tmp/clickhouse_binary/clickhouse install --user "clickhouse" --group "clickhouse" ; \ + else \ + mkdir -p /etc/apt/sources.list.d \ + && apt-key adv --keyserver keyserver.ubuntu.com --recv 8919F6BD2B48D754 \ + && echo ${REPOSITORY} > /etc/apt/sources.list.d/clickhouse.list \ + && echo "installing from repository: ${REPOSITORY}" \ + && apt-get update \ + && apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \ + && for package in ${PACKAGES}; do \ + packages="${packages} ${package}=${VERSION}" \ + ; done \ + && apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \ + ; fi \ + && clickhouse-local -q 'SELECT * FROM system.build_options' \ + && rm -rf \ + /var/lib/apt/lists/* \ + /var/cache/debconf \ + /tmp/* \ + && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \ + && chmod ugo+Xrw -R /var/lib/clickhouse
/var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client + +# we need to allow "others" access to clickhouse folder, because docker container +# can be started with arbitrary uid (openshift usecase) + +RUN locale-gen en_US.UTF-8 +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 +ENV TZ UTC + +RUN mkdir /docker-entrypoint-initdb.d + +COPY docker_related_config.xml /etc/clickhouse-server/config.d/ +COPY entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh + +EXPOSE 9000 8123 9009 +VOLUME /var/lib/clickhouse + +ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/server/alpine-build.sh b/docker/server/alpine-build.sh deleted file mode 100755 index 1b448c61fbb..00000000000 --- a/docker/server/alpine-build.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash -set -x - -REPO_CHANNEL="${REPO_CHANNEL:-stable}" # lts / testing / prestable / etc -REPO_URL="${REPO_URL:-"https://repo.yandex.ru/clickhouse/tgz/${REPO_CHANNEL}"}" -VERSION="${VERSION:-20.9.3.45}" -DOCKER_IMAGE="${DOCKER_IMAGE:-clickhouse/clickhouse-server}" - -# where original files live -DOCKER_BUILD_FOLDER="${BASH_SOURCE%/*}" - -# we will create root for our image here -CONTAINER_ROOT_FOLDER="${DOCKER_BUILD_FOLDER}/alpine-root" - -# clean up the root from old runs, it's reconstructed each time -rm -rf "$CONTAINER_ROOT_FOLDER" -mkdir -p "$CONTAINER_ROOT_FOLDER" - -# where to put downloaded tgz -TGZ_PACKAGES_FOLDER="${DOCKER_BUILD_FOLDER}/tgz-packages" -mkdir -p "$TGZ_PACKAGES_FOLDER" - -PACKAGES=( "clickhouse-client" "clickhouse-server" "clickhouse-common-static" ) - -# download tars from the repo -for package in "${PACKAGES[@]}" -do - wget -c -q --show-progress "${REPO_URL}/${package}-${VERSION}.tgz" -O "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz" -done - -# unpack tars -for package in "${PACKAGES[@]}" -do - tar xvzf "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz" --strip-components=2 -C "$CONTAINER_ROOT_FOLDER" -done - -# prepare few more folders -mkdir -p "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/users.d" \ - "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d" \ - "${CONTAINER_ROOT_FOLDER}/var/log/clickhouse-server" \ - "${CONTAINER_ROOT_FOLDER}/var/lib/clickhouse" \ - "${CONTAINER_ROOT_FOLDER}/docker-entrypoint-initdb.d" \ - "${CONTAINER_ROOT_FOLDER}/lib64" - -cp "${DOCKER_BUILD_FOLDER}/docker_related_config.xml" "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d/" -cp "${DOCKER_BUILD_FOLDER}/entrypoint.sh" "${CONTAINER_ROOT_FOLDER}/entrypoint.sh" - -## get glibc components from ubuntu 20.04 and put them to expected place -docker pull ubuntu:20.04 -ubuntu20image=$(docker create --rm ubuntu:20.04) -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libc.so.6 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libdl.so.2 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libm.so.6 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libpthread.so.0 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/librt.so.1 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libnss_dns.so.2 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libnss_files.so.2 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libresolv.so.2 "${CONTAINER_ROOT_FOLDER}/lib" -docker cp -L 
"${ubuntu20image}":/lib64/ld-linux-x86-64.so.2 "${CONTAINER_ROOT_FOLDER}/lib64" -docker cp -L "${ubuntu20image}":/etc/nsswitch.conf "${CONTAINER_ROOT_FOLDER}/etc" - -docker build "$DOCKER_BUILD_FOLDER" -f Dockerfile.alpine -t "${DOCKER_IMAGE}:${VERSION}-alpine" --pull -rm -rf "$CONTAINER_ROOT_FOLDER" diff --git a/docker/server/local.Dockerfile b/docker/server/local.Dockerfile deleted file mode 100644 index 0d86c9ce45a..00000000000 --- a/docker/server/local.Dockerfile +++ /dev/null @@ -1,47 +0,0 @@ -# Since right now we can't set volumes to the docker during build, we split building container in stages: -# 1. build base container -# 2. run base conatiner with mounted volumes -# 3. commit container as image -# 4. build final container atop that image -# Middle steps are performed by the bash script. - -FROM ubuntu:18.04 as clickhouse-server-base -ARG gosu_ver=1.14 - -VOLUME /packages/ - -# update to allow installing dependencies of clickhouse automatically -RUN apt update; \ - DEBIAN_FRONTEND=noninteractive \ - apt install -y locales; - -ADD https://github.com/tianon/gosu/releases/download/${gosu_ver}/gosu-amd64 /bin/gosu - -RUN locale-gen en_US.UTF-8 -ENV LANG en_US.UTF-8 -ENV LANGUAGE en_US:en -ENV LC_ALL en_US.UTF-8 - -# installing via apt to simulate real-world scenario, where user installs deb package and all it's dependecies automatically. -CMD DEBIAN_FRONTEND=noninteractive \ - apt install -y \ - /packages/clickhouse-common-static_*.deb \ - /packages/clickhouse-server_*.deb ; - -FROM clickhouse-server-base:postinstall as clickhouse-server - -RUN mkdir /docker-entrypoint-initdb.d - -COPY docker_related_config.xml /etc/clickhouse-server/config.d/ -COPY entrypoint.sh /entrypoint.sh - -RUN chmod +x \ - /entrypoint.sh \ - /bin/gosu - -EXPOSE 9000 8123 9009 -VOLUME /var/lib/clickhouse - -ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml - -ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 079d2872204..31ec52b1d5a 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -115,6 +115,7 @@ function start_server function clone_root { + git config --global --add safe.directory "$FASTTEST_SOURCE" git clone --depth 1 https://github.com/ClickHouse/ClickHouse.git -- "$FASTTEST_SOURCE" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/clone_log.txt" ( diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index d8cb417f18b..a02d3291742 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -226,7 +226,6 @@ quit --receive_data_timeout_ms=10000 \ --stacktrace \ --query-fuzzer-runs=1000 \ - --testmode \ --queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) \ $NEW_TESTS_OPT \ > >(tail -n 100000 > fuzzer.log) \ diff --git a/docker/test/integration/mysql_js_client/Dockerfile b/docker/test/integration/mysql_js_client/Dockerfile index b1397b40d38..4c9df10ace1 100644 --- a/docker/test/integration/mysql_js_client/Dockerfile +++ b/docker/test/integration/mysql_js_client/Dockerfile @@ -1,8 +1,10 @@ # docker build -t clickhouse/mysql-js-client . 
# MySQL JavaScript client docker container -FROM node:8 +FROM node:16.14.2 + +WORKDIR /usr/app RUN npm install mysql -COPY ./test.js test.js +COPY ./test.js ./test.js diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index 861e17848a4..6aa9d88f5b4 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -96,7 +96,7 @@ else clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits" clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits" clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" - clickhouse-client --query 
"INSERT INTO test.hits_s3 SELECT * FROM test.hits" + clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0" fi clickhouse-client --query "SHOW TABLES FROM test" diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index f8b73791388..63750b90b5a 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -139,7 +139,7 @@ pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhous # directly # - even though ci auto-compress some files (but not *.tsv) it does this only # for files >64MB, we want this files to be compressed explicitly -for table in query_log zookeeper_log trace_log +for table in query_log zookeeper_log trace_log transactions_info_log do clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.tsv.gz & if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 3cef5b008db..0f5139f5b4d 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -106,17 +106,6 @@ function stop() function start() { - # Rename existing log file - it will be more convenient to read separate files for separate server runs. - if [ -f '/var/log/clickhouse-server/clickhouse-server.log' ] - then - log_file_counter=1 - while [ -f "/var/log/clickhouse-server/clickhouse-server.log.${log_file_counter}" ] - do - log_file_counter=$((log_file_counter + 1)) - done - mv '/var/log/clickhouse-server/clickhouse-server.log' "/var/log/clickhouse-server/clickhouse-server.log.${log_file_counter}" - fi - counter=0 until clickhouse-client --query "SELECT 1" do @@ -190,6 +179,8 @@ clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordin clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test" stop +mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log + start clickhouse-client --query "SHOW TABLES FROM datasets" @@ -205,6 +196,8 @@ clickhouse-client --query "SHOW TABLES FROM test" || echo -e 'Test script failed\tFAIL' >> /test_output/test_results.tsv stop +mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.stress.log + start clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_output/test_results.tsv \ @@ -263,10 +256,12 @@ mkdir previous_release_package_folder clickhouse-client --query="SELECT version()" | ./download_previous_release && echo -e 'Download script exit code\tOK' >> /test_output/test_results.tsv \ || echo -e 'Download script failed\tFAIL' >> /test_output/test_results.tsv +stop +mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.clean.log + if [ "$(ls -A previous_release_package_folder/clickhouse-common-static_*.deb && ls -A previous_release_package_folder/clickhouse-server_*.deb)" ] then echo -e "Successfully downloaded previous release packets\tOK" >> /test_output/test_results.tsv - stop # Uninstall current packages dpkg --remove clickhouse-client @@ -289,7 +284,7 @@ then install_packages package_folder mkdir tmp_stress_output - + ./stress --backward-compatibility-check --output-folder tmp_stress_output --global-time-limit=1200 \ && echo -e 'Backward compatibility check: Test script exit code\tOK' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: Test script failed\tFAIL' >> 
/test_output/test_results.tsv @@ -297,8 +292,9 @@ then clickhouse-client --query="SELECT 'Tables count:', count() FROM system.tables" - stop - + stop + mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.stress.log + # Start new server configure start 500 @@ -310,8 +306,9 @@ then # Let the server run for a while before checking log. sleep 60 - + stop + mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.clean.log # Error messages (we should ignore some errors) echo "Check for Error messages in server log:" @@ -332,7 +329,7 @@ then -e "Code: 1000, e.code() = 111, Connection refused" \ -e "UNFINISHED" \ -e "Renaming unexpected part" \ - /var/log/clickhouse-server/clickhouse-server.log | zgrep -Fa "" > /test_output/bc_check_error_messages.txt \ + /var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "" > /test_output/bc_check_error_messages.txt \ && echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv @@ -348,13 +345,13 @@ then rm -f /test_output/tmp # OOM - zgrep -Fa " Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ + zgrep -Fa " Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \ && echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv # Logical errors echo "Check for Logical errors in server log:" - zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log > /test_output/bc_check_logical_errors.txt \ + zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_logical_errors.txt \ && echo -e 'Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No logical errors\tOK' >> /test_output/test_results.tsv @@ -362,19 +359,18 @@ then [ -s /test_output/bc_check_logical_errors.txt ] || rm /test_output/bc_check_logical_errors.txt # Crash - zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ + zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \ && echo -e 'Backward compatibility check: Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: Not crashed\tOK' >> /test_output/test_results.tsv # It also checks for crash without stacktrace (printed by watchdog) echo "Check for Fatal message in server log:" - zgrep -Fa " " /var/log/clickhouse-server/clickhouse-server.log > /test_output/bc_check_fatal_messages.txt \ + zgrep -Fa " " /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_fatal_messages.txt \ && echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see 
bc_check_fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv # Remove file bc_check_fatal_messages.txt if it's empty [ -s /test_output/bc_check_fatal_messages.txt ] || rm /test_output/bc_check_fatal_messages.txt - else echo -e "Backward compatibility check: Failed to download previous release packets\tFAIL" >> /test_output/test_results.tsv fi diff --git a/docker/test/stress/stress b/docker/test/stress/stress index 86f8edf5980..d78de84f60d 100755 --- a/docker/test/stress/stress +++ b/docker/test/stress/stress @@ -10,7 +10,7 @@ import logging import time -def get_options(i): +def get_options(i, backward_compatibility_check): options = [] client_options = [] if 0 < i: @@ -19,7 +19,7 @@ def get_options(i): if i % 3 == 1: options.append("--db-engine=Ordinary") - if i % 3 == 2: + if i % 3 == 2 and not backward_compatibility_check: options.append('''--db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i)) client_options.append('allow_experimental_database_replicated=1') @@ -57,7 +57,7 @@ def run_func_test(cmd, output_prefix, num_processes, skip_tests_option, global_t pipes = [] for i in range(0, len(output_paths)): f = open(output_paths[i], 'w') - full_command = "{} {} {} {} {}".format(cmd, get_options(i), global_time_limit_option, skip_tests_option, backward_compatibility_check_option) + full_command = "{} {} {} {} {}".format(cmd, get_options(i, backward_compatibility_check), global_time_limit_option, skip_tests_option, backward_compatibility_check_option) logging.info("Run func tests '%s'", full_command) p = Popen(full_command, shell=True, stdout=f, stderr=f) pipes.append(p) @@ -83,15 +83,15 @@ def make_query_command(query): def prepare_for_hung_check(drop_databases): # FIXME this function should not exist, but... - # ThreadFuzzer significantly slows down server and causes false-positive hung check failures - call_with_retry("clickhouse client -q 'SYSTEM STOP THREAD FUZZER'") - # We attach gdb to clickhouse-server before running tests # to print stacktraces of all crashes even if clickhouse cannot print it for some reason. # However, it obstruct checking for hung queries. logging.info("Will terminate gdb (if any)") call_with_retry("kill -TERM $(pidof gdb)") + # ThreadFuzzer significantly slows down server and causes false-positive hung check failures + call_with_retry("clickhouse client -q 'SYSTEM STOP THREAD FUZZER'") + call_with_retry(make_query_command('SELECT 1 FORMAT Null')) # Some tests execute SYSTEM STOP MERGES or similar queries. @@ -131,7 +131,7 @@ def prepare_for_hung_check(drop_databases): Popen(command, shell=True) break except Exception as ex: - print("Failed to SHOW or DROP databasese, will retry", ex) + logging.error("Failed to SHOW or DROP databasese, will retry %s", str(ex)) time.sleep(i) else: raise Exception("Cannot drop databases after stress tests. 
Probably server consumed too much memory and cannot execute simple queries") @@ -198,7 +198,11 @@ if __name__ == "__main__": logging.info("Logs compressed") if args.hung_check: - have_long_running_queries = prepare_for_hung_check(args.drop_databases) + try: + have_long_running_queries = prepare_for_hung_check(args.drop_databases) + except Exception as ex: + have_long_running_queries = True + logging.error("Failed to prepare for hung check %s", str(ex)) logging.info("Checking if some queries hung") cmd = ' '.join([args.test_cmd, # Do not track memory allocations up to 1Gi, @@ -215,6 +219,8 @@ if __name__ == "__main__": "--client-option", "max_untracked_memory=1Gi", "--client-option", "max_memory_usage_for_user=0", "--client-option", "memory_profiler_step=1Gi", + # Use system database to avoid CREATE/DROP DATABASE queries + "--database=system", "--hung-check", "00001_select_1" ]) diff --git a/docker/test/test_runner.sh b/docker/test/test_runner.sh deleted file mode 100755 index 0c99c8c2b32..00000000000 --- a/docker/test/test_runner.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/sh - -set -e -x - -# Not sure why shellcheck complains that rc is not assigned before it is referenced. -# shellcheck disable=SC2154 -trap 'rc=$?; echo EXITED WITH: $rc; exit $rc' EXIT - -# CLI option to prevent rebuilding images, just re-run tests with images leftover from previuos time -readonly NO_REBUILD_FLAG="--no-rebuild" - -readonly CLICKHOUSE_DOCKER_DIR="$(realpath "${1}")" -readonly CLICKHOUSE_PACKAGES_ARG="${2}" -CLICKHOUSE_SERVER_IMAGE="${3}" - -if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then - readonly CLICKHOUSE_PACKAGES_DIR="$(realpath "${2}")" # or --no-rebuild -fi - - -# In order to allow packages directory to be anywhere, and to reduce amount of context sent to the docker daemon, -# all images are built in multiple stages: -# 1. build base image, install dependencies -# 2. run image with volume mounted, install what needed from those volumes -# 3. tag container as image -# 4. [optional] build another image atop of tagged. 
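The four numbered steps above (from the now-deleted helper) can be compressed into a short sketch; every image name and Dockerfile path below is a placeholder chosen only to illustrate the run-install-commit pattern, not taken from this diff:

```bash
# 1. base image with dependencies; 2. run it with the packages volume mounted so they get installed;
# 3. commit the resulting container as an image; 4. (optional) build a final image on top of it.
docker build -t runner-base:preinstall -f base.Dockerfile .
docker run -v "$PWD/packages:/packages" --name runner-installing runner-base:preinstall
docker commit runner-installing runner-base:postinstall
docker build -t runner:local -f final.Dockerfile .   # final.Dockerfile would start with "FROM runner-base:postinstall"
```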
- -# TODO: optionally mount most recent clickhouse-test and queries directory from local machine - -if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then - docker build --network=host \ - -f "${CLICKHOUSE_DOCKER_DIR}/test/stateless/clickhouse-statelest-test-runner.Dockerfile" \ - --target clickhouse-test-runner-base \ - -t clickhouse-test-runner-base:preinstall \ - "${CLICKHOUSE_DOCKER_DIR}/test/stateless" - - docker rm -f clickhouse-test-runner-installing-packages || true - docker run --network=host \ - -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \ - --name clickhouse-test-runner-installing-packages \ - clickhouse-test-runner-base:preinstall - docker commit clickhouse-test-runner-installing-packages clickhouse-statelest-test-runner:local - docker rm -f clickhouse-test-runner-installing-packages || true -fi - -# # Create a bind-volume to the clickhouse-test script file -# docker volume create --driver local --opt type=none --opt device=/home/enmk/proj/ClickHouse_master/tests/clickhouse-test --opt o=bind clickhouse-test-script-volume -# docker volume create --driver local --opt type=none --opt device=/home/enmk/proj/ClickHouse_master/tests/queries --opt o=bind clickhouse-test-queries-dir-volume - -# Build server image (optional) from local packages -if [ -z "${CLICKHOUSE_SERVER_IMAGE}" ]; then - CLICKHOUSE_SERVER_IMAGE="clickhouse/server:local" - - if [ "${CLICKHOUSE_PACKAGES_ARG}" != "${NO_REBUILD_FLAG}" ]; then - docker build --network=host \ - -f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \ - --target clickhouse-server-base \ - -t clickhouse-server-base:preinstall \ - "${CLICKHOUSE_DOCKER_DIR}/server" - - docker rm -f clickhouse_server_base_installing_server || true - docker run --network=host -v "${CLICKHOUSE_PACKAGES_DIR}:/packages" \ - --name clickhouse_server_base_installing_server \ - clickhouse-server-base:preinstall - docker commit clickhouse_server_base_installing_server clickhouse-server-base:postinstall - - docker build --network=host \ - -f "${CLICKHOUSE_DOCKER_DIR}/server/local.Dockerfile" \ - --target clickhouse-server \ - -t "${CLICKHOUSE_SERVER_IMAGE}" \ - "${CLICKHOUSE_DOCKER_DIR}/server" - fi -fi - -docker rm -f test-runner || true -docker-compose down -CLICKHOUSE_SERVER_IMAGE="${CLICKHOUSE_SERVER_IMAGE}" \ - docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \ - create \ - --build --force-recreate - -CLICKHOUSE_SERVER_IMAGE="${CLICKHOUSE_SERVER_IMAGE}" \ - docker-compose -f "${CLICKHOUSE_DOCKER_DIR}/test/test_runner_docker_compose.yaml" \ - run \ - --name test-runner \ - test-runner diff --git a/docker/test/test_runner_docker_compose.yaml b/docker/test/test_runner_docker_compose.yaml deleted file mode 100644 index 2aef6a48d77..00000000000 --- a/docker/test/test_runner_docker_compose.yaml +++ /dev/null @@ -1,34 +0,0 @@ -version: "2" - -services: - clickhouse-server: - image: ${CLICKHOUSE_SERVER_IMAGE} - expose: - - "8123" # HTTP - - "9000" # TCP - - "9009" # HTTP-interserver - restart: "no" - - test-runner: - image: clickhouse-statelest-test-runner:local - - restart: "no" - depends_on: - - clickhouse-server - environment: - # these are used by clickhouse-test to point clickhouse-client to the right server - - CLICKHOUSE_HOST=clickhouse-server - - CLICKHOUSE_PORT=9009 - - CLICKHOUSE_TEST_HOST_EXPOSED_PORT=51234 - expose: - # port for any test to serve data to clickhouse-server on rare occasion (like URL-engine tables in 00646), - # should match value of CLICKHOUSE_TEST_HOST_EXPOSED_PORT above - - "51234" - - # NOTE: 
Dev-mode: mount newest versions of the queries and clickhouse-test script into container. - # volumes: - # - /home/enmk/proj/ClickHouse_master/tests/queries:/usr/share/clickhouse-test/queries:ro - # - /home/enmk/proj/ClickHouse_master/tests/clickhouse-test:/usr/bin/clickhouse-test:ro - - # String-form instead of list-form to allow multiple arguments in "${CLICKHOUSE_TEST_ARGS}" - entrypoint: "clickhouse-test ${CLICKHOUSE_TEST_ARGS}" diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index 1d80f143098..3eb00bad33b 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -47,7 +47,7 @@ Optional parameters: - `kafka_row_delimiter` — Delimiter character, which ends the message. - `kafka_schema` — Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object. -- `kafka_num_consumers` — The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition. +- `kafka_num_consumers` — The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition, and must not be greater than the number of physical cores on the server where ClickHouse is deployed. - `kafka_max_block_size` — The maximum batch size (in messages) for poll (default: `max_block_size`). - `kafka_skip_broken_messages` — Kafka message parser tolerance to schema-incompatible messages per block. Default: `0`. If `kafka_skip_broken_messages = N` then the engine skips *N* Kafka messages that cannot be parsed (a message equals a row of data). - `kafka_commit_every_batch` — Commit every consumed and handled batch instead of a single commit after writing a whole block (default: `0`). diff --git a/docs/en/getting-started/example-datasets/ontime.md b/docs/en/getting-started/example-datasets/ontime.md index efc807b75fa..87096354f1f 100644 --- a/docs/en/getting-started/example-datasets/ontime.md +++ b/docs/en/getting-started/example-datasets/ontime.md @@ -159,6 +159,10 @@ $ clickhouse-client --query "select count(*) from datasets.ontime" !!! info "Info" If you will run the queries described below, you have to use the full table name, `datasets.ontime`. + +!!! info "Info" + If you are using the prepared partitions or the Online Playground replace any occurrence of `IATA_CODE_Reporting_Airline` or `IATA_CODE_Reporting_Airline AS Carrier` in the following queries with `Carrier` (see `describe ontime`). + ## Queries {#queries} Q0. diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md index eaf7a96ce42..a252f55de2c 100644 --- a/docs/en/interfaces/cli.md +++ b/docs/en/interfaces/cli.md @@ -124,7 +124,7 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va - `--time, -t` – If specified, print the query execution time to ‘stderr’ in non-interactive mode. - `--stacktrace` – If specified, also print the stack trace if an exception occurs. - `--config-file` – The name of the configuration file. 
-- `--secure` – If specified, will connect to server over secure connection. +- `--secure` – If specified, will connect to server over secure connection (TLS). You might need to configure your CA certificates in the [configuration file](#configuration_files). The available configuration settings are the same as for [server-side TLS configuration](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-openssl). - `--history_file` — Path to a file containing command history. - `--param_` — Value for a [query with parameters](#cli-queries-with-parameters). - `--hardware-utilization` — Print hardware utilization information in progress bar. @@ -148,7 +148,12 @@ Example of a config file: username password - False + true + + + /etc/ssl/cert.pem + + ``` diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index ad199ce452e..98eea85bbfa 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -43,7 +43,7 @@ toc_title: Adopters | Citymobil | Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) | | Cloudflare | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | | Comcast | Media | CDN Traffic Analysis | — | — | [ApacheCon 2019 Talk](https://www.youtube.com/watch?v=e9TZ6gFDjNg) | -| ContentSquare | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | +| Contentsquare | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | | Corunet | Analytics | Main product | — | — | [Slides in English, April 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | | CraiditX 氪信 | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | | Crazypanda | Games | | — | — | Live session on ClickHouse meetup | @@ -158,6 +158,7 @@ toc_title: Adopters | Staffcop | Information Security | Main Product | — | — | [Official website, Documentation](https://www.staffcop.ru/sce43) | | Suning | E-Commerce | User behaviour analytics | — | — | [Blog article](https://www.sohu.com/a/434152235_411876) | | Superwall | Monetization Tooling | Main product | — | — | [Word of mouth, Jan 2022](https://github.com/ClickHouse/ClickHouse/pull/33573) | +| Swetrix | Analytics | Main Product | — | — | [Source code](https://github.com/swetrix/swetrix-api) | | Teralytics | Mobility | Analytics | — | — | [Tech blog](https://www.teralytics.net/knowledge-hub/visualizing-mobility-data-the-scalability-challenge) | | Tencent | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | | Tencent | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) | diff --git a/docs/en/operations/clickhouse-keeper.md 
b/docs/en/operations/clickhouse-keeper.md index a8ca2079070..26d61dabaf9 100644 --- a/docs/en/operations/clickhouse-keeper.md +++ b/docs/en/operations/clickhouse-keeper.md @@ -3,13 +3,10 @@ toc_priority: 66 toc_title: ClickHouse Keeper --- -# [pre-production] ClickHouse Keeper {#clickHouse-keeper} +# ClickHouse Keeper {#clickHouse-keeper} ClickHouse server uses [ZooKeeper](https://zookeeper.apache.org/) coordination system for data [replication](../engines/table-engines/mergetree-family/replication.md) and [distributed DDL](../sql-reference/distributed-ddl.md) queries execution. ClickHouse Keeper is an alternative coordination system compatible with ZooKeeper. -!!! warning "Warning" - This feature is currently in the pre-production stage. We test it in our CI and on small internal installations. - ## Implementation details {#implementation-details} ZooKeeper is one of the first well-known open-source coordination systems. It's implemented in Java, has quite a simple and powerful data model. ZooKeeper's coordination algorithm called ZAB (ZooKeeper Atomic Broadcast) doesn't provide linearizability guarantees for reads, because each ZooKeeper node serves reads locally. Unlike ZooKeeper ClickHouse Keeper is written in C++ and uses [RAFT algorithm](https://raft.github.io/) [implementation](https://github.com/eBay/NuRaft). This algorithm allows to have linearizability for reads and writes, has several open-source implementations in different languages. diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 88c43c9c3c2..fb53fd38fe3 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -366,12 +366,12 @@ Opens `https://tabix.io/` when accessing `http://localhost: http_port`.
]]>
-``` +``` ## hsts_max_age {#hsts-max-age} - -Expired time for HSTS in seconds. The default value is 0 means clickhouse disabled HSTS. If you set a positive number, the HSTS will be enabled and the max-age is the number you set. - -**Example** + +Expired time for HSTS in seconds. The default value is 0 means clickhouse disabled HSTS. If you set a positive number, the HSTS will be enabled and the max-age is the number you set. + +**Example** ```xml 600000 @@ -468,7 +468,7 @@ To enable authentication, set `interserver_http_credentials.allow_empty` to `tru After configuring all replicas set `allow_empty` to `false` or remove this setting. It makes authentication with new credentials mandatory. -To change existing credentials, move the username and the password to `interserver_http_credentials.old` section and update `user` and `password` with new values. At this point the server uses new credentials to connect to other replicas and accepts connections with either new or old credentials. +To change existing credentials, move the username and the password to `interserver_http_credentials.old` section and update `user` and `password` with new values. At this point the server uses new credentials to connect to other replicas and accepts connections with either new or old credentials. ``` xml @@ -834,7 +834,7 @@ The value 0 means that you can delete all tables without any restrictions. ClickHouse uses threads from the Global Thread pool to process queries. If there is no idle thread to process a query, then a new thread is created in the pool. `max_thread_pool_size` limits the maximum number of threads in the pool. -Possible values: +Possible values: - Positive integer. @@ -850,7 +850,7 @@ Default value: `10000`. If the number of **idle** threads in the Global Thread pool is greater than `max_thread_pool_free_size`, then ClickHouse releases resources occupied by some threads and the pool size is decreased. Threads can be created again if necessary. -Possible values: +Possible values: - Positive integer. @@ -866,7 +866,7 @@ Default value: `1000`. The maximum number of jobs that can be scheduled on the Global Thread pool. Increasing queue size leads to larger memory usage. It is recommended to keep this value equal to [max_thread_pool_size](#max-thread-pool-size). -Possible values: +Possible values: - Positive integer. @@ -941,30 +941,30 @@ For more information, see the MergeTreeSettings.h header file. SSL client/server configuration. -Support for SSL is provided by the `libpoco` library. The interface is described in the file [SSLManager.h](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h) +Support for SSL is provided by the `libpoco` library. The available configuration options are explained in [SSLManager.h](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h). Default values can be found in [SSLManager.cpp](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/src/SSLManager.cpp). Keys for server/client settings: - privateKeyFile – The path to the file with the secret key of the PEM certificate. The file may contain a key and certificate at the same time. - certificateFile – The path to the client/server certificate file in PEM format. You can omit it if `privateKeyFile` contains the certificate. -- caConfig – The path to the file or directory that contains trusted root certificates. -- verificationMode – The method for checking the node’s certificates. 
Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) class. Possible values: `none`, `relaxed`, `strict`, `once`. -- verificationDepth – The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value. -- loadDefaultCAFile – Indicates that built-in CA certificates for OpenSSL will be used. Acceptable values: `true`, `false`. \| -- cipherList – Supported OpenSSL encryptions. For example: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`. -- cacheSessions – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. Acceptable values: `true`, `false`. -- sessionIdContext – A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. This parameter is always recommended since it helps avoid problems both if the server caches the session and if the client requested caching. Default value: `${application.name}`. -- sessionCacheSize – The maximum number of sessions that the server caches. Default value: 1024\*20. 0 – Unlimited sessions. -- sessionTimeout – Time for caching the session on the server. -- extendedVerification – Automatically extended verification of certificates after the session ends. Acceptable values: `true`, `false`. -- requireTLSv1 – Require a TLSv1 connection. Acceptable values: `true`, `false`. -- requireTLSv1_1 – Require a TLSv1.1 connection. Acceptable values: `true`, `false`. -- requireTLSv1_2 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`. -- fips – Activates OpenSSL FIPS mode. Supported if the library’s OpenSSL version supports FIPS. -- privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: ``, `KeyFileHandler`, `test`, ``. -- invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: ` ConsoleCertificateHandler ` . -- disableProtocols – Protocols that are not allowed to use. -- preferServerCiphers – Preferred server ciphers on the client. +- caConfig (default: none) – The path to the file or directory that contains trusted CA certificates. If this points to a file, it must be in PEM format and can contain several CA certificates. If this points to a directory, it must contain one .pem file per CA certificate. The filenames are looked up by the CA subject name hash value. Details can be found in the man page of [SSL_CTX_load_verify_locations](https://www.openssl.org/docs/man3.0/man3/SSL_CTX_load_verify_locations.html). +- verificationMode (default: relaxed) – The method for checking the node’s certificates. Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) class. Possible values: `none`, `relaxed`, `strict`, `once`. +- verificationDepth (default: 9) – The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value. +- loadDefaultCAFile (default: true) – Wether built-in CA certificates for OpenSSL will be used. ClickHouse assumes that builtin CA certificates are in the file `/etc/ssl/cert.pem` (resp. the directory `/etc/ssl/certs`) or in file (resp. directory) specified by the environment variable `SSL_CERT_FILE` (resp. `SSL_CERT_DIR`). 
+- cipherList (default: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`) - Supported OpenSSL encryptions. +- cacheSessions (default: false) – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. Acceptable values: `true`, `false`. +- sessionIdContext (default: `${application.name}`) – A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. This parameter is always recommended since it helps avoid problems both if the server caches the session and if the client requested caching. Default value: `${application.name}`. +- sessionCacheSize (default: [1024\*20](https://github.com/ClickHouse/boringssl/blob/master/include/openssl/ssl.h#L1978)) – The maximum number of sessions that the server caches. A value of 0 means unlimited sessions. +- sessionTimeout (default: [2h](https://github.com/ClickHouse/boringssl/blob/master/include/openssl/ssl.h#L1926)) – Time for caching the session on the server. +- extendedVerification (default: false) – If enabled, verify that the certificate CN or SAN matches the peer hostname. +- requireTLSv1 (default: false) – Require a TLSv1 connection. Acceptable values: `true`, `false`. +- requireTLSv1_1 (default: false) – Require a TLSv1.1 connection. Acceptable values: `true`, `false`. +- requireTLSv1_2 (default: false) – Require a TLSv1.2 connection. Acceptable values: `true`, `false`. +- fips (default: false) – Activates OpenSSL FIPS mode. Supported if the library’s OpenSSL version supports FIPS. +- privateKeyPassphraseHandler (default: `KeyConsoleHandler`)– Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: ``, `KeyFileHandler`, `test`, ``. +- invalidCertificateHandler (default: `ConsoleCertificateHandler`) – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: ` ConsoleCertificateHandler ` . +- disableProtocols (default: "") – Protocols that are not allowed to use. +- preferServerCiphers (default: false) – Preferred server ciphers on the client. **Example of settings:** @@ -1467,6 +1467,18 @@ The update is performed asynchronously, in a separate system thread. - [background_schedule_pool_size](../../operations/settings/settings.md#background_schedule_pool_size) + +## dns_max_consecutive_failures {#server-settings-dns-max-consecutive-failures} + +The number of consecutive failures accepted when updating a DNS cache entry before it is dropped. +Use `0` to disable cache dropping (entries will only be cleaned by `SYSTEM DROP DNS CACHE`) + +**Default value**: 5. + +**See also** + +- [`SYSTEM DROP DNS CACHE`](../../sql-reference/statements/system.md#query_language-system-drop-dns-cache) + ## distributed_ddl {#server-settings-distributed_ddl} Manage executing [distributed ddl queries](../../sql-reference/distributed-ddl.md) (CREATE, DROP, ALTER, RENAME) on cluster. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 91bf0812de4..07abd77fed0 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -519,6 +519,33 @@ Possible values: Default value: `1`. +## allow_settings_after_format_in_insert {#allow_settings_after_format_in_insert} + +Control whether `SETTINGS` after `FORMAT` in `INSERT` queries is allowed or not. It is not recommended to use this, since this may interpret part of `SETTINGS` as values. 
+ +Example: + +```sql +INSERT INTO FUNCTION null('foo String') SETTINGS max_threads=1 VALUES ('bar'); +``` + +But the following query will work only with `allow_settings_after_format_in_insert`: + +```sql +SET allow_settings_after_format_in_insert=1; +INSERT INTO FUNCTION null('foo String') VALUES ('bar') SETTINGS max_threads=1; +``` + +Possible values: + +- 0 — Disallow. +- 1 — Allow. + +Default value: `0`. + +!!! note "Warning" + Use this setting only for backward compatibility if your use cases depend on old syntax. + ## input_format_skip_unknown_fields {#settings-input-format-skip-unknown-fields} Enables or disables skipping insertion of extra data. @@ -1062,6 +1089,15 @@ Result: └─────────────┴───────────┘ ``` +## log_processors_profiles {#settings-log_processors_profiles} + +Write time that processor spent during execution/waiting for data to `system.processors_profile_log` table. + +See also: + +- [`system.processors_profile_log`](../../operations/system-tables/processors_profile_log.md#system-processors_profile_log) +- [`EXPLAIN PIPELINE`](../../sql-reference/statements/explain.md#explain-pipeline) + ## max_insert_block_size {#settings-max_insert_block_size} The size of blocks (in a count of rows) to form for insertion into a table. diff --git a/docs/en/operations/storing-data.md b/docs/en/operations/storing-data.md index beffd45bcbd..6cda47ab9fb 100644 --- a/docs/en/operations/storing-data.md +++ b/docs/en/operations/storing-data.md @@ -11,10 +11,6 @@ To work with data stored on `Amazon S3` disks use [S3](../engines/table-engines/ To load data from a web server with static files use a disk with type [web](#storing-data-on-webserver). -## Zero-copy Replication {#zero-copy} - -ClickHouse supports zero-copy replication for `S3` and `HDFS` disks, which means that if the data is stored remotely on several machines and needs to be synchronized, then only the metadata is replicated (paths to the data parts), but not the data itself. - ## Configuring HDFS {#configuring-hdfs} [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) and [Log](../engines/table-engines/log-family/log.md) family table engines can store data to HDFS using a disk with type `HDFS`. @@ -316,3 +312,8 @@ When loading files by `endpoint`, they must be loaded into `/store/` p If URL is not reachable on disk load when the server is starting up tables, then all errors are caught. If in this case there were errors, tables can be reloaded (become visible) via `DETACH TABLE table_name` -> `ATTACH TABLE table_name`. If metadata was successfully loaded at server startup, then tables are available straight away. Use [http_max_single_read_retries](../operations/settings/settings.md#http-max-single-read-retries) setting to limit the maximum number of retries during a single HTTP read. + + +## Zero-copy Replication (not ready for production) {#zero-copy} + +ClickHouse supports zero-copy replication for `S3` and `HDFS` disks, which means that if the data is stored remotely on several machines and needs to be synchronized, then only the metadata is replicated (paths to the data parts), but not the data itself. 
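To make the reload procedure mentioned just above concrete, here is a minimal sketch; `hits_web` is a hypothetical table name standing in for any table stored on a `web` disk:

```bash
# If the web-disk endpoint was unreachable while the server started, detach and re-attach
# the table to make it visible again, then verify it answers queries.
clickhouse-client -q "DETACH TABLE hits_web"
clickhouse-client -q "ATTACH TABLE hits_web"
clickhouse-client -q "SELECT count() FROM hits_web"
```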
diff --git a/docs/en/operations/system-tables/processors_profile_log.md b/docs/en/operations/system-tables/processors_profile_log.md new file mode 100644 index 00000000000..2d76edb5dd7 --- /dev/null +++ b/docs/en/operations/system-tables/processors_profile_log.md @@ -0,0 +1,75 @@ +# system.processors_profile_log {#system-processors_profile_log} + +This table contains profiling information at the level of individual processors (the units that you can see in [`EXPLAIN PIPELINE`](../../sql-reference/statements/explain.md#explain-pipeline)). + +Columns: + +- `event_date` ([Date](../../sql-reference/data-types/date.md)) — The date when the event happened. +- `event_time` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — The date and time when the event happened. +- `id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — ID of the processor. +- `parent_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — IDs of the parent processors. +- `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query. +- `name` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) — Name of the processor. +- `elapsed_us` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of microseconds this processor spent executing. +- `input_wait_elapsed_us` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of microseconds this processor was waiting for data (from another processor). +- `output_wait_elapsed_us` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of microseconds this processor was waiting because its output port was full. + +**Example** + +Query: + +``` sql +EXPLAIN PIPELINE +SELECT sleep(1) + +┌─explain─────────────────────────┐ +│ (Expression) │ +│ ExpressionTransform │ +│ (SettingQuotaAndLimits) │ +│ (ReadFromStorage) │ +│ SourceFromSingleChunk 0 → 1 │ +└─────────────────────────────────┘ + +SELECT sleep(1) +SETTINGS log_processors_profiles = 1 + +Query id: feb5ed16-1c24-4227-aa54-78c02b3b27d4 + +┌─sleep(1)─┐ +│ 0 │ +└──────────┘ + +1 rows in set. Elapsed: 1.018 sec. + +SELECT + name, + elapsed_us, + input_wait_elapsed_us, + output_wait_elapsed_us +FROM system.processors_profile_log +WHERE query_id = 'feb5ed16-1c24-4227-aa54-78c02b3b27d4' +ORDER BY name ASC +``` + +Result: + +``` text +┌─name────────────────────┬─elapsed_us─┬─input_wait_elapsed_us─┬─output_wait_elapsed_us─┐ +│ ExpressionTransform │ 1000497 │ 2823 │ 197 │ +│ LazyOutputFormat │ 36 │ 1002188 │ 0 │ +│ LimitsCheckingTransform │ 10 │ 1002994 │ 106 │ +│ NullSource │ 5 │ 1002074 │ 0 │ +│ NullSource │ 1 │ 1002084 │ 0 │ +│ SourceFromSingleChunk │ 45 │ 4736 │ 1000819 │ +└─────────────────────────┴────────────┴───────────────────────┴────────────────────────┘ +``` + +Here you can see: + +- `ExpressionTransform` was executing the `sleep(1)` function, so its `work` takes about 1e6 microseconds, and therefore `elapsed_us` > 1e6. +- `SourceFromSingleChunk` needs to wait, because `ExpressionTransform` does not accept any data while executing `sleep(1)`, so it stays in the `PortFull` state for about 1e6 microseconds, and therefore `output_wait_elapsed_us` > 1e6. +- `LimitsCheckingTransform`/`NullSource`/`LazyOutputFormat` need to wait until `ExpressionTransform` has executed `sleep(1)` before they can process the result, so `input_wait_elapsed_us` > 1e6. 
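As a usage note, the same table can be aggregated to see which processors dominate a query's wall time. A minimal sketch reusing the query id from the example above; since the log is flushed in the background, forcing a flush first may be needed:

``` sql
-- Flush in-memory log buffers so the rows for the query are visible.
SYSTEM FLUSH LOGS;

-- Total busy and wait time per processor for a single query.
SELECT
    name,
    sum(elapsed_us) AS total_elapsed_us,
    sum(input_wait_elapsed_us) AS total_input_wait_us,
    sum(output_wait_elapsed_us) AS total_output_wait_us
FROM system.processors_profile_log
WHERE query_id = 'feb5ed16-1c24-4227-aa54-78c02b3b27d4'
GROUP BY name
ORDER BY total_elapsed_us DESC;
```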
+ +**See Also** + +- [`EXPLAIN PIPELINE`](../../sql-reference/statements/explain.md#explain-pipeline) diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index d535a516b3a..fc48c97bb61 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -393,6 +393,13 @@ This is a generalization of other functions named `toStartOf*`. For example, `toStartOfInterval(t, INTERVAL 1 day)` returns the same as `toStartOfDay(t)`, `toStartOfInterval(t, INTERVAL 15 minute)` returns the same as `toStartOfFifteenMinutes(t)` etc. +## toLastDayOfMonth {#toLastDayOfMonth} + +Rounds up a date or date with time to the last day of the month. +Returns the date. + +Alias: `LAST_DAY`. + ## toTime {#totime} Converts a date with time to a certain fixed date, while preserving the time. diff --git a/docs/en/sql-reference/functions/index.md b/docs/en/sql-reference/functions/index.md index 7cceec889bd..572aa7f632e 100644 --- a/docs/en/sql-reference/functions/index.md +++ b/docs/en/sql-reference/functions/index.md @@ -77,7 +77,7 @@ A function configuration contains the following settings: - `argument` - argument description with the `type`, and optional `name` of an argument. Each argument is described in a separate setting. Specifying name is necessary if argument names are part of serialization for user defined function format like [Native](../../interfaces/formats.md#native) or [JSONEachRow](../../interfaces/formats.md#jsoneachrow). Default argument name value is `c` + argument_number. - `format` - a [format](../../interfaces/formats.md) in which arguments are passed to the command. - `return_type` - the type of a returned value. -- `return_name` - name of retuned value. Specifying return name is necessary if return name is part of serialization for user defined function format like [Native](../../interfaces/formats.md#native) or [JSONEachRow](../../interfaces/formats.md#jsoneachrow). Optional. Default value is `result`. +- `return_name` - name of returned value. Specifying return name is necessary if return name is part of serialization for user defined function format like [Native](../../interfaces/formats.md#native) or [JSONEachRow](../../interfaces/formats.md#jsoneachrow). Optional. Default value is `result`. - `type` - an executable type. If `type` is set to `executable` then single command is started. If it is set to `executable_pool` then a pool of commands is created. - `max_command_execution_time` - maximum execution time in seconds for processing block of data. This setting is valid for `executable_pool` commands only. Optional. Default value is `10`. - `command_termination_timeout` - time in seconds during which a command should finish after its pipe is closed. After that time `SIGTERM` is sent to the process executing the command. Optional. Default value is `10`. diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index bce3f9144b1..cedde8a7f35 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -2499,3 +2499,41 @@ Result: │ 286 │ └──────────────────────────┘ ``` + +## getTypeSerializationStreams {#getTypeSerializationStreams} + +Returns the serialization streams of a data type. 
+ +**Syntax** +``` sql +getTypeSerializationStreams(type_name) + +getTypeSerializationStreams(column) +``` + +**Arguments** +- `type_name` - Name of the data type to get the serialization streams for. [String](../../sql-reference/data-types/string.md#string). +- `column` - Any column that has a data type. + +**Returned value** +- List of serialization streams. + +Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). + +**Example** + +Query: + +``` sql +SELECT getTypeSerializationStreams('Array(Array(Int8))') +``` + +Result: + +``` text +┌───────────────────────getTypeSerializationStreams('Array(Array(Int8))')─────────────────────────────┐ +│ ['{ArraySizes}','{ArrayElements, ArraySizes}','{ArrayElements, ArrayElements, Regular}'] │ +└─────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index 409ec422ade..ee663c92695 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -114,9 +114,9 @@ In addition, this column is not substituted when using an asterisk in a SELECT q ### EPHEMERAL {#ephemeral} -`EPHEMERAL expr` +`EPHEMERAL [expr]` -Ephemeral column. Such a column isn't stored in the table and cannot be SELECTed, but can be referenced in the defaults of CREATE statement. +Ephemeral column. Such a column isn't stored in the table and cannot be SELECTed, but can be referenced in the defaults of CREATE statement. If `expr` is omitted, the type for the column is required. INSERT without a list of columns will skip such a column, so the SELECT/INSERT invariant is preserved - the dump obtained using `SELECT *` can be inserted back into the table using INSERT without specifying the list of columns. ### ALIAS {#alias} diff --git a/docs/en/sql-reference/statements/select/index.md b/docs/en/sql-reference/statements/select/index.md index 33644133153..c5421c83091 100644 --- a/docs/en/sql-reference/statements/select/index.md +++ b/docs/en/sql-reference/statements/select/index.md @@ -22,7 +22,7 @@ SELECT [DISTINCT [ON (column1, column2, ...)]] expr_list [WHERE expr] [GROUP BY expr_list] [WITH ROLLUP|WITH CUBE] [WITH TOTALS] [HAVING expr] -[ORDER BY expr_list] [WITH FILL] [FROM expr] [TO expr] [STEP expr] +[ORDER BY expr_list] [WITH FILL] [FROM expr] [TO expr] [STEP expr] [INTERPOLATE [(expr_list)]] [LIMIT [offset_value, ]n BY columns] [LIMIT [n, ]m] [WITH TIES] [SETTINGS ...] diff --git a/docs/en/sql-reference/statements/select/order-by.md b/docs/en/sql-reference/statements/select/order-by.md index b24f0213e4e..04630ba1075 100644 --- a/docs/en/sql-reference/statements/select/order-by.md +++ b/docs/en/sql-reference/statements/select/order-by.md @@ -280,6 +280,7 @@ To fill multiple columns, add `WITH FILL` modifier with optional parameters afte ``` sql ORDER BY expr [WITH FILL] [FROM const_expr] [TO const_expr] [STEP const_numeric_expr], ... exprN [WITH FILL] [FROM expr] [TO expr] [STEP numeric_expr] +[INTERPOLATE [(col [AS expr], ... colN [AS exprN])]] ``` `WITH FILL` can be applied for fields with Numeric (all kinds of float, decimal, int) or Date/DateTime types. When applied for `String` fields, missed values are filled with empty strings. @@ -287,6 +288,7 @@ When `FROM const_expr` not defined sequence of filling use minimal `expr` field When `TO const_expr` not defined sequence of filling use maximum `expr` field value from `ORDER BY`. 
When `STEP const_numeric_expr` defined then `const_numeric_expr` interprets `as is` for numeric types, as `days` for Date type, as `seconds` for DateTime type. It also supports [INTERVAL](https://clickhouse.com/docs/en/sql-reference/data-types/special-data-types/interval/) data type representing time and date intervals. When `STEP const_numeric_expr` omitted then sequence of filling use `1.0` for numeric type, `1 day` for Date type and `1 second` for DateTime type. +`INTERPOLATE` can be applied to columns that do not participate in `ORDER BY WITH FILL`. Such columns are filled by applying `expr` to the previous field value. If `expr` is omitted, the previous value is repeated. If the column list is omitted, all allowed columns are included. Example of a query without `WITH FILL`: @@ -483,4 +485,62 @@ Result: └────────────┴────────────┴──────────┘ ``` +Example of a query without `INTERPOLATE`: + +``` sql +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter + FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5; +``` + +Result: + +``` text +┌───n─┬─source───┬─inter─┐ +│ 0 │ │ 0 │ +│ 0.5 │ │ 0 │ +│ 1 │ original │ 1 │ +│ 1.5 │ │ 0 │ +│ 2 │ │ 0 │ +│ 2.5 │ │ 0 │ +│ 3 │ │ 0 │ +│ 3.5 │ │ 0 │ +│ 4 │ original │ 4 │ +│ 4.5 │ │ 0 │ +│ 5 │ │ 0 │ +│ 5.5 │ │ 0 │ +│ 7 │ original │ 7 │ +└─────┴──────────┴───────┘ +``` + +Same query after applying `INTERPOLATE`: + +``` sql +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter + FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5 INTERPOLATE (inter AS inter + 1); +``` + +Result: + +``` text +┌───n─┬─source───┬─inter─┐ +│ 0 │ │ 0 │ +│ 0.5 │ │ 0 │ +│ 1 │ original │ 1 │ +│ 1.5 │ │ 2 │ +│ 2 │ │ 3 │ +│ 2.5 │ │ 4 │ +│ 3 │ │ 5 │ +│ 3.5 │ │ 6 │ +│ 4 │ original │ 4 │ +│ 4.5 │ │ 5 │ +│ 5 │ │ 6 │ +│ 5.5 │ │ 7 │ +│ 7 │ original │ 7 │ +└─────┴──────────┴───────┘ +``` + [Original article](https://clickhouse.com/docs/en/sql-reference/statements/select/order-by/) diff --git a/docs/ja/introduction/adopters.md b/docs/ja/introduction/adopters.md index 6f878bf1dfe..3372bb74f12 100644 --- a/docs/ja/introduction/adopters.md +++ b/docs/ja/introduction/adopters.md @@ -27,7 +27,7 @@ toc_title: "\u30A2\u30C0\u30D7\u30BF\u30FC" | Cisco | ネットワーク | トラフィック分析 | — | — | [ライトニングトーク2019](https://youtu.be/-hI1vDR2oPY?t=5057) | | Citadel Securities | 金融 | — | — | — | [2019年の貢献](https://github.com/ClickHouse/ClickHouse/pull/4774) | | シティモービル | タクシー | 分析 | — | — | [ロシア語でのブログ投稿,月2020](https://habr.com/en/company/citymobil/blog/490660/) | -| ContentSquare | ウェブ分析 | 主な製品 | — | — | [フランス語でのブログ投稿,November2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | +| Contentsquare | ウェブ分析 | 主な製品 | — | — | [フランス語でのブログ投稿,November2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | | Cloudflare | CDN | トラフィック分析 | 36台のサーバー | — | [ブログ投稿,月2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [ブログ投稿,月2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | | コルネット | 分析 | 主な製品 | — | — | [2019年英語スライド](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | | CraiditX 氪信 | ファイナンスAI | 分析 | — | — | 
[2019年のスライド](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index c75fa8e92ce..f2a13569c23 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -182,7 +182,7 @@ Marks numbers: 0 1 2 3 4 5 6 7 8 Разреженный индекс допускает чтение лишних строк. При чтении одного диапазона первичного ключа, может быть прочитано до `index_granularity * 2` лишних строк в каждом блоке данных. -Разреженный индекс почти всегда помещаеся в оперативную память и позволяет работать с очень большим количеством строк в таблицах. +Разреженный индекс почти всегда помещается в оперативную память и позволяет работать с очень большим количеством строк в таблицах. ClickHouse не требует уникального первичного ключа. Можно вставить много строк с одинаковым первичным ключом. diff --git a/docs/ru/sql-reference/statements/create/table.md b/docs/ru/sql-reference/statements/create/table.md index b9c2a4f0f0b..48cce437b8d 100644 --- a/docs/ru/sql-reference/statements/create/table.md +++ b/docs/ru/sql-reference/statements/create/table.md @@ -110,9 +110,9 @@ SELECT x, toTypeName(x) FROM t1; ### EPHEMERAL {#ephemeral} -`EPHEMERAL expr` +`EPHEMERAL [expr]` -Эфемерное выражение. Такой столбец не хранится в таблице и не может быть получен в запросе SELECT, но на него можно ссылаться в выражениях по умолчанию запроса CREATE. +Эфемерное выражение. Такой столбец не хранится в таблице и не может быть получен в запросе SELECT, но на него можно ссылаться в выражениях по умолчанию запроса CREATE. Если значение по умолчанию `expr` не указано, то тип колонки должен быть специфицирован. INSERT без списка столбцов игнорирует этот столбец, таким образом сохраняется инвариант - т.е. дамп, полученный путём `SELECT *`, можно вставить обратно в таблицу INSERT-ом без указания списка столбцов. ### ALIAS {#alias} diff --git a/docs/ru/sql-reference/statements/select/index.md b/docs/ru/sql-reference/statements/select/index.md index 4a409dc7743..fead3c11060 100644 --- a/docs/ru/sql-reference/statements/select/index.md +++ b/docs/ru/sql-reference/statements/select/index.md @@ -20,7 +20,7 @@ SELECT [DISTINCT [ON (column1, column2, ...)]] expr_list [WHERE expr] [GROUP BY expr_list] [WITH ROLLUP|WITH CUBE] [WITH TOTALS] [HAVING expr] -[ORDER BY expr_list] [WITH FILL] [FROM expr] [TO expr] [STEP expr] +[ORDER BY expr_list] [WITH FILL] [FROM expr] [TO expr] [STEP expr] [INTERPOLATE [(expr_list)]] [LIMIT [offset_value, ]n BY columns] [LIMIT [n, ]m] [WITH TIES] [SETTINGS ...] diff --git a/docs/ru/sql-reference/statements/select/order-by.md b/docs/ru/sql-reference/statements/select/order-by.md index 3f52b260423..e293e62e34c 100644 --- a/docs/ru/sql-reference/statements/select/order-by.md +++ b/docs/ru/sql-reference/statements/select/order-by.md @@ -280,6 +280,7 @@ SELECT * FROM collate_test ORDER BY s ASC COLLATE 'en'; ```sql ORDER BY expr [WITH FILL] [FROM const_expr] [TO const_expr] [STEP const_numeric_expr], ... exprN [WITH FILL] [FROM expr] [TO expr] [STEP numeric_expr] +[INTERPOLATE [(col [AS expr], ... colN [AS exprN])]] ``` `WITH FILL` может быть применен к полям с числовыми (все разновидности float, int, decimal) или временными (все разновидности Date, DateTime) типами. В случае применения к полям типа `String` недостающие значения заполняются пустой строкой. 
@@ -289,6 +290,8 @@ ORDER BY expr [WITH FILL] [FROM const_expr] [TO const_expr] [STEP const_numeric_ Когда `STEP const_numeric_expr` не указан, тогда используется `1.0` для числовых типов, `1 день` для типа Date и `1 секунда` для типа DateTime. +`INTERPOLATE` может быть применен к колонкам, не участвующим в `ORDER BY WITH FILL`. Такие колонки заполняются значениями, вычисляемыми применением `expr` к предыдущему значению. Если `expr` опущен, то колонка заполняется предыдущим значением. Если список колонок не указан, то включаются все разрешенные колонки. + Пример запроса без использования `WITH FILL`: ```sql SELECT n, source FROM ( @@ -395,3 +398,58 @@ ORDER BY │ 1970-03-12 │ 1970-01-08 │ original │ └────────────┴────────────┴──────────┘ ``` + +Пример запроса без `INTERPOLATE`: + +``` sql +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter + FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5; +``` + +Результат: +``` text +┌───n─┬─source───┬─inter─┐ +│ 0 │ │ 0 │ +│ 0.5 │ │ 0 │ +│ 1 │ original │ 1 │ +│ 1.5 │ │ 0 │ +│ 2 │ │ 0 │ +│ 2.5 │ │ 0 │ +│ 3 │ │ 0 │ +│ 3.5 │ │ 0 │ +│ 4 │ original │ 4 │ +│ 4.5 │ │ 0 │ +│ 5 │ │ 0 │ +│ 5.5 │ │ 0 │ +│ 7 │ original │ 7 │ +└─────┴──────────┴───────┘ +``` + +Тот же запрос с `INTERPOLATE`: + +``` sql +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter + FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5 INTERPOLATE (inter AS inter + 1); +``` + +Результат: +``` text +┌───n─┬─source───┬─inter─┐ +│ 0 │ │ 0 │ +│ 0.5 │ │ 0 │ +│ 1 │ original │ 1 │ +│ 1.5 │ │ 2 │ +│ 2 │ │ 3 │ +│ 2.5 │ │ 4 │ +│ 3 │ │ 5 │ +│ 3.5 │ │ 6 │ +│ 4 │ original │ 4 │ +│ 4.5 │ │ 5 │ +│ 5 │ │ 6 │ +│ 5.5 │ │ 7 │ +│ 7 │ original │ 7 │ +└─────┴──────────┴───────┘ diff --git a/docs/ru/sql-reference/table-functions/postgresql.md b/docs/ru/sql-reference/table-functions/postgresql.md index a8ae7cfb80b..e61ca69d78c 100644 --- a/docs/ru/sql-reference/table-functions/postgresql.md +++ b/docs/ru/sql-reference/table-functions/postgresql.md @@ -126,7 +126,7 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32) **См. также** -- [Движок таблиц PostgreSQL](../../sql-reference/table-functions/postgresql.md) +- [Движок таблиц PostgreSQL](../../engines/table-engines/integrations/postgresql.md) - [Использование PostgreSQL как источника данных для внешнего словаря](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql) [Оригинальная статья](https://clickhouse.com/docs/ru/sql-reference/table-functions/postgresql/) diff --git a/docs/tools/requirements.txt b/docs/tools/requirements.txt index c48a70b0909..dd641c13629 100644 --- a/docs/tools/requirements.txt +++ b/docs/tools/requirements.txt @@ -16,7 +16,7 @@ jsmin==3.0.0 livereload==2.6.3 Markdown==3.3.2 MarkupSafe==2.1.0 -mkdocs==1.1.2 +mkdocs==1.3.0 mkdocs-htmlproofer-plugin==0.0.3 mkdocs-macros-plugin==0.4.20 nltk==3.7 diff --git a/docs/zh/operations/configuration-files.md b/docs/zh/operations/configuration-files.md index 7998baafb6c..c99b8fcfca3 100644 --- a/docs/zh/operations/configuration-files.md +++ b/docs/zh/operations/configuration-files.md @@ -3,7 +3,7 @@ ClickHouse支持多配置文件管理。主配置文件是`/etc/clickhouse-server/config.xml`。其余文件须在目录`/etc/clickhouse-server/config.d`。 !!! 
注意: - 所有配置文件必须是XML格式。此外,配置文件须有相同的跟元素,通常是``。 + 所有配置文件必须是XML格式。此外,配置文件须有相同的根元素,通常是``。 主配置文件中的一些配置可以通过`replace`或`remove`属性被配置文件覆盖。 diff --git a/packages/clickhouse-server.service b/packages/clickhouse-server.service index a9400b24270..028b4fbf8ab 100644 --- a/packages/clickhouse-server.service +++ b/packages/clickhouse-server.service @@ -20,7 +20,7 @@ ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml EnvironmentFile=-/etc/default/clickhouse LimitCORE=infinity LimitNOFILE=500000 -CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE +CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE [Install] # ClickHouse should not start from the rescue shell (rescue.target). diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index c2094b3b00d..a34ce02b293 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -163,10 +163,24 @@ void Client::initialize(Poco::Util::Application & self) configReadClient(config(), home_path); + /** getenv is thread-safe in Linux glibc and in all sane libc implementations. + * But the standard does not guarantee that subsequent calls will not rewrite the value by returned pointer. + * + * man getenv: + * + * As typically implemented, getenv() returns a pointer to a string within the environment list. + * The caller must take care not to modify this string, since that would change the environment of + * the process. + * + * The implementation of getenv() is not required to be reentrant. The string pointed to by the return value of getenv() + * may be statically allocated, and can be modified by a subsequent call to getenv(), putenv(3), setenv(3), or unsetenv(3). + */ + const char * env_user = getenv("CLICKHOUSE_USER"); - const char * env_password = getenv("CLICKHOUSE_PASSWORD"); if (env_user) config().setString("user", env_user); + + const char * env_password = getenv("CLICKHOUSE_PASSWORD"); if (env_password) config().setString("password", env_password); @@ -810,7 +824,7 @@ void Client::addOptions(OptionsDescription & options_description) ("quota_key", po::value(), "A string to differentiate quotas when the user have keyed quotas configured on server") ("max_client_network_bandwidth", po::value(), "the maximum speed of data exchange over the network for the client in bytes per second.") - ("compression", po::value(), "enable or disable compression") + ("compression", po::value(), "enable or disable compression (enabled by default for remote communication and disabled for localhost communication).") ("query-fuzzer-runs", po::value()->default_value(0), "After executing every SELECT query, do random mutations in it and run again specified number of times. 
This is used for testing to discover unexpected corner cases.") ("interleave-queries-file", po::value>()->multitoken(), @@ -1005,6 +1019,7 @@ void Client::processConfig() global_context->setCurrentQueryId(query_id); } print_stack_trace = config().getBool("stacktrace", false); + logging_initialized = true; if (config().has("multiquery")) is_multiquery = true; diff --git a/programs/format/Format.cpp b/programs/format/Format.cpp index 835afcdb2ed..50d85cdd43d 100644 --- a/programs/format/Format.cpp +++ b/programs/format/Format.cpp @@ -54,6 +54,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv) ("multiquery,n", "allow multiple queries in the same file") ("obfuscate", "obfuscate instead of formatting") ("backslash", "add a backslash at the end of each line of the formatted query") + ("allow_settings_after_format_in_insert", "Allow SETTINGS after FORMAT, but note, that this is not always safe") ("seed", po::value(), "seed (arbitrary string) that determines the result of obfuscation") ; @@ -83,6 +84,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv) bool multiple = options.count("multiquery"); bool obfuscate = options.count("obfuscate"); bool backslash = options.count("backslash"); + bool allow_settings_after_format_in_insert = options.count("allow_settings_after_format_in_insert"); if (quiet && (hilite || oneline || obfuscate)) { @@ -154,7 +156,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv) const char * pos = query.data(); const char * end = pos + query.size(); - ParserQuery parser(end); + ParserQuery parser(end, allow_settings_after_format_in_insert); do { ASTPtr res = parseQueryAndMovePosition( diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index bb6684ca137..18b62e65765 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -434,6 +434,14 @@ catch (...) return getCurrentExceptionCode(); } +void LocalServer::updateLoggerLevel(const String & logs_level) +{ + if (!logging_initialized) + return; + + config().setString("logger.level", logs_level); + updateLevels(config(), logger()); +} void LocalServer::processConfig() { @@ -460,30 +468,31 @@ void LocalServer::processConfig() auto logging = (config().has("logger.console") || config().has("logger.level") || config().has("log-level") + || config().has("send_logs_level") || config().has("logger.log")); - auto file_logging = config().has("server_logs_file"); - if (is_interactive && logging && !file_logging) - throw Exception("For interactive mode logging is allowed only with --server_logs_file option", - ErrorCodes::BAD_ARGUMENTS); + auto level = config().getString("log-level", "trace"); - if (file_logging) + if (config().has("server_logs_file")) { - auto level = Poco::Logger::parseLevel(config().getString("log-level", "trace")); - Poco::Logger::root().setLevel(level); + auto poco_logs_level = Poco::Logger::parseLevel(level); + Poco::Logger::root().setLevel(poco_logs_level); Poco::Logger::root().setChannel(Poco::AutoPtr(new Poco::SimpleFileChannel(server_logs_file))); + logging_initialized = true; } - else if (logging) + else if (logging || is_interactive) { - // force enable logging config().setString("logger", "logger"); - // sensitive data rules are not used here + auto log_level_default = is_interactive && !logging ? 
"none" : level; + config().setString("logger.level", config().getString("log-level", config().getString("send_logs_level", log_level_default))); buildLoggers(config(), logger(), "clickhouse-local"); + logging_initialized = true; } else { Poco::Logger::root().setLevel("none"); Poco::Logger::root().setChannel(Poco::AutoPtr(new Poco::NullChannel())); + logging_initialized = false; } shared_context = Context::createShared(); @@ -713,6 +722,8 @@ void LocalServer::processOptions(const OptionsDescription &, const CommandLineOp config().setString("logger.log", options["logger.log"].as()); if (options.count("logger.level")) config().setString("logger.level", options["logger.level"].as()); + if (options.count("send_logs_level")) + config().setString("send_logs_level", options["send_logs_level"].as()); } } diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h index 969af7f1b77..e96fb211554 100644 --- a/programs/local/LocalServer.h +++ b/programs/local/LocalServer.h @@ -46,6 +46,8 @@ protected: void processConfig() override; + void updateLoggerLevel(const String & logs_level) override; + private: /** Composes CREATE subquery based on passed arguments (--structure --file --table and --input-format) * This query will be executed first, before queries passed through --query argument diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index c12abda9594..fc9187cb622 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1503,7 +1503,8 @@ int Server::main(const std::vector & /*args*/) else { /// Initialize a watcher periodically updating DNS cache - dns_cache_updater = std::make_unique(global_context, config().getInt("dns_cache_update_period", 15)); + dns_cache_updater = std::make_unique( + global_context, config().getInt("dns_cache_update_period", 15), config().getUInt("dns_max_consecutive_failures", 5)); } #if defined(OS_LINUX) @@ -1638,6 +1639,8 @@ int Server::main(const std::vector & /*args*/) server.start(); LOG_INFO(log, "Listening for {}", server.getDescription()); } + + global_context->setServerCompletelyStarted(); LOG_INFO(log, "Ready for connections."); } diff --git a/programs/server/config.xml b/programs/server/config.xml index 1de379b0b2a..3bb26a3a368 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -148,13 +148,13 @@ - + + - - - - - - - + + + system + processors_profile_log
+ toYYYYMM(event_date) + 7500 +
+
diff --git a/tests/integration/test_host_ip_change/__init__.py b/tests/integration/test_dns_cache/__init__.py similarity index 100% rename from tests/integration/test_host_ip_change/__init__.py rename to tests/integration/test_dns_cache/__init__.py diff --git a/tests/integration/test_host_ip_change/configs/dns_update_long.xml b/tests/integration/test_dns_cache/configs/dns_update_long.xml similarity index 100% rename from tests/integration/test_host_ip_change/configs/dns_update_long.xml rename to tests/integration/test_dns_cache/configs/dns_update_long.xml diff --git a/tests/integration/test_host_ip_change/configs/dns_update_short.xml b/tests/integration/test_dns_cache/configs/dns_update_short.xml similarity index 55% rename from tests/integration/test_host_ip_change/configs/dns_update_short.xml rename to tests/integration/test_dns_cache/configs/dns_update_short.xml index e0b68e27be0..86e1310b335 100644 --- a/tests/integration/test_host_ip_change/configs/dns_update_short.xml +++ b/tests/integration/test_dns_cache/configs/dns_update_short.xml @@ -1,3 +1,4 @@ 1 + 6 diff --git a/tests/integration/test_host_ip_change/configs/listen_host.xml b/tests/integration/test_dns_cache/configs/listen_host.xml similarity index 100% rename from tests/integration/test_host_ip_change/configs/listen_host.xml rename to tests/integration/test_dns_cache/configs/listen_host.xml diff --git a/tests/integration/test_host_ip_change/configs/remote_servers.xml b/tests/integration/test_dns_cache/configs/remote_servers.xml similarity index 100% rename from tests/integration/test_host_ip_change/configs/remote_servers.xml rename to tests/integration/test_dns_cache/configs/remote_servers.xml diff --git a/tests/integration/test_host_ip_change/configs/users_with_hostname.xml b/tests/integration/test_dns_cache/configs/users_with_hostname.xml similarity index 100% rename from tests/integration/test_host_ip_change/configs/users_with_hostname.xml rename to tests/integration/test_dns_cache/configs/users_with_hostname.xml diff --git a/tests/integration/test_host_ip_change/test.py b/tests/integration/test_dns_cache/test.py similarity index 91% rename from tests/integration/test_host_ip_change/test.py rename to tests/integration/test_dns_cache/test.py index 604f2e5dc76..820ff221f55 100644 --- a/tests/integration/test_host_ip_change/test.py +++ b/tests/integration/test_dns_cache/test.py @@ -285,3 +285,24 @@ def test_user_access_ip_change(cluster_with_dns_cache_update, node): retry_count=retry_count, sleep_time=1, ) + + +def test_host_is_drop_from_cache_after_consecutive_failures( + cluster_with_dns_cache_update, +): + with pytest.raises(QueryRuntimeException): + node4.query( + "SELECT * FROM remote('InvalidHostThatDoesNotExist', 'system', 'one')" + ) + + # Note that the list of hosts in variable since lost_host will be there too (and it's dropped and added back) + # dns_update_short -> dns_max_consecutive_failures set to 6 + assert node4.wait_for_log_line( + "Cannot resolve host \\(InvalidHostThatDoesNotExist\\), error 0: Host not found." 
+ ) + assert node4.wait_for_log_line( + "Cached hosts not found:.*InvalidHostThatDoesNotExist**", repetitions=6 + ) + assert node4.wait_for_log_line( + "Cached hosts dropped:.*InvalidHostThatDoesNotExist.*" + ) diff --git a/tests/integration/test_dotnet_client/test.py b/tests/integration/test_dotnet_client/test.py index b147688c099..2af9b80f720 100644 --- a/tests/integration/test_dotnet_client/test.py +++ b/tests/integration/test_dotnet_client/test.py @@ -44,7 +44,7 @@ def dotnet_container(): "-f", docker_compose, "up", - "--no-recreate", + "--force-recreate", "-d", "--no-build", ] diff --git a/tests/integration/test_format_schema_on_server/test.py b/tests/integration/test_format_schema_on_server/test.py index 7001d53ccf2..0b7d8837ad3 100644 --- a/tests/integration/test_format_schema_on_server/test.py +++ b/tests/integration/test_format_schema_on_server/test.py @@ -29,7 +29,7 @@ def create_simple_table(): def test_protobuf_format_input(started_cluster): create_simple_table() instance.http_query( - "INSERT INTO test.simple FORMAT Protobuf SETTINGS format_schema='simple:KeyValuePair'", + "INSERT INTO test.simple SETTINGS format_schema='simple:KeyValuePair' FORMAT Protobuf", "\x07\x08\x01\x12\x03abc\x07\x08\x02\x12\x03def", ) assert instance.query("SELECT * from test.simple") == "1\tabc\n2\tdef\n" diff --git a/tests/integration/test_hive_query/test.py b/tests/integration/test_hive_query/test.py index 9e9a20fa6d1..374a86d51e8 100644 --- a/tests/integration/test_hive_query/test.py +++ b/tests/integration/test_hive_query/test.py @@ -149,6 +149,160 @@ def test_orc_groupby(started_cluster): assert result == expected_result +@pytest.mark.parametrize( + "table,use_local_cache_for_remote_storage,enable_orc_file_minmax_index,enable_orc_stripe_minmax_index", + [ + pytest.param( + "demo_orc_no_cache_no_index", + "false", + "false", + "false", + id="demo_orc_no_cache_no_index", + ), + pytest.param( + "demo_orc_with_cache_no_index", + "true", + "false", + "false", + id="demo_orc_with_cache_no_index", + ), + pytest.param( + "demo_orc_no_cache_file_index", + "false", + "true", + "false", + id="demo_orc_no_cache_file_index", + ), + pytest.param( + "demo_orc_with_cache_file_index", + "true", + "true", + "false", + id="demo_orc_with_cache_file_index", + ), + pytest.param( + "demo_orc_no_cache_stripe_index", + "false", + "true", + "true", + id="demo_orc_no_cache_stripe_index", + ), + pytest.param( + "demo_orc_with_cache_stripe_index", + "true", + "true", + "true", + id="demo_orc_with_cache_stripe_index", + ), + ], +) +def test_orc_minmax_index( + started_cluster, + table, + use_local_cache_for_remote_storage, + enable_orc_file_minmax_index, + enable_orc_stripe_minmax_index, +): + node = started_cluster.instances["h0_0_0"] + result = node.query( + """ + DROP TABLE IF EXISTS default.{table}; + CREATE TABLE default.{table} (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo_orc') PARTITION BY(day) + SETTINGS enable_orc_file_minmax_index = {enable_orc_file_minmax_index}, enable_orc_stripe_minmax_index = {enable_orc_stripe_minmax_index}; + """.format( + table=table, + enable_orc_file_minmax_index=enable_orc_file_minmax_index, + enable_orc_stripe_minmax_index=enable_orc_stripe_minmax_index, + ) + ) + assert result.strip() == "" + + for i in range(2): + result = node.query( + """ + SELECT day, id, score FROM default.{table} where day >= '2021-11-05' and day <= '2021-11-16' and score >= 15 and score <= 30 order by day, id + SETTINGS 
use_local_cache_for_remote_storage = {use_local_cache_for_remote_storage} + """.format( + table=table, + use_local_cache_for_remote_storage=use_local_cache_for_remote_storage, + ) + ) + + assert ( + result + == """2021-11-05 abd 15 +2021-11-16 aaa 22 +""" + ) + + +@pytest.mark.parametrize( + "table,use_local_cache_for_remote_storage,enable_parquet_rowgroup_minmax_index", + [ + pytest.param( + "demo_parquet_no_cache_no_index", + "false", + "false", + id="demo_parquet_no_cache_no_index", + ), + pytest.param( + "demo_parquet_with_cache_no_index", + "true", + "false", + id="demo_parquet_with_cache_no_index", + ), + pytest.param( + "demo_parquet_no_cache_rowgroup_index", + "false", + "true", + id="demo_parquet_no_cache_rowgroup_index", + ), + pytest.param( + "demo_parquet_with_cache_rowgroup_index", + "true", + "true", + id="demo_parquet_with_cache_rowgroup_index", + ), + ], +) +def test_parquet_minmax_index( + started_cluster, + table, + use_local_cache_for_remote_storage, + enable_parquet_rowgroup_minmax_index, +): + node = started_cluster.instances["h0_0_0"] + result = node.query( + """ + DROP TABLE IF EXISTS default.{table}; + CREATE TABLE default.{table} (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo') PARTITION BY(day) + SETTINGS enable_parquet_rowgroup_minmax_index = {enable_parquet_rowgroup_minmax_index} + """.format( + table=table, + enable_parquet_rowgroup_minmax_index=enable_parquet_rowgroup_minmax_index, + ) + ) + assert result.strip() == "" + + for i in range(2): + result = node.query( + """ + SELECT day, id, score FROM default.{table} where day >= '2021-11-05' and day <= '2021-11-16' and score >= 15 and score <= 30 order by day, id + SETTINGS use_local_cache_for_remote_storage = {use_local_cache_for_remote_storage} + """.format( + table=table, + use_local_cache_for_remote_storage=use_local_cache_for_remote_storage, + ) + ) + + assert ( + result + == """2021-11-05 abd 15 +2021-11-16 aaa 22 +""" + ) + + def test_hive_columns_prunning(started_cluster): logging.info("Start testing groupby ...") node = started_cluster.instances["h0_0_0"] diff --git a/tests/integration/test_input_format_parallel_parsing_memory_tracking/configs/conf.xml b/tests/integration/test_input_format_parallel_parsing_memory_tracking/configs/conf.xml index 3adba1d402a..2c40f0fab4a 100644 --- a/tests/integration/test_input_format_parallel_parsing_memory_tracking/configs/conf.xml +++ b/tests/integration/test_input_format_parallel_parsing_memory_tracking/configs/conf.xml @@ -18,6 +18,7 @@ +
diff --git a/tests/integration/test_mysql_protocol/test.py b/tests/integration/test_mysql_protocol/test.py index 78049e0f123..6e61675563f 100644 --- a/tests/integration/test_mysql_protocol/test.py +++ b/tests/integration/test_mysql_protocol/test.py @@ -55,7 +55,7 @@ def golang_container(): "-f", docker_compose, "up", - "--no-recreate", + "--force-recreate", "-d", "--no-build", ] @@ -82,7 +82,7 @@ def php_container(): "-f", docker_compose, "up", - "--no-recreate", + "--force-recreate", "-d", "--no-build", ] @@ -109,7 +109,7 @@ def nodejs_container(): "-f", docker_compose, "up", - "--no-recreate", + "--force-recreate", "-d", "--no-build", ] @@ -136,7 +136,7 @@ def java_container(): "-f", docker_compose, "up", - "--no-recreate", + "--force-recreate", "-d", "--no-build", ] diff --git a/tests/integration/test_postgresql_protocol/test.py b/tests/integration/test_postgresql_protocol/test.py index 5c270fd9ca7..43528c13c4d 100644 --- a/tests/integration/test_postgresql_protocol/test.py +++ b/tests/integration/test_postgresql_protocol/test.py @@ -56,7 +56,7 @@ def psql_client(): "-f", docker_compose, "up", - "--no-recreate", + "--force-recreate", "-d", "--build", ] @@ -99,7 +99,7 @@ def java_container(): "-f", docker_compose, "up", - "--no-recreate", + "--force-recreate", "-d", "--build", ] diff --git a/tests/integration/test_quota/test.py b/tests/integration/test_quota/test.py index 651726f30c0..fd5a6599a59 100644 --- a/tests/integration/test_quota/test.py +++ b/tests/integration/test_quota/test.py @@ -129,6 +129,7 @@ def test_quota_from_users_xml(): 1000, "\\N", "\\N", + "\\N", ] ] ) @@ -349,6 +350,7 @@ def test_tracking_quota(): "\\N", "\\N", "\\N", + "\\N", ] ] ) @@ -454,7 +456,7 @@ def test_exceed_quota(): ] ) system_quota_limits( - [["myQuota", 31556952, 0, 1, 1, 1, 1, 1, "\\N", 1, "\\N", "\\N"]] + [["myQuota", 31556952, 0, 1, 1, 1, 1, 1, "\\N", 1, "\\N", "\\N", "\\N"]] ) system_quota_usage( [ @@ -545,6 +547,7 @@ def test_exceed_quota(): 1000, "\\N", "\\N", + "\\N", ] ] ) @@ -634,6 +637,7 @@ def test_add_remove_interval(): 1000, "\\N", "\\N", + "\\N", ] ] ) @@ -695,6 +699,7 @@ def test_add_remove_interval(): 1000, "\\N", "\\N", + "\\N", ], [ "myQuota", @@ -709,6 +714,7 @@ def test_add_remove_interval(): "\\N", 20000, 120, + "\\N", ], ] ) @@ -842,6 +848,7 @@ def test_add_remove_interval(): 1000, "\\N", "\\N", + "\\N", ] ] ) @@ -1003,6 +1010,7 @@ def test_add_remove_interval(): 1000, "\\N", "\\N", + "\\N", ] ] ) @@ -1064,6 +1072,7 @@ def test_add_remove_quota(): 1000, "\\N", "\\N", + "\\N", ] ] ) @@ -1136,6 +1145,7 @@ def test_add_remove_quota(): 1000, "\\N", "\\N", + "\\N", ], [ "myQuota2", @@ -1150,6 +1160,7 @@ def test_add_remove_quota(): 4000, 400000, 60, + "\\N", ], [ "myQuota2", @@ -1164,6 +1175,7 @@ def test_add_remove_quota(): "\\N", "\\N", 1800, + "\\N", ], ] ) @@ -1226,6 +1238,7 @@ def test_add_remove_quota(): 1000, "\\N", "\\N", + "\\N", ] ] ) @@ -1294,6 +1307,7 @@ def test_add_remove_quota(): 1000, "\\N", "\\N", + "\\N", ] ] ) @@ -1356,6 +1370,7 @@ def test_reload_users_xml_by_timer(): 1000, "\\N", "\\N", + "\\N", ] ] ) @@ -1382,7 +1397,7 @@ def test_reload_users_xml_by_timer(): assert_eq_with_retry( instance, "SELECT * FROM system.quota_limits", - [["myQuota", 31556952, 0, 1, 1, 1, 1, 1, "\\N", 1, "\\N", "\\N"]], + [["myQuota", 31556952, 0, 1, 1, 1, 1, 1, "\\N", 1, "\\N", "\\N", "\\N"]], ) @@ -1481,15 +1496,15 @@ def test_dcl_management(): == "CREATE QUOTA qA FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default\n" ) assert 
re.match( - "qA\\t\\t.*\\t1800\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t.*\\t0.5\n" - "qA\\t\\t.*\\t39446190\\t1\\t321\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t10\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t\\\\N\n", + "qA\\t\\t.*\\t1800\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t.*\\t0.5\\t0\\t\\\\N\n" + "qA\\t\\t.*\\t39446190\\t1\\t321\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t10\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t\\\\N\\t0\\t\\\\N\n", instance.query("SHOW QUOTA"), ) instance.query("SELECT * from test_table") assert re.match( - "qA\\t\\t.*\\t1800\\t1\\t\\\\N\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t0.5\n" - "qA\\t\\t.*\\t39446190\\t2\\t321\\t2\\t\\\\N\\t0\\t\\\\N\\t0\\t10\\t100\\t\\\\N\\t400\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t.*\\t\\\\N\n", + "qA\\t\\t.*\\t1800\\t1\\t\\\\N\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t0.5\\t0\\t\\\\N\n" + "qA\\t\\t.*\\t39446190\\t2\\t321\\t2\\t\\\\N\\t0\\t\\\\N\\t0\\t10\\t100\\t\\\\N\\t400\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t.*\\t\\\\N\\t0\\t\\\\N\n", instance.query("SHOW QUOTA"), ) @@ -1503,7 +1518,7 @@ def test_dcl_management(): instance.query("SELECT * from test_table") assert re.match( - "qA\\t\\t.*\\t42075936\\t1\\t\\\\N\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t\\\\N\n", + "qA\\t\\t.*\\t42075936\\t1\\t\\\\N\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t\\\\N\\t0\\t\\\\N\n", instance.query("SHOW QUOTA"), ) @@ -1519,7 +1534,7 @@ def test_dcl_management(): instance.query("SELECT * from test_table") assert re.match( - "qB\\t\\t.*\\t42075936\\t2\\t\\\\N\\t2\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t.*\\t\\\\N\n", + "qB\\t\\t.*\\t42075936\\t2\\t\\\\N\\t2\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t.*\\t\\\\N\\t0\\t\\\\N\n", instance.query("SHOW QUOTA"), ) @@ -1563,6 +1578,7 @@ def test_query_inserts(): 1000, "\\N", "\\N", + "\\N", ] ] ) diff --git a/tests/integration/test_s3_cluster/test.py b/tests/integration/test_s3_cluster/test.py index 561d3e3ed28..93708acd49c 100644 --- a/tests/integration/test_s3_cluster/test.py +++ b/tests/integration/test_s3_cluster/test.py @@ -149,7 +149,26 @@ def test_wrong_cluster(started_cluster): SELECT count(*) from s3Cluster( 'non_existent_cluster', 'http://minio1:9001/root/data/{clickhouse,database}/*', - 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))')""" + 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') + """ ) assert "not found" in error + + +def test_ambiguous_join(started_cluster): + node = started_cluster.instances["s0_0_0"] + result = node.query( + """ + SELECT l.name, r.value from s3Cluster( + 'cluster_simple', + 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', + 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') as l + JOIN s3Cluster( + 'cluster_simple', + 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', + 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') as r + ON l.name = r.name + """ + ) + assert "AMBIGUOUS_COLUMN_NAME" not in result diff --git 
a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 5e8a96d2c05..2946a9ce5cc 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -554,6 +554,17 @@ def test_insert_select_schema_inference(started_cluster): assert int(result) == 1 +def test_cluster_join(started_cluster): + result = node1.query( + """ + SELECT l.id,r.id FROM hdfsCluster('test_cluster_two_shards', 'hdfs://hdfs1:9000/test_hdfsCluster/file*', 'TSV', 'id UInt32') as l + JOIN hdfsCluster('test_cluster_two_shards', 'hdfs://hdfs1:9000/test_hdfsCluster/file*', 'TSV', 'id UInt32') as r + ON l.id = r.id + """ + ) + assert "AMBIGUOUS_COLUMN_NAME" not in result + + def test_virtual_columns_2(started_cluster): hdfs_api = started_cluster.hdfs_api diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index e451e15a5d6..a27b5a134e4 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -1135,6 +1135,76 @@ def test_kafka_consumer_hang2(kafka_cluster): kafka_delete_topic(admin_client, topic_name) +# sequential read from different consumers leads to breaking lot of kafka invariants +# (first consumer will get all partitions initially, and may have problems in doing polls every 60 sec) +def test_kafka_read_consumers_in_parallel(kafka_cluster): + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) + + topic_name = "read_consumers_in_parallel" + kafka_create_topic(admin_client, topic_name, num_partitions=8) + + cancel = threading.Event() + + def produce(): + while not cancel.is_set(): + messages = [] + for _ in range(100): + messages.append(json.dumps({"key": 0, "value": 0})) + kafka_produce(kafka_cluster, "read_consumers_in_parallel", messages) + time.sleep(1) + + kafka_thread = threading.Thread(target=produce) + kafka_thread.start() + + # when we have more than 1 consumer in a single table, + # and kafka_thread_per_consumer=0 + # all the consumers should be read in parallel, not in sequence. + # then reading in parallel 8 consumers with 1 seconds kafka_poll_timeout_ms and less than 1 sec limit + # we should have exactly 1 poll per consumer (i.e. 8 polls) every 1 seconds (from different threads) + # in case parallel consuming is not working we will have only 1 poll every 1 seconds (from the same thread). 
+ instance.query( + f""" + DROP TABLE IF EXISTS test.kafka; + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + + CREATE TABLE test.kafka (key UInt64, value UInt64) + ENGINE = Kafka + SETTINGS kafka_broker_list = 'kafka1:19092', + kafka_topic_list = '{topic_name}', + kafka_group_name = '{topic_name}', + kafka_format = 'JSONEachRow', + kafka_num_consumers = 8, + kafka_thread_per_consumer = 0, + kafka_poll_timeout_ms = 1000, + kafka_flush_interval_ms = 999; + CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory(); + CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; + """ + ) + + instance.wait_for_log_line( + "kafka.*Polled batch of [0-9]+.*read_consumers_in_parallel", + repetitions=64, + look_behind_lines=100, + timeout=30, # we should get 64 polls in ~8 seconds, but when read sequentially it will take more than 64 sec + ) + + cancel.set() + kafka_thread.join() + + instance.query( + """ + DROP TABLE test.consumer; + DROP TABLE test.view; + DROP TABLE test.kafka; + """ + ) + kafka_delete_topic(admin_client, topic_name) + + def test_kafka_csv_with_delimiter(kafka_cluster): messages = [] for i in range(50): diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index e51a9335a65..aca33816d75 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -24,25 +24,25 @@ postgres_table_template = """ """ queries = [ - "INSERT INTO postgresql_replica select i, i from generate_series(0, 10000) as t(i);", - "DELETE FROM postgresql_replica WHERE (value*value) % 3 = 0;", - "UPDATE postgresql_replica SET value = value + 125 WHERE key % 2 = 0;", - "UPDATE postgresql_replica SET key=key+20000 WHERE key%2=0", - "INSERT INTO postgresql_replica select i, i from generate_series(40000, 50000) as t(i);", - "DELETE FROM postgresql_replica WHERE key % 10 = 0;", - "UPDATE postgresql_replica SET value = value + 101 WHERE key % 2 = 1;", - "UPDATE postgresql_replica SET key=key+80000 WHERE key%2=1", - "DELETE FROM postgresql_replica WHERE value % 2 = 0;", - "UPDATE postgresql_replica SET value = value + 2000 WHERE key % 5 = 0;", - "INSERT INTO postgresql_replica select i, i from generate_series(200000, 250000) as t(i);", - "DELETE FROM postgresql_replica WHERE value % 3 = 0;", - "UPDATE postgresql_replica SET value = value * 2 WHERE key % 3 = 0;", - "UPDATE postgresql_replica SET key=key+500000 WHERE key%2=1", - "INSERT INTO postgresql_replica select i, i from generate_series(1000000, 1050000) as t(i);", - "DELETE FROM postgresql_replica WHERE value % 9 = 2;", - "UPDATE postgresql_replica SET key=key+10000000", - "UPDATE postgresql_replica SET value = value + 2 WHERE key % 3 = 1;", - "DELETE FROM postgresql_replica WHERE value%5 = 0;", + "INSERT INTO {} select i, i from generate_series(0, 10000) as t(i);", + "DELETE FROM {} WHERE (value*value) % 3 = 0;", + "UPDATE {} SET value = value + 125 WHERE key % 2 = 0;", + "UPDATE {} SET key=key+20000 WHERE key%2=0", + "INSERT INTO {} select i, i from generate_series(40000, 50000) as t(i);", + "DELETE FROM {} WHERE key % 10 = 0;", + "UPDATE {} SET value = value + 101 WHERE key % 2 = 1;", + "UPDATE {} SET key=key+80000 WHERE key%2=1", + "DELETE FROM {} WHERE value % 2 = 0;", + "UPDATE {} SET value = value + 2000 WHERE key % 5 = 0;", + "INSERT INTO {} select i, i from generate_series(200000, 250000) as t(i);", + "DELETE FROM {} WHERE value % 3 = 0;", + "UPDATE {} 
SET value = value * 2 WHERE key % 3 = 0;", + "UPDATE {} SET key=key+500000 WHERE key%2=1", + "INSERT INTO {} select i, i from generate_series(1000000, 1050000) as t(i);", + "DELETE FROM {} WHERE value % 9 = 2;", + "UPDATE {} SET key=key+10000000", + "UPDATE {} SET value = value + 2 WHERE key % 3 = 1;", + "DELETE FROM {} WHERE value%5 = 0;", ] @@ -50,20 +50,17 @@ queries = [ def check_tables_are_synchronized( table_name, order_by="key", postgres_database="postgres_database" ): - expected = instance.query( - "select * from {}.{} order by {};".format( - postgres_database, table_name, order_by + while True: + expected = instance.query( + "select * from {}.{} order by {};".format( + postgres_database, table_name, order_by + ) ) - ) - result = instance.query( - "select * from test.{} order by {};".format(table_name, order_by) - ) - - while result != expected: - time.sleep(0.5) result = instance.query( "select * from test.{} order by {};".format(table_name, order_by) ) + if result == expected: + break assert result == expected @@ -103,15 +100,13 @@ def create_clickhouse_postgres_db(ip, port, name="postgres_database"): ) -def create_materialized_table(ip, port): +def create_materialized_table(ip, port, table_name="postgresql_replica"): instance.query( - """ - CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) + f""" + CREATE TABLE test.{table_name} (key Int64, value Int64) ENGINE = MaterializedPostgreSQL( - '{}:{}', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - PRIMARY KEY key; """.format( - ip, port - ) + '{ip}:{port}', 'postgres_database', '{table_name}', 'postgres', 'mysecretpassword') + PRIMARY KEY key; """ ) @@ -176,6 +171,7 @@ def test_initial_load_from_snapshot(started_cluster): cursor.execute("DROP TABLE postgresql_replica;") postgresql_replica_check_result(result, True) + instance.query(f"DROP TABLE test.postgresql_replica NO DELAY") @pytest.mark.timeout(320) @@ -212,6 +208,7 @@ def test_no_connection_at_startup(started_cluster): result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") cursor.execute("DROP TABLE postgresql_replica;") postgresql_replica_check_result(result, True) + instance.query(f"DROP TABLE test.postgresql_replica NO DELAY") @pytest.mark.timeout(320) @@ -250,6 +247,7 @@ def test_detach_attach_is_ok(started_cluster): cursor.execute("DROP TABLE postgresql_replica;") postgresql_replica_check_result(result, True) + instance.query(f"DROP TABLE test.postgresql_replica NO DELAY") @pytest.mark.timeout(320) @@ -303,6 +301,7 @@ def test_replicating_insert_queries(started_cluster): result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") cursor.execute("DROP TABLE postgresql_replica;") postgresql_replica_check_result(result, True) + instance.query(f"DROP TABLE test.postgresql_replica NO DELAY") @pytest.mark.timeout(320) @@ -659,6 +658,7 @@ def test_virtual_columns(started_cluster): ) print(result) cursor.execute("DROP TABLE postgresql_replica;") + instance.query(f"DROP TABLE test.postgresql_replica NO DELAY") def test_abrupt_connection_loss_while_heavy_replication(started_cluster): @@ -669,17 +669,18 @@ def test_abrupt_connection_loss_while_heavy_replication(started_cluster): database=True, ) cursor = conn.cursor() - create_postgres_table(cursor, "postgresql_replica") + table_name = "postgresql_replica" + create_postgres_table(cursor, table_name) - instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + instance.query(f"DROP TABLE IF EXISTS test.{table_name}") create_materialized_table( 
ip=started_cluster.postgres_ip, port=started_cluster.postgres_port ) for i in range(len(queries)): - query = queries[i] + query = queries[i].format(table_name) cursor.execute(query) - print("query {}".format(query)) + print("query {}".format(query.format(table_name))) started_cluster.pause_container("postgres1") @@ -692,6 +693,7 @@ def test_abrupt_connection_loss_while_heavy_replication(started_cluster): result = instance.query("SELECT count() FROM test.postgresql_replica") print(result) # Just debug + instance.query(f"DROP TABLE test.postgresql_replica NO DELAY") def test_abrupt_server_restart_while_heavy_replication(started_cluster): @@ -701,26 +703,38 @@ def test_abrupt_server_restart_while_heavy_replication(started_cluster): database=True, ) cursor = conn.cursor() - create_postgres_table(cursor, "postgresql_replica") + table_name = "postgresql_replica_697" + create_postgres_table(cursor, table_name) - instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + instance.query(f"INSERT INTO postgres_database.{table_name} SELECT -1, 1") + instance.query(f"DROP TABLE IF EXISTS test.{table_name} NO DELAY") create_materialized_table( - ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + table_name=table_name, ) + n = 1 + while int(instance.query(f"select count() from test.{table_name}")) != 1: + sleep(1) + n += 1 + if n > 10: + break + for query in queries: - cursor.execute(query) - print("query {}".format(query)) + cursor.execute(query.format(table_name)) + print("query {}".format(query.format(table_name))) instance.restart_clickhouse() - result = instance.query("SELECT count() FROM test.postgresql_replica") + result = instance.query(f"SELECT count() FROM test.{table_name}") print(result) # Just debug - check_tables_are_synchronized("postgresql_replica") + check_tables_are_synchronized(table_name) - result = instance.query("SELECT count() FROM test.postgresql_replica") + result = instance.query(f"SELECT count() FROM test.{table_name}") print(result) # Just debug + instance.query(f"DROP TABLE test.{table_name} NO DELAY") def test_drop_table_immediately(started_cluster): @@ -744,7 +758,7 @@ def test_drop_table_immediately(started_cluster): ip=started_cluster.postgres_ip, port=started_cluster.postgres_port ) check_tables_are_synchronized("postgresql_replica") - instance.query("DROP TABLE test.postgresql_replica") + instance.query(f"DROP TABLE test.postgresql_replica NO DELAY") if __name__ == "__main__": diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index 71371f13d1e..4fd47f8eeb0 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -162,7 +162,7 @@ def test_put(started_cluster, maybe_auth, positive, compression): values_csv = "1,2,3\n3,2,1\n78,43,45\n" filename = "test.csv" put_query = f"""insert into table function s3('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/{filename}', - {maybe_auth}'CSV', '{table_format}', '{compression}') values settings s3_truncate_on_insert=1 {values}""" + {maybe_auth}'CSV', '{table_format}', '{compression}') settings s3_truncate_on_insert=1 values {values}""" try: run_query(instance, put_query) @@ -362,7 +362,7 @@ def test_put_csv(started_cluster, maybe_auth, positive): instance = started_cluster.instances["dummy"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" filename = "test.csv" - put_query = 
"insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV settings s3_truncate_on_insert=1".format( + put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') settings s3_truncate_on_insert=1 format CSV".format( started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, @@ -392,7 +392,7 @@ def test_put_get_with_redirect(started_cluster): values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)" values_csv = "1,1,1\n1,1,1\n11,11,11\n" filename = "test.csv" - query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format( + query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') settings s3_truncate_on_insert=1 values {}".format( started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, @@ -431,7 +431,7 @@ def test_put_with_zero_redirect(started_cluster): filename = "test.csv" # Should work without redirect - query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format( + query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') settings s3_truncate_on_insert=1 values {}".format( started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, @@ -442,7 +442,7 @@ def test_put_with_zero_redirect(started_cluster): run_query(instance, query) # Should not work with redirect - query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format( + query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') settings s3_truncate_on_insert=1 values {}".format( started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, @@ -1407,3 +1407,39 @@ def test_insert_select_schema_inference(started_cluster): f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native')" ) assert int(result) == 1 + + +def test_parallel_reading_with_memory_limit(started_cluster): + bucket = started_cluster.minio_bucket + instance = started_cluster.instances["dummy"] + + instance.query( + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_memory_limit.native') select * from numbers(1000000)" + ) + + result = instance.query_and_get_error( + f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_memory_limit.native') settings max_memory_usage=1000" + ) + + assert "Memory limit (for query) exceeded" in result + + time.sleep(5) + + # Check that server didn't crash + result = instance.query("select 1") + assert int(result) == 1 + + +def test_wrong_format_usage(started_cluster): + bucket = started_cluster.minio_bucket + instance = started_cluster.instances["dummy"] + + instance.query( + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_wrong_format.native') select * from numbers(10)" + ) + + result = instance.query_and_get_error( + f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_wrong_format.native', 'Parquet') settings input_format_allow_seeks=0, max_memory_usage=1000" + ) + + assert "Not a Parquet file" in result diff --git a/tests/integration/test_system_merges/test.py b/tests/integration/test_system_merges/test.py index 9239cb11065..775706f4df6 100644 --- a/tests/integration/test_system_merges/test.py +++ b/tests/integration/test_system_merges/test.py @@ -124,7 +124,7 @@ def 
test_merge_simple(started_cluster, replicated): assert ( node_check.query( - "SELECT * FROM system.merges WHERE table = '{name}'".format( + "SELECT * FROM system.merges WHERE table = '{name}' and progress < 1".format( name=table_name ) ) ) diff --git a/tests/integration/test_transactions/__init__.py b/tests/integration/test_transactions/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_transactions/configs/transactions.xml b/tests/integration/test_transactions/configs/transactions.xml new file mode 100644 index 00000000000..a8d3e8fbf6d --- /dev/null +++ b/tests/integration/test_transactions/configs/transactions.xml @@ -0,0 +1,14 @@
+<clickhouse>
+    <allow_experimental_transactions>42</allow_experimental_transactions>
+
+    <merge_tree>
+        <old_parts_lifetime>100500</old_parts_lifetime>
+        <remove_empty_parts>0</remove_empty_parts>
+    </merge_tree>
+
+    <transactions_info_log>
+        <database>system</database>
+        <table>transactions_info_log</table>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </transactions_info_log>
+</clickhouse>
diff --git a/tests/integration/test_transactions/test.py b/tests/integration/test_transactions/test.py new file mode 100644 index 00000000000..8983e70b4cb --- /dev/null +++ b/tests/integration/test_transactions/test.py @@ -0,0 +1,120 @@ +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance( + "node", + main_configs=["configs/transactions.xml"], + stay_alive=True, + with_zookeeper=True, +) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def tx(session, query): + params = {"session_id": "session_{}".format(session)} + return node.http_query(None, data=query, params=params) + + +def test_rollback_unfinished_on_restart(start_cluster): + node.query( + "create table mt (n int, m int) engine=MergeTree order by n partition by n % 2" + ) + node.query("insert into mt values (1, 10), (2, 20)") + tid0 = "(1,1,'00000000-0000-0000-0000-000000000000')" + + # it will hold a snapshot and avoid parts cleanup + tx(0, "begin transaction") + + tx(4, "begin transaction") + + tx(1, "begin transaction") + tid1 = tx(1, "select transactionID()").strip() + tx(1, "alter table mt drop partition id '1'") + tx(1, "commit") + + tx(1, "begin transaction") + tid2 = tx(1, "select transactionID()").strip() + tx(1, "insert into mt values (3, 30), (4, 40)") + tx(1, "commit") + + node.query("system flush logs") + csn1 = node.query( + "select csn from system.transactions_info_log where type='Commit' and tid={}".format( + tid1 + ) + ).strip() + csn2 = node.query( + "select csn from system.transactions_info_log where type='Commit' and tid={}".format( + tid2 + ) + ).strip() + + # insert a part before starting mutation and check that it will not be mutated + tx(4, "insert into mt values (9, 90)") + + # check that uncommitted mutation will be rolled back on restart + tx(1, "begin transaction") + tid3 = tx(1, "select transactionID()").strip() + tx(1, "insert into mt values (5, 50)") + tx(1, "alter table mt update m = m+n in partition id '1' where 1") + + # check that uncommitted merge will be rolled back on restart + tx(2, "begin transaction") + tid4 = tx(2, "select transactionID()").strip() + tx( + 2, + "optimize table mt partition id '0' final settings optimize_throw_if_noop = 1", + ) + + # check that uncommitted insert will be rolled back on restart + tx(3, "begin transaction") + tid5 = tx(3, "select transactionID()").strip() + tx(3, "insert into mt values (6, 70)") + + tid6 = tx(4, "select transactionID()").strip() + tx(4, "commit") + node.query("system flush logs") + csn6 = node.query( + "select csn from system.transactions_info_log where type='Commit' and tid={}".format( + tid6 + ) + ).strip() + + node.restart_clickhouse(kill=True) + + assert ( + node.query("select *, _part from mt order by n") + == "2\t20\t0_2_2_0\n3\t30\t1_3_3_0\n4\t40\t0_4_4_0\n9\t90\t1_5_5_0\n" + ) + res = node.query( + "select name, active, creation_tid, 'csn' || toString(creation_csn) || '_', removal_tid, 'csn' || toString(removal_csn) || '_' from system.parts where table='mt' order by name" + ) + res = res.replace(tid0, "tid0") + res = res.replace(tid1, "tid1").replace("csn" + csn1 + "_", "csn_1") + res = res.replace(tid2, "tid2").replace("csn" + csn2 + "_", "csn_2") + res = res.replace(tid3, "tid3") + res = res.replace(tid4, "tid4") + res = res.replace(tid5, "tid5") + res = res.replace(tid6, "tid6").replace("csn" + csn6 + "_", "csn_6") + assert ( + res + == 
"0_2_2_0\t1\ttid0\tcsn1_\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" + "0_2_4_1\t0\ttid4\tcsn18446744073709551615_\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" + "0_4_4_0\t1\ttid2\tcsn_2\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" + "0_8_8_0\t0\ttid5\tcsn18446744073709551615_\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" + "1_1_1_0\t0\ttid0\tcsn1_\ttid1\tcsn_1\n" + "1_3_3_0\t1\ttid2\tcsn_2\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" + "1_3_3_0_7\t0\ttid3\tcsn18446744073709551615_\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" + "1_5_5_0\t1\ttid6\tcsn_6\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" + "1_6_6_0\t0\ttid3\tcsn18446744073709551615_\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" + "1_6_6_0_7\t0\ttid3\tcsn18446744073709551615_\t(0,0,'00000000-0000-0000-0000-000000000000')\tcsn0_\n" + ) diff --git a/tests/performance/function_calculation_after_sorting_and_limit.xml b/tests/performance/function_calculation_after_sorting_and_limit.xml new file mode 100644 index 00000000000..ddb8f860600 --- /dev/null +++ b/tests/performance/function_calculation_after_sorting_and_limit.xml @@ -0,0 +1,4 @@ + + SELECT sipHash64(number) FROM numbers(1e8) ORDER BY number LIMIT 5 + SELECT sipHash64(number) FROM numbers(1e8) ORDER BY number + 1 LIMIT 5 + diff --git a/tests/performance/has_all.xml b/tests/performance/has_all.xml new file mode 100644 index 00000000000..331442cbfee --- /dev/null +++ b/tests/performance/has_all.xml @@ -0,0 +1,53 @@ + + + + array_type + + Int8 + Int16 + Int32 + Int64 + + + + + + CREATE TABLE test_table_small_{array_type} + ( + `set` Array({array_type}), + `subset` Array ({array_type}) + ) + ENGINE = MergeTree ORDER BY set; + + + + CREATE TABLE test_table_medium_{array_type} + ( + `set` Array({array_type}), + `subset` Array ({array_type}) + ) + ENGINE = MergeTree ORDER BY set; + + + + CREATE TABLE test_table_large_{array_type} + ( + `set` Array({array_type}), + `subset` Array ({array_type}) + ) + ENGINE = MergeTree ORDER BY set; + + + + INSERT INTO test_table_small_{array_type} SELECT groupArraySample(5000)(rand64()) AS set, groupArraySample(500)(rand64()) AS subset FROM numbers(10000000) GROUP BY number % 5000; + INSERT INTO test_table_medium_{array_type} SELECT groupArraySample(50000)(rand64()) AS set, groupArraySample(5000)(rand64()) AS subset FROM numbers(25000000) GROUP BY number % 50000; + INSERT INTO test_table_large_{array_type} SELECT groupArraySample(500000)(rand64()) AS set, groupArraySample(500000)(rand64()) AS subset FROM numbers(50000000) GROUP BY number % 500000; + + SELECT hasAll(set, subset) FROM test_table_small_{array_type} FORMAT Null + SELECT hasAll(set, subset) FROM test_table_medium_{array_type} FORMAT Null + SELECT hasAll(set, subset) FROM test_table_large_{array_type} FORMAT Null + + DROP TABLE IF EXISTS test_table_small_{array_type} + DROP TABLE IF EXISTS test_table_medium_{array_type} + DROP TABLE IF EXISTS test_table_large_{array_type} + diff --git a/tests/performance/scalar2.xml b/tests/performance/scalar2.xml new file mode 100644 index 00000000000..eb427536646 --- /dev/null +++ b/tests/performance/scalar2.xml @@ -0,0 +1,17 @@ + + CREATE TABLE tbl0 (`ds` Date, `x1` String, `x2` UInt32, `x3` UInt32, `x4` UInt32, `bm` AggregateFunction(groupBitmap, UInt32)) ENGINE = MergeTree PARTITION BY (ds, x1) ORDER BY (x2, x3, x4) SETTINGS index_granularity = 1 + + CREATE TABLE tbl (`ds` Date, `y1` UInt32, `x4` UInt32, `y2` UInt32, `y3` UInt32, `bm` AggregateFunction(groupBitmap, 
UInt32), `y4` UInt32 DEFAULT 0) ENGINE = MergeTree PARTITION BY (ds) ORDER BY (x4, y2, y3) SETTINGS index_granularity = 8192, max_parts_in_total = 10000000 + + insert into tbl0 with murmurHash3_32(toUInt32(rand())) as uid select toDate('2022-03-01')+rand()%7 as ds, concat('xx',toString(rand()%10+1)) as x1, 1 as x2, 2 as x3, bitShiftRight(uid, 22) as x4, groupBitmapState(uid) as bm from numbers(100000000) where x4%40=0 group by ds, x1, x2, x3, x4 + + insert into tbl with murmurHash3_32(toUInt32(rand())) as uid select toDate('2022-03-01')+rand()%7 as ds, rand()%1000+5000 as y1, bitShiftRight(uid, 22) as x4, rand()%100 as y2, rand()%2000 as y3, groupBitmapState(uid) as bm, rand()%1 as y4 from numbers(100000000) where x4%40=0 group by ds, y1, x4, y2, y3, y4 + + CREATE TABLE tmp_acc_hit engine Memory AS SELECT x1, x2, x3, arrayReduceInRanges('groupBitmapMergeState', [(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7)], bs) AS bs FROM (SELECT x1, x2, x3, groupArrayInsertAt(b, multiIf(ds = '2022-03-01', 0, ds = '2022-03-02', 1, ds = '2022-03-03', 2, ds = '2022-03-04', 3, ds = '2022-03-05', 4, ds = '2022-03-06', 5, ds = '2022-03-07', 6, 7)) AS bs FROM (SELECT x1, x2, x3, ds, groupBitmapOrState(bm) AS b FROM tbl0 WHERE ((ds >= '2022-03-01') AND (ds <= '2022-03-07')) AND (((x1 = 'xx1') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx2') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx3') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx4') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx5') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx6') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx7') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx8') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx9') AND (x2 = 1) AND (x3 = 2)) OR ((x1 = 'xx10') AND (x2 = 1) AND (x3 = 2))) AND (x4 IN (0, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680, 720, 760, 800, 840, 880, 920, 960, 1000)) GROUP BY x1, x2, x3, ds) AS t_hit GROUP BY x1, x2, x3) + + WITH (SELECT groupArrayInsertAt(b, multiIf((x1 = 'xx1') AND (x2 = 1) AND (x3 = 2), 0, (x1 = 'xx2') AND (x2 = 1) AND (x3 = 2), 1, (x1 = 'xx3') AND (x2 = 1) AND (x3 = 2), 2, (x1 = 'xx4') AND (x2 = 1) AND (x3 = 2), 3, (x1 = 'xx5') AND (x2 = 1) AND (x3 = 2), 4, (x1 = 'xx6') AND (x2 = 1) AND (x3 = 2), 5, (x1 = 'xx7') AND (x2 = 1) AND (x3 = 2), 6, (x1 = 'xx8') AND (x2 = 1) AND (x3 = 2), 7, (x1 = 'xx9') AND (x2 = 1) AND (x3 = 2), 8, (x1 = 'xx10') AND (x2 = 1) AND (x3 = 2), 9, 10)) FROM (SELECT x1, x2, x3, bs AS b FROM tmp_acc_hit)) AS bs SELECT y1, x4, toString(flat_arr) AS flat_arr, toString([bitmapAndCardinality(bmor1, (bs[1])[1]), bitmapAndCardinality(bmor2, (bs[1])[1]), bitmapAndCardinality(bmor3, (bs[1])[1]), bitmapAndCardinality(bmor1, (bs[2])[1]), bitmapAndCardinality(bmor2, (bs[2])[1]), bitmapAndCardinality(bmor3, (bs[2])[1]), bitmapAndCardinality(bmor1, (bs[3])[1]), bitmapAndCardinality(bmor2, (bs[3])[1]), bitmapAndCardinality(bmor3, (bs[3])[1]), bitmapAndCardinality(bmor1, (bs[4])[1]), bitmapAndCardinality(bmor2, (bs[4])[1]), bitmapAndCardinality(bmor3, (bs[4])[1]), bitmapAndCardinality(bmor1, (bs[5])[1]), bitmapAndCardinality(bmor2, (bs[5])[1]), bitmapAndCardinality(bmor3, (bs[5])[1]), bitmapAndCardinality(bmor1, (bs[6])[1]), bitmapAndCardinality(bmor2, (bs[6])[1]), bitmapAndCardinality(bmor3, (bs[6])[1]), bitmapAndCardinality(bmor1, (bs[7])[1]), bitmapAndCardinality(bmor2, (bs[7])[1]), bitmapAndCardinality(bmor3, (bs[7])[1]), bitmapAndCardinality(bmor1, (bs[8])[1]), bitmapAndCardinality(bmor2, (bs[8])[1]), bitmapAndCardinality(bmor3, (bs[8])[1]), bitmapAndCardinality(bmor1, (bs[9])[1]), 
bitmapAndCardinality(bmor2, (bs[9])[1]), bitmapAndCardinality(bmor3, (bs[9])[1]), bitmapAndCardinality(bmor1, (bs[10])[1]), bitmapAndCardinality(bmor2, (bs[10])[1]), bitmapAndCardinality(bmor3, (bs[10])[1])]) AS flat_arr_2 from (SELECT toString(y1) AS y1, toString(x4) AS x4, arrayFlatten(groupArrayInsertAt(flat_arr, multiIf(date_ = '2022-03-01', 0, 1))) AS flat_arr, groupBitmapOrState(bmor1) AS bmor1, groupBitmapOrState(bmor2) AS bmor2, groupBitmapOrState(bmor3) AS bmor3 FROM (WITH '2022-03-01' AS start_ds SELECT y1, x4, groupBitmapOrState(bm) AS bmor1, groupBitmapOrStateIf(bm, y2 > 0) AS bmor2, groupBitmapOrStateIf(bm, y4 = 1) AS bmor3, [sum(y2 * bitmapAndCardinality(bm, (bs[1])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[2])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[3])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[4])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[5])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[6])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[7])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[8])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[9])[1])), sum(y2 * bitmapAndCardinality(bm, (bs[10])[1]))] AS flat_arr, start_ds AS date_ FROM tbl WHERE (ds = start_ds) AND (y1 IN (7063, 5010, 5006, 6788, 6176, 6203, 6769, 6555, 7062, 5119, 5007, 5212, 6814, 6177, 6789, 5095, 4942, 6243, 7061, 6744, 6201, 7196, 6181, 7195, 6178, 5004, 6790, 5008, 6877, 7281, 6791, 6179, 5214, 5005, 7146, 6980, 6322, 5222, 5217, 5137, 6561, 5133, 6937, 5142, 5130, 6885, 7250, 5103, 6867, 7066, 5096, 6868, 6199, 7269, 5131, 6414, 6884, 6560, 5136, 6883, 5158, 6869, 5097, 5132, 5102, 7251, 5219, 4695, 5220, 5202, 4203, 4204, 5098, 6870, 7064, 5101, 5105, 5140, 5135, 5139, 6880, 6194, 5218, 4202, 6655, 5104, 5183, 7245, 5100, 7065, 5099, 6938, 5138, 6881, 5134, 6886, 5141, 5129)) AND (x4 IN (0, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680, 720, 760, 800, 840, 880, 920, 960, 1000)) AND (y4 IN (0, 1)) GROUP BY y1, x4) GROUP BY y1, x4) LIMIT 1 + + DROP TABLE IF EXISTS tbl + DROP TABLE IF EXISTS tbl0 + DROP TABLE IF EXISTS tmp_acc_hit + diff --git a/tests/queries/0_stateless/00155_long_merges.sh b/tests/queries/0_stateless/00155_long_merges.sh index f2d9cd1dade..15ad0892a42 100755 --- a/tests/queries/0_stateless/00155_long_merges.sh +++ b/tests/queries/0_stateless/00155_long_merges.sh @@ -32,7 +32,7 @@ function test { SUM=$(( $1 + $2 )) MAX=$(( $1 > $2 ? 
$1 : $2 )) - SETTINGS="--min_insert_block_size_rows=0 --min_insert_block_size_bytes=0" + SETTINGS="--min_insert_block_size_rows=0 --min_insert_block_size_bytes=0 --max_block_size=65505" $CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO summing_00155 (x) SELECT number AS x FROM system.numbers LIMIT $1" $CLICKHOUSE_CLIENT $SETTINGS --query="INSERT INTO summing_00155 (x) SELECT number AS x FROM system.numbers LIMIT $2" diff --git a/tests/queries/0_stateless/00753_alter_attach.reference b/tests/queries/0_stateless/00753_alter_attach.reference index 007b99d4748..b0d2a3d031c 100644 --- a/tests/queries/0_stateless/00753_alter_attach.reference +++ b/tests/queries/0_stateless/00753_alter_attach.reference @@ -10,3 +10,15 @@ 5 2 6 3 7 3 +4 2 +5 2 +1 1 +2 1 +3 1 +1 1 +2 1 +3 1 +1 1 +2 2 +1 1 +1 1 diff --git a/tests/queries/0_stateless/00753_alter_attach.sql b/tests/queries/0_stateless/00753_alter_attach.sql index ca43fb3aeae..9fa4f92c2c1 100644 --- a/tests/queries/0_stateless/00753_alter_attach.sql +++ b/tests/queries/0_stateless/00753_alter_attach.sql @@ -19,4 +19,76 @@ INSERT INTO alter_attach VALUES (6, 3), (7, 3); ALTER TABLE alter_attach ATTACH PARTITION 2; SELECT * FROM alter_attach ORDER BY x; +ALTER TABLE alter_attach DETACH PARTITION ALL; +SELECT * FROM alter_attach ORDER BY x; + +ALTER TABLE alter_attach ATTACH PARTITION 2; +SELECT * FROM alter_attach ORDER BY x; + +DROP TABLE IF EXISTS detach_all_no_partition; +CREATE TABLE detach_all_no_partition (x UInt64, p UInt8) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO detach_all_no_partition VALUES (1, 1), (2, 1), (3, 1); +SELECT * FROM detach_all_no_partition ORDER BY x; + +ALTER TABLE detach_all_no_partition DETACH PARTITION ALL; +SELECT * FROM detach_all_no_partition ORDER BY x; + +ALTER TABLE detach_all_no_partition ATTACH PARTITION tuple(); +SELECT * FROM detach_all_no_partition ORDER BY x; + DROP TABLE alter_attach; +DROP TABLE detach_all_no_partition; + +DROP TABLE IF EXISTS replicated_table_detach_all1; +DROP TABLE IF EXISTS replicated_table_detach_all2; + +CREATE TABLE replicated_table_detach_all1 ( + id UInt64, + Data String +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00753_{database}/replicated_table_detach_all', '1') ORDER BY id PARTITION BY id; + +CREATE TABLE replicated_table_detach_all2 ( + id UInt64, + Data String +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00753_{database}/replicated_table_detach_all', '2') ORDER BY id PARTITION BY id; + + +INSERT INTO replicated_table_detach_all1 VALUES (1, '1'), (2, '2'); +select * from replicated_table_detach_all1 order by id; + +ALTER TABLE replicated_table_detach_all1 DETACH PARTITION ALL; +select * from replicated_table_detach_all1 order by id; +SYSTEM SYNC REPLICA replicated_table_detach_all2; +select * from replicated_table_detach_all2 order by id; + +ALTER TABLE replicated_table_detach_all1 ATTACH PARTITION tuple(1); +select * from replicated_table_detach_all1 order by id; +SYSTEM SYNC REPLICA replicated_table_detach_all2; +select * from replicated_table_detach_all2 order by id; + +ALTER TABLE replicated_table_detach_all1 FETCH PARTITION ALL FROM '/clickhouse/tables/test_00753_{database}/replicated_table_detach_all1'; -- { serverError 344 } + +DROP TABLE replicated_table_detach_all1; +DROP TABLE replicated_table_detach_all2; + +DROP TABLE IF EXISTS partition_all; +DROP TABLE IF EXISTS partition_all2; + +CREATE TABLE partition_all (x UInt64, p UInt8, q UInt8) ENGINE = MergeTree ORDER BY tuple() PARTITION BY p; +INSERT INTO partition_all VALUES (4, 1, 2), (5, 
1, 3), (3, 1, 4); + +CREATE TABLE partition_all2 (x UInt64, p UInt8, q UInt8) ENGINE = MergeTree ORDER BY tuple() PARTITION BY p; +INSERT INTO partition_all2 VALUES (4, 1, 2), (5, 1, 3), (3, 1, 4); + +-- test PARTITION ALL +ALTER TABLE partition_all2 REPLACE PARTITION ALL FROM partition_all; -- { serverError 344 } +ALTER TABLE partition_all MOVE PARTITION ALL TO TABLE partition_all2; -- { serverError 344 } +ALTER TABLE partition_all2 CLEAR INDEX p IN PARTITION ALL; -- { serverError 344 } +ALTER TABLE partition_all2 CLEAR COLUMN q IN PARTITION ALL; -- { serverError 344 } +ALTER TABLE partition_all2 UPDATE q = q + 1 IN PARTITION ALL where p = 1; -- { serverError 344 } +ALTER TABLE partition_all2 FREEZE PARTITION ALL; -- { serverError 344 } +CHECK TABLE partition_all2 PARTITION ALL; -- { serverError 344 } +OPTIMIZE TABLE partition_all2 PARTITION ALL; -- { serverError 344 } + +DROP TABLE partition_all; +DROP TABLE partition_all2; diff --git a/tests/queries/0_stateless/00757_enum_defaults.reference b/tests/queries/0_stateless/00757_enum_defaults.reference index 56ead34ad3b..35ef3c72406 100644 --- a/tests/queries/0_stateless/00757_enum_defaults.reference +++ b/tests/queries/0_stateless/00757_enum_defaults.reference @@ -4,3 +4,5 @@ iphone 1 iphone 1 \N 1 +a +b diff --git a/tests/queries/0_stateless/00757_enum_defaults.sql b/tests/queries/0_stateless/00757_enum_defaults.sql index 58f54a98b70..769579ffc0b 100644 --- a/tests/queries/0_stateless/00757_enum_defaults.sql +++ b/tests/queries/0_stateless/00757_enum_defaults.sql @@ -1,2 +1,12 @@ select os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS; select toNullable(os_name) AS os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS; + +DROP TABLE IF EXISTS auto_assgin_enum; +DROP TABLE IF EXISTS auto_assgin_enum1; + +CREATE TABLE auto_assgin_enum (x enum('a', 'b')) ENGINE=MergeTree() order by x; +CREATE TABLE auto_assgin_enum1 (x enum('a' = 1, 'b')) ENGINE=MergeTree() order by x; -- { serverError 223 } +INSERT INTO auto_assgin_enum VALUES('a'), ('b'); +select * from auto_assgin_enum; + +DROP TABLE auto_assgin_enum; diff --git a/tests/queries/0_stateless/00825_protobuf_format_array_3dim.sh b/tests/queries/0_stateless/00825_protobuf_format_array_3dim.sh index 5c2804bdcae..3cd842a10ba 100755 --- a/tests/queries/0_stateless/00825_protobuf_format_array_3dim.sh +++ b/tests/queries/0_stateless/00825_protobuf_format_array_3dim.sh @@ -31,7 +31,7 @@ $CURDIR/helpers/protobuf_length_delimited_encoder.py --decode_and_check --format # Check the input in the protobuf format (now the table contains the same data twice). 
echo -$CLICKHOUSE_CLIENT --query "INSERT INTO array_3dim_protobuf_00825 FORMAT Protobuf SETTINGS format_schema='$SCHEMADIR/00825_protobuf_format_array_3dim:ABC'" < "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "INSERT INTO array_3dim_protobuf_00825 SETTINGS format_schema='$SCHEMADIR/00825_protobuf_format_array_3dim:ABC' FORMAT Protobuf" < "$BINARY_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM array_3dim_protobuf_00825" rm "$BINARY_FILE_PATH" diff --git a/tests/queries/0_stateless/00825_protobuf_format_array_of_arrays.sh b/tests/queries/0_stateless/00825_protobuf_format_array_of_arrays.sh index bd208195acc..76c5a63c4f2 100755 --- a/tests/queries/0_stateless/00825_protobuf_format_array_of_arrays.sh +++ b/tests/queries/0_stateless/00825_protobuf_format_array_of_arrays.sh @@ -36,7 +36,7 @@ $CURDIR/helpers/protobuf_length_delimited_encoder.py --decode_and_check --format # Check the input in the protobuf format (now the table contains the same data twice). echo -$CLICKHOUSE_CLIENT --query "INSERT INTO array_of_arrays_protobuf_00825 FORMAT Protobuf SETTINGS format_schema='$SCHEMADIR/00825_protobuf_format_array_of_arrays:AA'" < "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "INSERT INTO array_of_arrays_protobuf_00825 SETTINGS format_schema='$SCHEMADIR/00825_protobuf_format_array_of_arrays:AA' FORMAT Protobuf" < "$BINARY_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM array_of_arrays_protobuf_00825" rm "$BINARY_FILE_PATH" diff --git a/tests/queries/0_stateless/00825_protobuf_format_enum_mapping.sh b/tests/queries/0_stateless/00825_protobuf_format_enum_mapping.sh index 8d9e2689e26..1258230610d 100755 --- a/tests/queries/0_stateless/00825_protobuf_format_enum_mapping.sh +++ b/tests/queries/0_stateless/00825_protobuf_format_enum_mapping.sh @@ -33,7 +33,7 @@ $CURDIR/helpers/protobuf_length_delimited_encoder.py --decode_and_check --format # Check the input in the protobuf format (now the table contains the same data twice). echo -$CLICKHOUSE_CLIENT --query "INSERT INTO enum_mapping_protobuf_00825 FORMAT Protobuf SETTINGS format_schema='$SCHEMADIR/00825_protobuf_format_enum_mapping:EnumMessage'" < "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "INSERT INTO enum_mapping_protobuf_00825 SETTINGS format_schema='$SCHEMADIR/00825_protobuf_format_enum_mapping:EnumMessage' FORMAT Protobuf" < "$BINARY_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM enum_mapping_protobuf_00825" rm "$BINARY_FILE_PATH" diff --git a/tests/queries/0_stateless/00825_protobuf_format_map.sh b/tests/queries/0_stateless/00825_protobuf_format_map.sh index 2a84772bc9f..81d1cf2e305 100755 --- a/tests/queries/0_stateless/00825_protobuf_format_map.sh +++ b/tests/queries/0_stateless/00825_protobuf_format_map.sh @@ -34,7 +34,7 @@ hexdump -C $BINARY_FILE_PATH # Check the input in the protobuf format (now the table contains the same data twice). 
echo -$CLICKHOUSE_CLIENT --query "INSERT INTO map_protobuf_00825 FORMAT Protobuf SETTINGS format_schema='$SCHEMADIR/00825_protobuf_format_map:Message'" < "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "INSERT INTO map_protobuf_00825 SETTINGS format_schema='$SCHEMADIR/00825_protobuf_format_map:Message' FORMAT Protobuf" < "$BINARY_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM map_protobuf_00825" rm "$BINARY_FILE_PATH" diff --git a/tests/queries/0_stateless/00825_protobuf_format_nested_in_nested.sh b/tests/queries/0_stateless/00825_protobuf_format_nested_in_nested.sh index f1567128cf4..b0a16c2fbba 100755 --- a/tests/queries/0_stateless/00825_protobuf_format_nested_in_nested.sh +++ b/tests/queries/0_stateless/00825_protobuf_format_nested_in_nested.sh @@ -30,7 +30,7 @@ $CURDIR/helpers/protobuf_length_delimited_encoder.py --decode_and_check --format # Check the input in the protobuf format (now the table contains the same data twice). echo -$CLICKHOUSE_CLIENT --query "INSERT INTO nested_in_nested_protobuf_00825 FORMAT Protobuf SETTINGS format_schema='$SCHEMADIR/00825_protobuf_format_nested_in_nested:MessageType'" < "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "INSERT INTO nested_in_nested_protobuf_00825 SETTINGS format_schema='$SCHEMADIR/00825_protobuf_format_nested_in_nested:MessageType' FORMAT Protobuf" < "$BINARY_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM nested_in_nested_protobuf_00825" rm "$BINARY_FILE_PATH" diff --git a/tests/queries/0_stateless/00825_protobuf_format_nested_optional.sh b/tests/queries/0_stateless/00825_protobuf_format_nested_optional.sh index 1b94ebd79f2..cf9c47f5ea9 100755 --- a/tests/queries/0_stateless/00825_protobuf_format_nested_optional.sh +++ b/tests/queries/0_stateless/00825_protobuf_format_nested_optional.sh @@ -37,7 +37,7 @@ $CURDIR/helpers/protobuf_length_delimited_encoder.py --decode_and_check --format # Check the input in the protobuf format (now the table contains the same data twice). 
echo -$CLICKHOUSE_CLIENT --query "INSERT INTO nested_optional_protobuf_00825 FORMAT Protobuf SETTINGS format_schema='$SCHEMADIR/00825_protobuf_format_nested_optional:Message'" < "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "INSERT INTO nested_optional_protobuf_00825 SETTINGS format_schema='$SCHEMADIR/00825_protobuf_format_nested_optional:Message' FORMAT Protobuf" < "$BINARY_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM nested_optional_protobuf_00825" rm "$BINARY_FILE_PATH" diff --git a/tests/queries/0_stateless/00825_protobuf_format_no_length_delimiter.sh b/tests/queries/0_stateless/00825_protobuf_format_no_length_delimiter.sh index a16345c4bb1..0f168c38395 100755 --- a/tests/queries/0_stateless/00825_protobuf_format_no_length_delimiter.sh +++ b/tests/queries/0_stateless/00825_protobuf_format_no_length_delimiter.sh @@ -38,12 +38,12 @@ echo echo echo "Roundtrip:" $CLICKHOUSE_CLIENT --query "CREATE TABLE roundtrip_no_length_delimiter_protobuf_00825 AS no_length_delimiter_protobuf_00825" -$CLICKHOUSE_CLIENT --query "INSERT INTO roundtrip_no_length_delimiter_protobuf_00825 FORMAT ProtobufSingle SETTINGS format_schema='$SCHEMADIR/00825_protobuf_format_no_length_delimiter:Message'" < "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "INSERT INTO roundtrip_no_length_delimiter_protobuf_00825 SETTINGS format_schema='$SCHEMADIR/00825_protobuf_format_no_length_delimiter:Message' FORMAT ProtobufSingle" < "$BINARY_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM roundtrip_no_length_delimiter_protobuf_00825" rm "$BINARY_FILE_PATH" # The ProtobufSingle format can't be used to write multiple rows because this format doesn't have any row delimiter. -$CLICKHOUSE_CLIENT --multiquery --testmode > /dev/null < /dev/null <&1 \ + | ${CLICKHOUSE_CLIENT} --ignore-error -nm --calculate_text_stack_trace 0 --log-level 'error' 2>&1 \ | grep -v -e 'Received exception .*$' -e '^(query: ' | sed 's/^\(Code: [0-9]\+\).*$/\1/g' diff --git a/tests/queries/0_stateless/00938_template_input_format.sh b/tests/queries/0_stateless/00938_template_input_format.sh index 9218f4bebca..e99f59614da 100755 --- a/tests/queries/0_stateless/00938_template_input_format.sh +++ b/tests/queries/0_stateless/00938_template_input_format.sh @@ -22,10 +22,11 @@ cv bn m\", d: 2016-01-01 ; n: 456, s1: as\"df\\'gh , s2: '', s3: \"zx\\ncv\\tbn m\", s4: \"qwe,rty\", d: 2016-01-02 ; n: 9876543210, s1: , s2: 'zx\\ncv\\tbn m', s3: \"qwe,rty\", s4: \"as\"\"df'gh\", d: 2016-01-03 ; n: 789, s1: zx\\ncv\\tbn m , s2: 'qwe,rty', s3: \"as\\\"df'gh\", s4: \"\", d: 2016-01-04"$'\t'" - $ suffix $" | $CLICKHOUSE_CLIENT --query="INSERT INTO template1 FORMAT Template SETTINGS \ + $ suffix $" | $CLICKHOUSE_CLIENT --query="INSERT INTO template1 SETTINGS \ format_template_resultset = '$CURDIR/00938_template_input_format_resultset.tmp', \ format_template_row = '$CURDIR/00938_template_input_format_row.tmp', \ -format_template_rows_between_delimiter = ';\n'"; +format_template_rows_between_delimiter = ';\n' \ +FORMAT Template"; $CLICKHOUSE_CLIENT --query="SELECT * FROM template1 ORDER BY n FORMAT CSV"; @@ -33,10 +34,11 @@ echo "==== parse json (sophisticated template) ====" echo -ne 
'{${:}"meta"${:}:${:}[${:}{${:}"name"${:}:${:}"s1"${:},${:}"type"${:}:${:}"String"${:}}${:},${:}{${:}"name"${:}:${:}"s2"${:},${:}"type"${:}:${:}"String"${:}}${:},${:}{${:}"name"${:}:${:}"s3"${:},${:}"type"${:}:${:}"String"${:}}${:},${:}{${:}"name"${:}:${:}"s4"${:},${:}"type"${:}:${:}"String"${:}}${:},${:}{${:}"name"${:}:${:}"n"${:},${:}"type"${:}:${:}"UInt64"${:}}${:},${:}{${:}"name"${:}:${:}"d"${:},${:}"type"${:}:${:}"Date"${:}}${:}]${:},${:}"data"${:}:${:}[${data}]${:},${:}"rows"${:}:${:}${:CSV}${:},${:}"statistics"${:}:${:}{${:}"elapsed"${:}:${:}${:CSV}${:},${:}"rows_read"${:}:${:}${:CSV}${:},${:}"bytes_read"${:}:${:}${:CSV}${:}}${:}}' > "$CURDIR"/00938_template_input_format_resultset.tmp echo -ne '{${:}"s1"${:}:${:}${s1:JSON}${:},${:}"s2"${:}:${:}${s2:JSON}${:},${:}"s3"${:}:${:}${s3:JSON}${:},${:}"s4"${:}:${:}${s4:JSON}${:},${:}"n"${:}:${:}${n:JSON}${:},${:}"d"${:}:${:}${d:JSON}${:}${:}}' > "$CURDIR"/00938_template_input_format_row.tmp -$CLICKHOUSE_CLIENT --query="SELECT * FROM template1 ORDER BY n FORMAT JSON" | $CLICKHOUSE_CLIENT --query="INSERT INTO template2 FORMAT TemplateIgnoreSpaces SETTINGS \ +$CLICKHOUSE_CLIENT --query="SELECT * FROM template1 ORDER BY n FORMAT JSON" | $CLICKHOUSE_CLIENT --query="INSERT INTO template2 SETTINGS \ format_template_resultset = '$CURDIR/00938_template_input_format_resultset.tmp', \ format_template_row = '$CURDIR/00938_template_input_format_row.tmp', \ -format_template_rows_between_delimiter = ','"; +format_template_rows_between_delimiter = ',' \ +FORMAT TemplateIgnoreSpaces"; $CLICKHOUSE_CLIENT --query="SELECT * FROM template2 ORDER BY n FORMAT CSV"; $CLICKHOUSE_CLIENT --query="TRUNCATE TABLE template2"; @@ -45,10 +47,11 @@ echo "==== parse json ====" echo -ne '{${:}"meta"${:}:${:JSON},${:}"data"${:}:${:}[${data}]${:},${:}"rows"${:}:${:JSON},${:}"statistics"${:}:${:JSON}${:}}' > "$CURDIR"/00938_template_input_format_resultset.tmp echo -ne '{${:}"s1"${:}:${:}${s3:JSON}${:},${:}"s2"${:}:${:}${:JSON}${:},${:}"s3"${:}:${:}${s1:JSON}${:},${:}"s4"${:}:${:}${:JSON}${:},${:}"n"${:}:${:}${n:JSON}${:},${:}"d"${:}:${:}${d:JSON}${:}${:}}' > "$CURDIR"/00938_template_input_format_row.tmp -$CLICKHOUSE_CLIENT --query="SELECT * FROM template1 ORDER BY n FORMAT JSON" | $CLICKHOUSE_CLIENT --query="INSERT INTO template2 FORMAT TemplateIgnoreSpaces SETTINGS \ +$CLICKHOUSE_CLIENT --query="SELECT * FROM template1 ORDER BY n FORMAT JSON" | $CLICKHOUSE_CLIENT --query="INSERT INTO template2 SETTINGS \ format_template_resultset = '$CURDIR/00938_template_input_format_resultset.tmp', \ format_template_row = '$CURDIR/00938_template_input_format_row.tmp', \ -format_template_rows_between_delimiter = ','"; +format_template_rows_between_delimiter = ',' \ +FORMAT TemplateIgnoreSpaces"; $CLICKHOUSE_CLIENT --query="SELECT * FROM template2 ORDER BY n FORMAT CSV"; @@ -66,10 +69,11 @@ cv bn m\", d: 2016-01-01 ; n: 456, s1: as\"df\\'gh , s2: '', s3: \"zx\\ncv\\tbn m\", s4: \"qwe,rty\", d: 2016-01-02 ; n: 9876543210, s1: , s2: 'zx\\ncv\\tbn m', s3: \"qwe,rty\", s4: \"as\"\"df'gh\", d: 2016-01-03 ; n: 789, s1: zx\cv\bn m , s2: 'qwe,rty', s3: \"as\\\"df'gh\", s4: \"\", d: 2016-01-04"$'\t'" - $ suffix $" | $CLICKHOUSE_CLIENT --query="INSERT INTO template1 FORMAT Template SETTINGS \ + $ suffix $" | $CLICKHOUSE_CLIENT --query="INSERT INTO template1 SETTINGS \ format_template_resultset = '$CURDIR/00938_template_input_format_resultset.tmp', \ format_template_row = '$CURDIR/00938_template_input_format_row.tmp', \ -format_template_rows_between_delimiter = ';\n'"; 
+format_template_rows_between_delimiter = ';\n' \ +FORMAT Template"; $CLICKHOUSE_CLIENT --query="SELECT * FROM template1 ORDER BY n FORMAT CSV"; diff --git a/tests/queries/0_stateless/00971_query_id_in_logs.sh b/tests/queries/0_stateless/00971_query_id_in_logs.sh index 9e927f36a9c..4de6e02d10d 100755 --- a/tests/queries/0_stateless/00971_query_id_in_logs.sh +++ b/tests/queries/0_stateless/00971_query_id_in_logs.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-parallel CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=trace @@ -9,4 +10,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e # No log lines without query id -$CLICKHOUSE_CLIENT --query_id=hello --query="SELECT count() FROM numbers(10)" 2>&1 | grep -vF ' {hello} ' | grep -P '<\w+>' ||: +$CLICKHOUSE_CLIENT --query_id=hello_00971 --query="SELECT count() FROM numbers(10)" 2>&1 | grep -vF ' {hello_00971} ' | grep -P '<\w+>' ||: diff --git a/tests/queries/0_stateless/00980_merge_alter_settings.sql b/tests/queries/0_stateless/00980_merge_alter_settings.sql index c0d18f6d453..f595a09970d 100644 --- a/tests/queries/0_stateless/00980_merge_alter_settings.sql +++ b/tests/queries/0_stateless/00980_merge_alter_settings.sql @@ -91,8 +91,8 @@ SHOW CREATE TABLE table_for_reset_setting; ALTER TABLE table_for_reset_setting RESET SETTING index_granularity; -- { serverError 472 } --- ignore undefined setting -ALTER TABLE table_for_reset_setting RESET SETTING merge_with_ttl_timeout, unknown_setting; +-- don't execute alter with incorrect setting +ALTER TABLE table_for_reset_setting RESET SETTING merge_with_ttl_timeout, unknown_setting; -- { serverError 36 } ALTER TABLE table_for_reset_setting MODIFY SETTING merge_with_ttl_timeout = 300, max_concurrent_queries = 1; @@ -102,4 +102,4 @@ ALTER TABLE table_for_reset_setting RESET SETTING max_concurrent_queries, merge_ SHOW CREATE TABLE table_for_reset_setting; -DROP TABLE IF EXISTS table_for_reset_setting; \ No newline at end of file +DROP TABLE IF EXISTS table_for_reset_setting; diff --git a/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql index dfb91eb3b0a..1b291bf84d2 100644 --- a/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql +++ b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.sql @@ -108,8 +108,8 @@ ATTACH TABLE replicated_table_for_reset_setting1; SHOW CREATE TABLE replicated_table_for_reset_setting1; SHOW CREATE TABLE replicated_table_for_reset_setting2; --- ignore undefined setting -ALTER TABLE replicated_table_for_reset_setting1 RESET SETTING check_delay_period, unknown_setting; +-- don't execute alter with incorrect setting +ALTER TABLE replicated_table_for_reset_setting1 RESET SETTING check_delay_period, unknown_setting; -- { serverError 36 } ALTER TABLE replicated_table_for_reset_setting1 RESET SETTING merge_with_ttl_timeout; ALTER TABLE replicated_table_for_reset_setting2 RESET SETTING merge_with_ttl_timeout; diff --git a/tests/queries/0_stateless/01003_kill_query_race_condition.sh b/tests/queries/0_stateless/01003_kill_query_race_condition.sh index 64caf0f88d1..f98897b1544 100755 --- a/tests/queries/0_stateless/01003_kill_query_race_condition.sh +++ b/tests/queries/0_stateless/01003_kill_query_race_condition.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: race +# Tags: race, no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -9,12 +9,12 @@ set -e function thread1() { - $CLICKHOUSE_CLIENT 
--query_id=hello --query "SELECT count() FROM numbers(1000000000)" --format Null; + $CLICKHOUSE_CLIENT --query_id=hello_01003 --query "SELECT count() FROM numbers(1000000000)" --format Null; } function thread2() { - $CLICKHOUSE_CLIENT --query "KILL QUERY WHERE query_id = 'hello'" --format Null + $CLICKHOUSE_CLIENT --query "KILL QUERY WHERE query_id = 'hello_01003'" --format Null sleep 0.$RANDOM } diff --git a/tests/queries/0_stateless/01014_format_custom_separated.sh b/tests/queries/0_stateless/01014_format_custom_separated.sh index 42599bcc944..4e88419d125 100755 --- a/tests/queries/0_stateless/01014_format_custom_separated.sh +++ b/tests/queries/0_stateless/01014_format_custom_separated.sh @@ -23,12 +23,13 @@ echo '0, "2019-09-24", "hello" 1, 2019-09-25, "world" 2, "2019-09-26", custom 3, 2019-09-27, separated -end' | $CLICKHOUSE_CLIENT --query="INSERT INTO custom_separated FORMAT CustomSeparated SETTINGS \ +end' | $CLICKHOUSE_CLIENT --query="INSERT INTO custom_separated SETTINGS \ format_custom_escaping_rule = 'CSV', \ format_custom_field_delimiter = ', ', \ format_custom_row_after_delimiter = '\n', \ format_custom_row_between_delimiter = '', \ -format_custom_result_after_delimiter = 'end\n'" +format_custom_result_after_delimiter = 'end\n' +FORMAT CustomSeparated" $CLICKHOUSE_CLIENT --query="SELECT * FROM custom_separated ORDER BY n FORMAT CSV" diff --git a/tests/queries/0_stateless/01015_attach_part.reference b/tests/queries/0_stateless/01015_attach_part.reference index b6cd514cd25..81c49e654ac 100644 --- a/tests/queries/0_stateless/01015_attach_part.reference +++ b/tests/queries/0_stateless/01015_attach_part.reference @@ -1,3 +1,4 @@ 1000 0 1000 +0 diff --git a/tests/queries/0_stateless/01015_attach_part.sql b/tests/queries/0_stateless/01015_attach_part.sql index 6b786bfbab9..a2f949d3499 100644 --- a/tests/queries/0_stateless/01015_attach_part.sql +++ b/tests/queries/0_stateless/01015_attach_part.sql @@ -21,4 +21,8 @@ ALTER TABLE table_01 ATTACH PART '20191001_1_1_0'; SELECT COUNT() FROM table_01; +ALTER TABLE table_01 DETACH PARTITION ALL; + +SELECT COUNT() FROM table_01; + DROP TABLE IF EXISTS table_01; diff --git a/tests/queries/0_stateless/01085_regexp_input_format.sh b/tests/queries/0_stateless/01085_regexp_input_format.sh index 5736d031c08..217a2fbe8b7 100755 --- a/tests/queries/0_stateless/01085_regexp_input_format.sh +++ b/tests/queries/0_stateless/01085_regexp_input_format.sh @@ -9,19 +9,19 @@ $CLICKHOUSE_CLIENT --query="CREATE TABLE regexp (id UInt32, array Array(UInt32), echo 'id: 1 array: [1,2,3] string: str1 date: 2020-01-01 id: 2 array: [1,2,3] string: str2 date: 2020-01-02 -id: 3 array: [1,2,3] string: str3 date: 2020-01-03' | $CLICKHOUSE_CLIENT --query="INSERT INTO regexp FORMAT Regexp SETTINGS format_regexp='id: (.+?) array: (.+?) string: (.+?) date: (.+?)', format_regexp_escaping_rule='Escaped'"; +id: 3 array: [1,2,3] string: str3 date: 2020-01-03' | $CLICKHOUSE_CLIENT --query="INSERT INTO regexp SETTINGS format_regexp='id: (.+?) array: (.+?) string: (.+?) date: (.+?)', format_regexp_escaping_rule='Escaped' FORMAT Regexp "; echo 'id: 4 array: "[1,2,3]" string: "str4" date: "2020-01-04" id: 5 array: "[1,2,3]" string: "str5" date: "2020-01-05" -id: 6 array: "[1,2,3]" string: "str6" date: "2020-01-06"' | $CLICKHOUSE_CLIENT --query="INSERT INTO regexp FORMAT Regexp SETTINGS format_regexp='id: (.+?) array: (.+?) string: (.+?) 
date: (.+?)', format_regexp_escaping_rule='CSV'"; +id: 6 array: "[1,2,3]" string: "str6" date: "2020-01-06"' | $CLICKHOUSE_CLIENT --query="INSERT INTO regexp SETTINGS format_regexp='id: (.+?) array: (.+?) string: (.+?) date: (.+?)', format_regexp_escaping_rule='CSV' FORMAT Regexp"; echo "id: 7 array: [1,2,3] string: 'str7' date: '2020-01-07' id: 8 array: [1,2,3] string: 'str8' date: '2020-01-08' -id: 9 array: [1,2,3] string: 'str9' date: '2020-01-09'" | $CLICKHOUSE_CLIENT --query="INSERT INTO regexp FORMAT Regexp SETTINGS format_regexp='id: (.+?) array: (.+?) string: (.+?) date: (.+?)', format_regexp_escaping_rule='Quoted'"; +id: 9 array: [1,2,3] string: 'str9' date: '2020-01-09'" | $CLICKHOUSE_CLIENT --query="INSERT INTO regexp SETTINGS format_regexp='id: (.+?) array: (.+?) string: (.+?) date: (.+?)', format_regexp_escaping_rule='Quoted' FORMAT Regexp"; echo 'id: 10 array: [1,2,3] string: "str10" date: "2020-01-10" id: 11 array: [1,2,3] string: "str11" date: "2020-01-11" -id: 12 array: [1,2,3] string: "str12" date: "2020-01-12"' | $CLICKHOUSE_CLIENT --query="INSERT INTO regexp FORMAT Regexp SETTINGS format_regexp='id: (.+?) array: (.+?) string: (.+?) date: (.+?)', format_regexp_escaping_rule='JSON'"; +id: 12 array: [1,2,3] string: "str12" date: "2020-01-12"' | $CLICKHOUSE_CLIENT --query="INSERT INTO regexp SETTINGS format_regexp='id: (.+?) array: (.+?) string: (.+?) date: (.+?)', format_regexp_escaping_rule='JSON' FORMAT Regexp"; $CLICKHOUSE_CLIENT --query="SELECT * FROM regexp ORDER BY id"; $CLICKHOUSE_CLIENT --query="DROP TABLE regexp"; diff --git a/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh b/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh index c96aed7d3ee..8db27891006 100755 --- a/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh +++ b/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh @@ -10,7 +10,7 @@ $CLICKHOUSE_CLIENT --query="CREATE TABLE regexp (id UInt32, string String) ENGIN echo 'id: 1 string: str1 id: 2 string: str2 id=3, string=str3 -id: 4 string: str4' | $CLICKHOUSE_CLIENT --query="INSERT INTO regexp FORMAT Regexp SETTINGS format_regexp='id: (.+?) string: (.+?)', format_regexp_escaping_rule='Escaped', format_regexp_skip_unmatched=1"; +id: 4 string: str4' | $CLICKHOUSE_CLIENT --query="INSERT INTO regexp SETTINGS format_regexp='id: (.+?) string: (.+?)', format_regexp_escaping_rule='Escaped', format_regexp_skip_unmatched=1 FORMAT Regexp"; $CLICKHOUSE_CLIENT --query="SELECT * FROM regexp"; $CLICKHOUSE_CLIENT --query="DROP TABLE regexp"; diff --git a/tests/queries/0_stateless/01161_all_system_tables.sh b/tests/queries/0_stateless/01161_all_system_tables.sh index 1a653763ad3..a5ed2ea7e6d 100755 --- a/tests/queries/0_stateless/01161_all_system_tables.sh +++ b/tests/queries/0_stateless/01161_all_system_tables.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# Tags: no-parallel + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh @@ -12,7 +14,7 @@ function run_selects() { thread_num=$1 readarray -t tables_arr < <(${CLICKHOUSE_CLIENT} -q "SELECT database || '.' 
|| name FROM system.tables - WHERE database in ('system', 'information_schema', 'INFORMATION_SCHEMA') and name!='zookeeper' and name!='merge_tree_metadata_cache' + WHERE database in ('system', 'information_schema', 'INFORMATION_SCHEMA') and name!='zookeeper' and name!='merge_tree_metadata_cache' AND sipHash64(name || toString($RAND)) % $THREADS = $thread_num") for t in "${tables_arr[@]}" diff --git a/tests/queries/0_stateless/01167_isolation_hermitage.reference b/tests/queries/0_stateless/01167_isolation_hermitage.reference new file mode 100644 index 00000000000..4488809f3ed --- /dev/null +++ b/tests/queries/0_stateless/01167_isolation_hermitage.reference @@ -0,0 +1,59 @@ +Serialization error +INVALID_TRANSACTION +INVALID_TRANSACTION +1 1 11 +1 2 21 +tx4 2 1 10 +tx4 2 2 20 +tx4 3 1 10 +tx4 3 2 20 +4 1 10 +4 2 20 +tx6 5 1 10 +tx6 5 2 20 +tx6 6 1 10 +tx6 6 2 20 +7 1 11 +7 2 20 +Serialization error +tx7 8 1 11 +tx7 8 2 20 +INVALID_TRANSACTION +INVALID_TRANSACTION +10 1 11 +10 2 20 +Serialization error +tx11 11 1 10 +tx11 11 2 20 +INVALID_TRANSACTION +tx11 12 1 10 +tx11 12 2 20 +INVALID_TRANSACTION +13 1 11 +13 2 19 +16 1 10 +16 2 20 +16 3 30 +Serialization error +INVALID_TRANSACTION +INVALID_TRANSACTION +18 1 20 +18 2 30 +tx16 19 1 10 +tx16 19 2 20 +tx17 20 1 10 +tx17 20 2 20 +Serialization error +INVALID_TRANSACTION +21 1 11 +21 2 20 +tx18 22 1 10 +tx19 23 1 10 +tx19 24 2 20 +tx18 25 2 20 +26 1 12 +26 2 18 +29 1 10 +29 2 20 +29 3 30 +29 4 42 diff --git a/tests/queries/0_stateless/01167_isolation_hermitage.sh b/tests/queries/0_stateless/01167_isolation_hermitage.sh new file mode 100755 index 00000000000..7f495801dd0 --- /dev/null +++ b/tests/queries/0_stateless/01167_isolation_hermitage.sh @@ -0,0 +1,166 @@ +#!/usr/bin/env bash +# Tags: long, no-fasttest, no-replicated-database +# Looks like server does not listen https port in fasttest +# FIXME Replicated database executes ALTERs in separate context, so transaction info is lost + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh +# shellcheck source=./transactions.lib +. 
"$CURDIR"/transactions.lib +set -e + +# https://github.com/ept/hermitage + +$CLICKHOUSE_CLIENT -q "drop table if exists test" +$CLICKHOUSE_CLIENT -q "create table test (id int, value int) engine=MergeTree order by id" + +function reset_table() +{ + $CLICKHOUSE_CLIENT -q "truncate table test;" + $CLICKHOUSE_CLIENT -q "insert into test (id, value) values (1, 10);" + $CLICKHOUSE_CLIENT -q "insert into test (id, value) values (2, 20);" +} + +# TODO update test after implementing Read Committed + +# G0 +reset_table +tx 1 "begin transaction" +tx 2 "begin transaction" +tx 1 "alter table test update value=11 where id=1" +tx 2 "alter table test update value=12 where id=1" | grep -Eo "Serialization error" | uniq +tx 1 "alter table test update value=21 where id=2" +tx 1 "commit" +tx 2 "alter table test update value=22 where id=2" | grep -Eo "INVALID_TRANSACTION" | uniq +tx 2 "commit" | grep -Eo "INVALID_TRANSACTION" | uniq +tx 2 "rollback" +$CLICKHOUSE_CLIENT -q "select 1, * from test order by id" + +# G1a +reset_table +tx_async 3 "begin transaction" +tx_async 4 "begin transaction" +tx_async 3 "alter table test update value=101 where id=1" +tx_async 4 "select 2, * from test order by id" +tx_async 3 "alter table test update value=11 where id=1" +tx_async 3 "rollback" +tx_async 4 "select 3, * from test order by id" +tx_async 4 "commit" +tx_wait 3 +tx_wait 4 +$CLICKHOUSE_CLIENT -q "select 4, * from test order by id" + +# G1b +reset_table +tx_async 5 "begin transaction" +tx_async 6 "begin transaction" +tx_async 5 "alter table test update value=101 where id=1" +tx_async 6 "select 5, * from test order by id" +tx_async 5 "alter table test update value=11 where id=1" +tx_async 5 "commit" +tx_async 6 "select 6, * from test order by id" +tx_async 6 "commit" +tx_wait 5 +tx_wait 6 +$CLICKHOUSE_CLIENT -q "select 7, * from test order by id" + +# G1c +# NOTE both transactions will succeed if we implement skipping of unaffected partitions/parts +reset_table +tx 7 "begin transaction" +tx 8 "begin transaction" +tx 7 "alter table test update value=11 where id=1" +tx 8 "alter table test update value=22 where id=2" | grep -Eo "Serialization error" | uniq +tx 7 "select 8, * from test order by id" +tx 8 "select 9, * from test order by id" | grep -Eo "INVALID_TRANSACTION" | uniq +tx 7 "commit" +tx 8 "commit" | grep -Eo "INVALID_TRANSACTION" | uniq +tx 8 "rollback" +$CLICKHOUSE_CLIENT -q "select 10, * from test order by id" + +# OTV +reset_table +tx 9 "begin transaction" +tx 10 "begin transaction" +tx 11 "begin transaction" +tx 9 "alter table test update value = 11 where id = 1" +tx 9 "alter table test update value = 19 where id = 2" +tx 10 "alter table test update value = 12 where id = 1" | grep -Eo "Serialization error" | uniq +tx 9 "commit" +tx 11 "select 11, * from test order by id" +tx 10 "alter table test update value = 18 where id = 2" | grep -Eo "INVALID_TRANSACTION" | uniq +tx 11 "select 12, * from test order by id" +tx 10 "commit" | grep -Eo "INVALID_TRANSACTION" | uniq +tx 10 "rollback" +tx 11 "commit" +$CLICKHOUSE_CLIENT -q "select 13, * from test order by id" + +# PMP +reset_table +tx_async 12 "begin transaction" +tx_async 13 "begin transaction" +tx_async 12 "select 14, * from test where value = 30" +tx_async 13 "insert into test (id, value) values (3, 30)" +tx_async 13 "commit" +tx_async 12 "select 15, * from test where value = 30" +tx_async 12 "commit" +tx_wait 12 +tx_wait 13 +$CLICKHOUSE_CLIENT -q "select 16, * from test order by id" + +# PMP write +reset_table +tx 14 "begin transaction" +tx 15 "begin 
transaction" +tx 14 "alter table test update value = value + 10 where 1" +tx 15 "alter table test delete where value = 20" | grep -Eo "Serialization error" | uniq +tx 14 "commit" +tx 15 "select 17, * from test order by id" | grep -Eo "INVALID_TRANSACTION" | uniq +tx 15 "commit" | grep -Eo "INVALID_TRANSACTION" | uniq +tx 15 "rollback" +$CLICKHOUSE_CLIENT -q "select 18, * from test order by id" + +# P4 +reset_table +tx 16 "begin transaction" +tx 17 "begin transaction" +tx 16 "select 19, * from test order by id" +tx 17 "select 20, * from test order by id" +tx 16 "alter table test update value = 11 where id = 1" +tx 17 "alter table test update value = 11 where id = 1" | grep -Eo "Serialization error" | uniq +tx 16 "commit" +tx 17 "commit" | grep -Eo "INVALID_TRANSACTION" | uniq +tx 17 "rollback" +$CLICKHOUSE_CLIENT -q "select 21, * from test order by id" + +# G-single +reset_table +tx_async 18 "begin transaction" +tx_async 19 "begin transaction" +tx_sync 18 "select 22, * from test where id = 1" +tx_async 19 "select 23, * from test where id = 1" +tx_async 19 "select 24, * from test where id = 2" +tx_async 19 "alter table test update value = 12 where id = 1" +tx_async 19 "alter table test update value = 18 where id = 2" +tx_async 19 "commit" +tx_async 18 "select 25, * from test where id = 2" +tx_async 18 "commit" +tx_wait 18 +tx_wait 19 +$CLICKHOUSE_CLIENT -q "select 26, * from test order by id" + +# G2 +reset_table +tx_async 20 "begin transaction" +tx_async 21 "begin transaction" +tx_sync 20 "select 27, * from test where value % 3 = 0" +tx_async 21 "select 28, * from test where value % 3 = 0" +tx_async 20 "insert into test (id, value) values (3, 30)" +tx_async 21 "insert into test (id, value) values (4, 42)" +tx_async 20 "commit" +tx_async 21 "commit" +tx_wait 20 +tx_wait 21 +$CLICKHOUSE_CLIENT -q "select 29, * from test order by id" + diff --git a/tests/queries/0_stateless/01168_mutations_isolation.reference b/tests/queries/0_stateless/01168_mutations_isolation.reference new file mode 100644 index 00000000000..1b3e3f145b1 --- /dev/null +++ b/tests/queries/0_stateless/01168_mutations_isolation.reference @@ -0,0 +1,38 @@ +tx2 1 10 all_1_1_0_4 +tx2 1 30 all_3_3_0_4 +tx1 2 1 all_1_1_0 +tx1 2 2 all_2_2_0 +Serialization error +INVALID_TRANSACTION +tx3 3 1 all_1_1_0 +Serialization error +INVALID_TRANSACTION +INVALID_TRANSACTION +tx5 4 2 all_1_1_0_8 +tx5 4 5 all_10_10_0 +tx5 4 6 all_7_7_0_8 +tx5 5 2 all_1_1_0_8 +tx5 5 5 all_10_10_0 +tx5 5 6 all_7_7_0_8 +SERIALIZATION_ERROR +tx6 6 2 all_1_1_0_11 +tx6 6 6 all_7_7_0_11 +tx7 7 20 all_1_1_0_13 +tx7 7 40 all_14_14_0 +tx7 7 60 all_7_7_0_13 +tx7 7 80 all_12_12_0_13 +tx7 8 20 all_1_14_1_13 +tx7 8 40 all_1_14_1_13 +tx7 8 60 all_1_14_1_13 +tx7 8 80 all_1_14_1_13 +Serialization error +INVALID_TRANSACTION +tx11 9 21 all_1_14_1_17 +tx11 9 41 all_1_14_1_17 +tx11 9 61 all_1_14_1_17 +tx11 9 81 all_1_14_1_17 +1 1 RUNNING +tx14 10 22 all_1_14_1_18 +tx14 10 42 all_1_14_1_18 +tx14 10 62 all_1_14_1_18 +tx14 10 82 all_1_14_1_18 diff --git a/tests/queries/0_stateless/01168_mutations_isolation.sh b/tests/queries/0_stateless/01168_mutations_isolation.sh new file mode 100755 index 00000000000..888858edf32 --- /dev/null +++ b/tests/queries/0_stateless/01168_mutations_isolation.sh @@ -0,0 +1,93 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-replicated-database +# Looks like server does not listen https port in fasttest +# FIXME Replicated database executes ALTERs in separate context, so transaction info is lost + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# 
shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh +# shellcheck source=./transactions.lib +. "$CURDIR"/transactions.lib + +$CLICKHOUSE_CLIENT -q "drop table if exists mt" +$CLICKHOUSE_CLIENT -q "create table mt (n int) engine=MergeTree order by tuple()" + +$CLICKHOUSE_CLIENT -q "insert into mt values (1)" + +tx 1 "begin transaction" +tx 2 "begin transaction" +tx 1 "insert into mt values (2)" +tx 2 "insert into mt values (3)" +tx 2 "alter table mt update n=n*10 where 1" +tx 2 "select 1, n, _part from mt order by n" +tx 1 "select 2, n, _part from mt order by n" +tx 1 "alter table mt update n=n+1 where 1" | grep -Eo "Serialization error" | uniq +tx 1 "commit" | grep -Eo "INVALID_TRANSACTION" | uniq +tx 2 "rollback" + + +tx 3 "begin transaction" +tx 3 "select 3, n, _part from mt order by n" +tx 4 "begin transaction" +tx 3 "insert into mt values (2)" +tx 4 "insert into mt values (3)" +tx 4 "alter table mt update n=n*2 where 1" +tx 3 "alter table mt update n=n+42 where 1" | grep -Eo "Serialization error" | uniq +tx 3 "insert into mt values (4)" | grep -Eo "INVALID_TRANSACTION" | uniq +tx 4 "insert into mt values (5)" +tx 3 "commit" | grep -Eo "INVALID_TRANSACTION" | uniq +tx 4 "commit" + + +tx 5 "begin transaction" +tx 5 "select 4, n, _part from mt order by n" +tx 6 "begin transaction" +tx 6 "alter table mt delete where n%2=1" +tx 6 "alter table mt drop part 'all_10_10_0_11'" +tx 5 "select 5, n, _part from mt order by n" +tx 5 "alter table mt drop partition id 'all'" | grep -Eo "SERIALIZATION_ERROR" | uniq +tx 6 "select 6, n, _part from mt order by n" +tx 5 "rollback" +tx 6 "insert into mt values (8)" +tx 6 "alter table mt update n=n*10 where 1" +tx 6 "insert into mt values (40)" +tx 6 "commit" + + +tx 7 "begin transaction" +tx 7 "select 7, n, _part from mt order by n" +tx 8 "begin transaction" +tx_async 8 "alter table mt update n = 0 where 1" >/dev/null +$CLICKHOUSE_CLIENT -q "kill mutation where database=currentDatabase() and mutation_id='mutation_15.txt' format Null" 2>&1| grep -Fv "probably it finished" +tx_sync 8 "rollback" +tx 7 "optimize table mt final" +tx 7 "select 8, n, _part from mt order by n" +tx 10 "begin transaction" +tx 10 "alter table mt update n = 0 where 1" | grep -Eo "Serialization error" | uniq +tx 7 "alter table mt update n=n+1 where 1" +tx 10 "commit" | grep -Eo "INVALID_TRANSACTION" | uniq +tx 10 "rollback" +tx 7 "commit" + + +tx_async 11 "begin transaction" +tx_async 11 "select 9, n, _part from mt order by n" +tx_async 12 "begin transaction" +tx_async 11 "alter table mt update n=n+1 where 1" >/dev/null +tx_async 12 "alter table mt update n=n+1 where 1" >/dev/null +tx_async 11 "commit" >/dev/null +tx_async 12 "commit" >/dev/null +tx_wait 11 +tx_wait 12 + +tx 13 "begin transaction" +tid_to_kill=$(tx 13 "select transactionID()" | grep -Po "\(.*") +$CLICKHOUSE_CLIENT -q "select count(), any(is_readonly), any(state) from system.transactions where tid=$tid_to_kill" +tx_async 13 "alter table mt update n = 0 where 1" >/dev/null +$CLICKHOUSE_CLIENT -q "kill transaction where tid=$tid_to_kill format Null" +tx_sync 13 "rollback" + +tx 14 "begin transaction" +tx 14 "select 10, n, _part from mt order by n" + +$CLICKHOUSE_CLIENT --database_atomic_wait_for_drop_and_detach_synchronously=0 -q "drop table mt" diff --git a/tests/queries/0_stateless/01169_alter_partition_isolation_stress.reference b/tests/queries/0_stateless/01169_alter_partition_isolation_stress.reference new file mode 100644 index 00000000000..12b941eab50 --- /dev/null +++ 
b/tests/queries/0_stateless/01169_alter_partition_isolation_stress.reference @@ -0,0 +1,8 @@ +1 1 +2 1 +3 1 +4 1 +1 +10 100 +1 1 1 +2 1 1 diff --git a/tests/queries/0_stateless/01169_alter_partition_isolation_stress.sh b/tests/queries/0_stateless/01169_alter_partition_isolation_stress.sh new file mode 100755 index 00000000000..ab348fd31fb --- /dev/null +++ b/tests/queries/0_stateless/01169_alter_partition_isolation_stress.sh @@ -0,0 +1,133 @@ +#!/usr/bin/env bash +# Tags: long, no-replicated-database + +# shellcheck disable=SC2015 + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +set -e + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS src"; +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS dst"; +$CLICKHOUSE_CLIENT --query "CREATE TABLE src (n UInt64, type UInt8) ENGINE=MergeTree ORDER BY type SETTINGS old_parts_lifetime=0"; +$CLICKHOUSE_CLIENT --query "CREATE TABLE dst (n UInt64, type UInt8) ENGINE=MergeTree ORDER BY type SETTINGS old_parts_lifetime=0"; + +function thread_insert() +{ + set -e + trap "exit 0" INT + val=1 + while true; do + $CLICKHOUSE_CLIENT --multiquery --query " + BEGIN TRANSACTION; + INSERT INTO src VALUES /* ($val, 1) */ ($val, 1); + INSERT INTO src VALUES /* ($val, 2) */ ($val, 2); + COMMIT;" + val=$((val+1)) + sleep 0.$RANDOM; + done +} + + +# NOTE +# ALTER PARTITION query stops merges, +# but serialization error is still possible if some merge was assigned (and committed) between BEGIN and ALTER. +function thread_partition_src_to_dst() +{ + set -e + count=0 + sum=0 + for i in {1..20}; do + out=$( + $CLICKHOUSE_CLIENT --multiquery --query " + BEGIN TRANSACTION; + INSERT INTO src VALUES /* ($i, 3) */ ($i, 3); + INSERT INTO dst SELECT * FROM src; + ALTER TABLE src DROP PARTITION ID 'all'; + SET throw_on_unsupported_query_inside_transaction=0; + SELECT throwIf((SELECT (count(), sum(n)) FROM merge(currentDatabase(), '') WHERE type=3) != ($count + 1, $sum + $i)) FORMAT Null; + COMMIT;" 2>&1) ||: + + echo "$out" | grep -Fv "SERIALIZATION_ERROR" | grep -F "Received from " && $CLICKHOUSE_CLIENT --multiquery --query " + begin transaction; + set transaction snapshot 3; + select $i, 'src', type, n, _part from src order by type, n; + select $i, 'dst', type, n, _part from dst order by type, n; + rollback" ||: + echo "$out" | grep -Fa "SERIALIZATION_ERROR" >/dev/null || count=$((count+1)) + echo "$out" | grep -Fa "SERIALIZATION_ERROR" >/dev/null || sum=$((sum+i)) + done +} + +function thread_partition_dst_to_src() +{ + set -e + for i in {1..20}; do + action="ROLLBACK" + if (( i % 2 )); then + action="COMMIT" + fi + $CLICKHOUSE_CLIENT --multiquery --query " + SYSTEM STOP MERGES dst; + ALTER TABLE dst DROP PARTITION ID 'nonexistent'; -- STOP MERGES doesn't wait for started merges to finish, so we use this trick + BEGIN TRANSACTION; + INSERT INTO dst VALUES /* ($i, 4) */ ($i, 4); + INSERT INTO src SELECT * FROM dst; + ALTER TABLE dst DROP PARTITION ID 'all'; + SET throw_on_unsupported_query_inside_transaction=0; + SYSTEM START MERGES dst; + SELECT throwIf((SELECT (count(), sum(n)) FROM merge(currentDatabase(), '') WHERE type=4) != (toUInt8($i/2 + 1), (select sum(number) from numbers(1, $i) where number % 2 or number=$i))) FORMAT Null; + $action;" || $CLICKHOUSE_CLIENT --multiquery --query " + begin transaction; + set transaction snapshot 3; + select $i, 'src', type, n, _part from src order by type, n; + select $i, 'dst', type, n, _part from dst order by type, n; + rollback" ||: + done +} + +function 
thread_select() +{ + set -e + trap "exit 0" INT + while true; do + $CLICKHOUSE_CLIENT --multiquery --query " + BEGIN TRANSACTION; + -- no duplicates + SELECT type, throwIf(count(n) != countDistinct(n)) FROM src GROUP BY type FORMAT Null; + SELECT type, throwIf(count(n) != countDistinct(n)) FROM dst GROUP BY type FORMAT Null; + -- rows inserted by thread_insert moved together + SET throw_on_unsupported_query_inside_transaction=0; + SELECT _table, throwIf(arraySort(groupArrayIf(n, type=1)) != arraySort(groupArrayIf(n, type=2))) FROM merge(currentDatabase(), '') GROUP BY _table FORMAT Null; + -- all rows are inserted in insert_thread + SELECT type, throwIf(count(n) != max(n)), throwIf(sum(n) != max(n)*(max(n)+1)/2) FROM merge(currentDatabase(), '') WHERE type IN (1, 2) GROUP BY type ORDER BY type FORMAT Null; + COMMIT;" || $CLICKHOUSE_CLIENT --multiquery --query " + begin transaction; + set transaction snapshot 3; + select $i, 'src', type, n, _part from src order by type, n; + select $i, 'dst', type, n, _part from dst order by type, n; + rollback" ||: + done +} + +thread_insert & PID_1=$! +thread_select & PID_2=$! + +thread_partition_src_to_dst & PID_3=$! +thread_partition_dst_to_src & PID_4=$! +wait $PID_3 && wait $PID_4 + +kill -INT $PID_1 +kill -INT $PID_2 +wait + +$CLICKHOUSE_CLIENT -q "SELECT type, count(n) = countDistinct(n) FROM merge(currentDatabase(), '') GROUP BY type ORDER BY type" +$CLICKHOUSE_CLIENT -q "SELECT DISTINCT arraySort(groupArrayIf(n, type=1)) = arraySort(groupArrayIf(n, type=2)) FROM merge(currentDatabase(), '') GROUP BY _table ORDER BY _table" +$CLICKHOUSE_CLIENT -q "SELECT count(n), sum(n) FROM merge(currentDatabase(), '') WHERE type=4" +$CLICKHOUSE_CLIENT -q "SELECT type, count(n) == max(n), sum(n) == max(n)*(max(n)+1)/2 FROM merge(currentDatabase(), '') WHERE type IN (1, 2) GROUP BY type ORDER BY type" + + +$CLICKHOUSE_CLIENT --query "DROP TABLE src"; +$CLICKHOUSE_CLIENT --query "DROP TABLE dst"; diff --git a/tests/queries/0_stateless/01170_alter_partition_isolation.reference b/tests/queries/0_stateless/01170_alter_partition_isolation.reference new file mode 100644 index 00000000000..f384fc748d4 --- /dev/null +++ b/tests/queries/0_stateless/01170_alter_partition_isolation.reference @@ -0,0 +1,30 @@ +tx1 1 1 +tx1 2 3 +tx2 3 2 +tx2 3 4 +tx1 4 3 + +5 3 +5 5 + +tx4 6 3 +tx4 6 5 +tx4 6 6 +tx4 7 8 +tx3 8 3 +tx3 8 5 +tx3 8 7 +tx3 8 9 +SERIALIZATION_ERROR +INVALID_TRANSACTION +tx4 9 8 + +10 8 + +11 8 +11 11 +11 12 +12 8 +12 8 +12 11 +12 12 diff --git a/tests/queries/0_stateless/01170_alter_partition_isolation.sh b/tests/queries/0_stateless/01170_alter_partition_isolation.sh new file mode 100755 index 00000000000..2db178fb6d1 --- /dev/null +++ b/tests/queries/0_stateless/01170_alter_partition_isolation.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-replicated-database +# Looks like server does not listen https port in fasttest +# FIXME Replicated database executes ALTERs in separate context, so transaction info is lost + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh +# shellcheck source=./transactions.lib +. 
"$CURDIR"/transactions.lib + +$CLICKHOUSE_CLIENT -q "drop table if exists mt" +$CLICKHOUSE_CLIENT -q "create table mt (n int) engine=MergeTree order by n" + +tx 1 "begin transaction" +tx 1 "insert into mt values (1)" +tx 2 "begin transaction" +tx 2 "insert into mt values (2)" +tx 1 "select 1, n from mt order by n" +tx 1 "alter table mt drop partition id 'all'" +tx 2 "insert into mt values (4)" +tx 1 "insert into mt values (3)" +tx 1 "select 2, n from mt order by n" +tx 2 "select 3, n from mt order by n" +tx 2 "alter table mt drop partition id 'all'" +tx 2 "insert into mt values (5)" +tx 1 "select 4, n from mt order by n" +tx 2 "commit" +tx 1 "commit" + +echo '' +$CLICKHOUSE_CLIENT -q "select 5, n from mt order by n" +echo '' + +tx 4 "begin transaction" +tx 4 "insert into mt values (6)" +tx 3 "begin transaction" +tx 3 "insert into mt values (7)" +tx 4 "select 6, n from mt order by n" +tx 4 "alter table mt drop partition id 'all'" +tx 3 "insert into mt values (9)" +tx 4 "insert into mt values (8)" +tx 4 "select 7, n from mt order by n" +tx 3 "select 8, n from mt order by n" +tx 3 "alter table mt drop partition id 'all'" | grep -Eo "SERIALIZATION_ERROR" | uniq +tx 3 "insert into mt values (10)" | grep -Eo "INVALID_TRANSACTION" | uniq +tx 4 "select 9, n from mt order by n" +tx 3 "rollback" +tx 4 "commit" + +echo '' +$CLICKHOUSE_CLIENT -q "select 10, n from mt order by n" +echo '' + +$CLICKHOUSE_CLIENT -q "drop table if exists another_mt" +$CLICKHOUSE_CLIENT -q "create table another_mt (n int) engine=MergeTree order by n" + +tx 5 "begin transaction" +tx 5 "insert into another_mt values (11)" +tx 6 "begin transaction" +tx 6 "insert into mt values (12)" +tx 6 "insert into another_mt values (13)" +tx 5 "alter table another_mt move partition id 'all' to table mt" +tx 6 "alter table another_mt replace partition id 'all' from mt" +tx 5 "alter table another_mt attach partition id 'all' from mt" +tx 5 "commit" +tx 6 "commit" + +$CLICKHOUSE_CLIENT -q "select 11, n from mt order by n" +$CLICKHOUSE_CLIENT -q "select 12, n from another_mt order by n" + +$CLICKHOUSE_CLIENT -q "drop table another_mt" +$CLICKHOUSE_CLIENT -q "drop table mt" diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.reference b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.reference new file mode 100644 index 00000000000..d8bb9e310e6 --- /dev/null +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.reference @@ -0,0 +1,4 @@ +275 0 138 136 0 +275 0 +275 0 138 136 0 +275 0 diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh new file mode 100755 index 00000000000..5086515e9eb --- /dev/null +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh @@ -0,0 +1,158 @@ +#!/usr/bin/env bash +# Tags: long, no-parallel +# Test is too heavy, avoid parallel run in Flaky Check + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +set -e + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS src"; +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS dst"; +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS mv"; +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS tmp"; +$CLICKHOUSE_CLIENT --query "CREATE TABLE src (n Int8, m Int8, CONSTRAINT c CHECK xxHash32(n+m) % 8 != 0) ENGINE=MergeTree ORDER BY n PARTITION BY 0 < n SETTINGS old_parts_lifetime=0"; +$CLICKHOUSE_CLIENT --query "CREATE TABLE dst (nm Int16, CONSTRAINT c CHECK xxHash32(nm) % 8 != 0) ENGINE=MergeTree ORDER BY nm SETTINGS old_parts_lifetime=0"; +$CLICKHOUSE_CLIENT --query "CREATE MATERIALIZED VIEW mv TO dst (nm Int16) AS SELECT n*m AS nm FROM src"; + +$CLICKHOUSE_CLIENT --query "CREATE TABLE tmp (x UInt8, nm Int16) ENGINE=MergeTree ORDER BY (x, nm) SETTINGS old_parts_lifetime=0" + +$CLICKHOUSE_CLIENT --query "INSERT INTO src VALUES (0, 0)" + +# some transactions will fail due to constraint +function thread_insert_commit() +{ + set -e + for i in {1..100}; do + $CLICKHOUSE_CLIENT --multiquery --query " + BEGIN TRANSACTION; + INSERT INTO src VALUES /* ($i, $1) */ ($i, $1); + SELECT throwIf((SELECT sum(nm) FROM mv) != $(($i * $1))) FORMAT Null; + INSERT INTO src VALUES /* (-$i, $1) */ (-$i, $1); + COMMIT;" 2>&1| grep -Fv "is violated at row" | grep -Fv "Transaction is not in RUNNING state" | grep -F "Received from " ||: + done +} + +function thread_insert_rollback() +{ + set -e + for _ in {1..100}; do + $CLICKHOUSE_CLIENT --multiquery --query " + BEGIN TRANSACTION; + INSERT INTO src VALUES /* (42, $1) */ (42, $1); + SELECT throwIf((SELECT count() FROM src WHERE n=42 AND m=$1) != 1) FORMAT Null; + ROLLBACK;" + done +} + +# make merges more aggressive +function thread_optimize() +{ + set -e + trap "exit 0" INT + while true; do + optimize_query="OPTIMIZE TABLE src" + partition_id=$(( RANDOM % 2 )) + if (( RANDOM % 2 )); then + optimize_query="OPTIMIZE TABLE dst" + partition_id="all" + fi + if (( RANDOM % 2 )); then + optimize_query="$optimize_query PARTITION ID '$partition_id'" + fi + if (( RANDOM % 2 )); then + optimize_query="$optimize_query FINAL" + fi + action="COMMIT" + if (( RANDOM % 4 )); then + action="ROLLBACK" + fi + + $CLICKHOUSE_CLIENT --multiquery --query " + BEGIN TRANSACTION; + $optimize_query; + $action; + " 2>&1| grep -Fv "already exists, but it will be deleted soon" | grep -F "Received from " ||: + sleep 0.$RANDOM; + done +} + +function thread_select() +{ + set -e + trap "exit 0" INT + while true; do + $CLICKHOUSE_CLIENT --multiquery --query " + BEGIN TRANSACTION; + SELECT throwIf((SELECT (sum(n), count() % 2) FROM src) != (0, 1)) FORMAT Null; + SELECT throwIf((SELECT (sum(nm), count() % 2) FROM mv) != (0, 1)) FORMAT Null; + SELECT throwIf((SELECT (sum(nm), count() % 2) FROM dst) != (0, 1)) FORMAT Null; + SELECT throwIf((SELECT arraySort(groupArray(nm)) FROM mv) != (SELECT arraySort(groupArray(nm)) FROM dst)) FORMAT Null; + SELECT throwIf((SELECT arraySort(groupArray(nm)) FROM mv) != (SELECT arraySort(groupArray(n*m)) FROM src)) FORMAT Null; + COMMIT;" || $CLICKHOUSE_CLIENT --multiquery --query " + begin transaction; + set transaction snapshot 3; + select 'src', n, m, _part from src order by n, m; + select 'dst', nm, _part from dst order by nm; + rollback" ||: + done +} + +function thread_select_insert() +{ + set -e + trap "exit 0" INT + while true; do + $CLICKHOUSE_CLIENT --multiquery --query " + BEGIN TRANSACTION; + SELECT throwIf((SELECT count() FROM tmp) != 0) FORMAT Null; + INSERT INTO tmp SELECT 1, n*m FROM src; + 
INSERT INTO tmp SELECT 2, nm FROM mv; + INSERT INTO tmp SELECT 3, nm FROM dst; + INSERT INTO tmp SELECT 4, (*,).1 FROM (SELECT n*m FROM src UNION ALL SELECT nm FROM mv UNION ALL SELECT nm FROM dst); + SELECT throwIf((SELECT countDistinct(x) FROM tmp) != 4) FORMAT Null; + + -- now check that all results are the same + SELECT throwIf(1 != (SELECT countDistinct(arr) FROM (SELECT x, arraySort(groupArray(nm)) AS arr FROM tmp WHERE x!=4 GROUP BY x))) FORMAT Null; + SELECT throwIf((SELECT count(), sum(nm) FROM tmp WHERE x=4) != (SELECT count(), sum(nm) FROM tmp WHERE x!=4)) FORMAT Null; + ROLLBACK;" || $CLICKHOUSE_CLIENT --multiquery --query " + begin transaction; + set transaction snapshot 3; + select 'src', n, m, _part from src order by n, m; + select 'dst', nm, _part from dst order by nm; + rollback" ||: + done +} + +thread_insert_commit 1 & PID_1=$! +thread_insert_commit 2 & PID_2=$! +thread_insert_rollback 3 & PID_3=$! + +thread_optimize & PID_4=$! +thread_select & PID_5=$! +thread_select_insert & PID_6=$! +sleep 0.$RANDOM; +thread_select & PID_7=$! +thread_select_insert & PID_8=$! + +wait $PID_1 && wait $PID_2 && wait $PID_3 +kill -INT $PID_4 +kill -INT $PID_5 +kill -INT $PID_6 +kill -INT $PID_7 +kill -INT $PID_8 +wait + +$CLICKHOUSE_CLIENT --multiquery --query " +BEGIN TRANSACTION; +SELECT count(), sum(n), sum(m=1), sum(m=2), sum(m=3) FROM src; +SELECT count(), sum(nm) FROM mv"; + +$CLICKHOUSE_CLIENT --query "SELECT count(), sum(n), sum(m=1), sum(m=2), sum(m=3) FROM src" +$CLICKHOUSE_CLIENT --query "SELECT count(), sum(nm) FROM mv" + +$CLICKHOUSE_CLIENT --query "DROP TABLE src"; +$CLICKHOUSE_CLIENT --query "DROP TABLE dst"; +$CLICKHOUSE_CLIENT --query "DROP TABLE mv"; +$CLICKHOUSE_CLIENT --query "DROP TABLE tmp"; diff --git a/tests/queries/0_stateless/01172_transaction_counters.reference b/tests/queries/0_stateless/01172_transaction_counters.reference new file mode 100644 index 00000000000..3a167e76817 --- /dev/null +++ b/tests/queries/0_stateless/01172_transaction_counters.reference @@ -0,0 +1,39 @@ +(0,0,'00000000-0000-0000-0000-000000000000') +1 all_1_1_0 0 +1 all_2_2_0 1 +2 all_1_1_0 1 (0,0,'00000000-0000-0000-0000-000000000000') 0 +2 all_2_2_0 0 (0,0,'00000000-0000-0000-0000-000000000000') 0 +3 all_1_1_0 0 +3 all_3_3_0 1 +4 all_1_1_0 1 (0,0,'00000000-0000-0000-0000-000000000000') 0 +4 all_2_2_0 18446744073709551615 (0,0,'00000000-0000-0000-0000-000000000000') 0 +4 all_3_3_0 0 (0,0,'00000000-0000-0000-0000-000000000000') 0 +5 1 +6 all_1_1_0 0 +6 all_3_3_0 1 +6 all_4_4_0 1 +7 all_1_1_0 (0,0,'00000000-0000-0000-0000-000000000000') 0 +7 all_3_3_0 (0,0,'00000000-0000-0000-0000-000000000000') 0 +7 all_4_4_0 (0,0,'00000000-0000-0000-0000-000000000000') 0 +8 1 +1 1 AddPart 1 1 1 1 all_1_1_0 +2 1 Begin 1 1 1 1 +2 1 AddPart 1 1 1 1 all_2_2_0 +2 1 Rollback 1 1 1 1 +3 1 Begin 1 1 1 1 +3 1 AddPart 1 1 1 1 all_3_3_0 +3 1 Commit 1 1 1 0 +4 1 Begin 1 1 1 1 +4 1 AddPart 1 1 1 1 all_4_4_0 +4 1 Commit 1 1 1 0 +5 1 Begin 1 1 1 1 +5 1 AddPart 1 1 1 1 all_5_5_0 +5 1 LockPart 1 1 1 1 all_1_1_0 +5 1 LockPart 1 1 1 1 all_3_3_0 +5 1 LockPart 1 1 1 1 all_4_4_0 +5 1 LockPart 1 1 1 1 all_5_5_0 +5 1 UnlockPart 1 1 1 1 all_1_1_0 +5 1 UnlockPart 1 1 1 1 all_3_3_0 +5 1 UnlockPart 1 1 1 1 all_4_4_0 +5 1 UnlockPart 1 1 1 1 all_5_5_0 +5 1 Rollback 1 1 1 1 diff --git a/tests/queries/0_stateless/01172_transaction_counters.sql b/tests/queries/0_stateless/01172_transaction_counters.sql new file mode 100644 index 00000000000..5431673fd62 --- /dev/null +++ b/tests/queries/0_stateless/01172_transaction_counters.sql @@ -0,0 
+1,50 @@ +-- Tags: no-s3-storage +-- FIXME this test fails with S3 due to a bug in DiskCacheWrapper +drop table if exists txn_counters; + +create table txn_counters (n Int64, creation_tid DEFAULT transactionID()) engine=MergeTree order by n; + +insert into txn_counters(n) values (1); +select transactionID(); + +-- stop background cleanup +system stop merges txn_counters; + +set throw_on_unsupported_query_inside_transaction=0; + +begin transaction; +insert into txn_counters(n) values (2); +select 1, system.parts.name, txn_counters.creation_tid = system.parts.creation_tid from txn_counters join system.parts on txn_counters._part = system.parts.name where database=currentDatabase() and table='txn_counters' order by system.parts.name; +select 2, name, creation_csn, removal_tid, removal_csn from system.parts where database=currentDatabase() and table='txn_counters' order by system.parts.name; +rollback; + +begin transaction; +insert into txn_counters(n) values (3); +select 3, system.parts.name, txn_counters.creation_tid = system.parts.creation_tid from txn_counters join system.parts on txn_counters._part = system.parts.name where database=currentDatabase() and table='txn_counters' order by system.parts.name; +select 4, name, creation_csn, removal_tid, removal_csn from system.parts where database=currentDatabase() and table='txn_counters' order by system.parts.name; +select 5, transactionID().3 == serverUUID(); +commit; + +detach table txn_counters; +attach table txn_counters; + +begin transaction; +insert into txn_counters(n) values (4); +select 6, system.parts.name, txn_counters.creation_tid = system.parts.creation_tid from txn_counters join system.parts on txn_counters._part = system.parts.name where database=currentDatabase() and table='txn_counters' order by system.parts.name; +select 7, name, removal_tid, removal_csn from system.parts where database=currentDatabase() and table='txn_counters' order by system.parts.name; +select 8, transactionID().3 == serverUUID(); +commit; + +begin transaction; +insert into txn_counters(n) values (5); +alter table txn_counters drop partition id 'all'; +rollback; + +system flush logs; +select indexOf((select arraySort(groupUniqArray(tid)) from system.transactions_info_log where database=currentDatabase() and table='txn_counters'), tid), + (toDecimal64(now64(6), 6) - toDecimal64(event_time, 6)) < 100, type, thread_id!=0, length(query_id)=length(queryID()), tid_hash!=0, csn=0, part +from system.transactions_info_log +where tid in (select tid from system.transactions_info_log where database=currentDatabase() and table='txn_counters' and not (tid.1=1 and tid.2=1)) +or (database=currentDatabase() and table='txn_counters') order by event_time; + +drop table txn_counters; diff --git a/tests/queries/0_stateless/01173_transaction_control_queries.reference b/tests/queries/0_stateless/01173_transaction_control_queries.reference new file mode 100644 index 00000000000..01acdffc581 --- /dev/null +++ b/tests/queries/0_stateless/01173_transaction_control_queries.reference @@ -0,0 +1,12 @@ +commit [1,10] +rollback [1,2,10,20] +no nested [1,10] +on exception before start [1,3,10,30] +on exception while processing [1,4,10,40] +on session close [1,6,10,60] +commit [1,7,10,70] +readonly [1,7,10,70] +snapshot 2 8 +snapshot1 0 0 +snapshot3 1 +snapshot100500 2 8 diff --git a/tests/queries/0_stateless/01173_transaction_control_queries.sql b/tests/queries/0_stateless/01173_transaction_control_queries.sql new file mode 100644 index 00000000000..930a2909f7a --- /dev/null +++ 
b/tests/queries/0_stateless/01173_transaction_control_queries.sql @@ -0,0 +1,102 @@ +drop table if exists mt1; +drop table if exists mt2; + +create table mt1 (n Int64) engine=MergeTree order by n; +create table mt2 (n Int64) engine=MergeTree order by n; + +commit; -- { serverError INVALID_TRANSACTION } +rollback; -- { serverError INVALID_TRANSACTION } + +begin transaction; +insert into mt1 values (1); +insert into mt2 values (10); +select 'commit', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +commit; + +begin transaction; +insert into mt1 values (2); +insert into mt2 values (20); +select 'rollback', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +rollback; + +begin transaction; +select 'no nested', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +begin transaction; -- { serverError INVALID_TRANSACTION } +rollback; + +begin transaction; +insert into mt1 values (3); +insert into mt2 values (30); +select 'on exception before start', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +-- rollback on exception before start +select functionThatDoesNotExist(); -- { serverError 46 } +-- cannot commit after exception +commit; -- { serverError INVALID_TRANSACTION } +begin transaction; -- { serverError INVALID_TRANSACTION } +rollback; + +begin transaction; +insert into mt1 values (4); +insert into mt2 values (40); +select 'on exception while processing', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +-- rollback on exception while processing +select throwIf(100 < number) from numbers(1000); -- { serverError 395 } +-- cannot commit after exception +commit; -- { serverError INVALID_TRANSACTION } +insert into mt1 values (5); -- { serverError INVALID_TRANSACTION } +insert into mt2 values (50); -- { serverError INVALID_TRANSACTION } +select 1; -- { serverError INVALID_TRANSACTION } +rollback; + +begin transaction; +insert into mt1 values (6); +insert into mt2 values (60); +select 'on session close', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +-- trigger reconnection by error on client, check rollback on session close +insert into mt1 values ([1]); -- { clientError 43 } +commit; -- { serverError INVALID_TRANSACTION } +rollback; -- { serverError INVALID_TRANSACTION } + +begin transaction; +insert into mt1 values (7); +insert into mt2 values (70); +select 'commit', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +commit; + +begin transaction; +select 'readonly', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +commit; + +begin transaction; +select 'snapshot', count(), sum(n) from mt1; +set transaction snapshot 1; +select 'snapshot1', count(), sum(n) from mt1; +set transaction snapshot 3; +set throw_on_unsupported_query_inside_transaction=0; +select 'snapshot3', count() = (select count() from system.parts where database=currentDatabase() and table='mt1' and _state in ('Active', 'Outdated')) from mt1; +set throw_on_unsupported_query_inside_transaction=1; +set transaction snapshot 1000000000000000; +select 'snapshot100500', count(), sum(n) from mt1; +set transaction snapshot 5; -- { serverError INVALID_TRANSACTION } +rollback; + +begin transaction; +create table m (n int) engine=Memory; -- { serverError 48 } +commit; -- { serverError INVALID_TRANSACTION } +rollback; + +create table m (n int) engine=Memory; +begin transaction; +insert into m values (1); -- { 
serverError 48 } +select * from m; -- { serverError INVALID_TRANSACTION } +commit; -- { serverError INVALID_TRANSACTION } +rollback; + +begin transaction; +select * from m; -- { serverError 48 } +commit; -- { serverError INVALID_TRANSACTION } +rollback; + +drop table m; +drop table mt1; +drop table mt2; diff --git a/tests/queries/0_stateless/01174_select_insert_isolation.reference b/tests/queries/0_stateless/01174_select_insert_isolation.reference new file mode 100644 index 00000000000..ba5f4de36ac --- /dev/null +++ b/tests/queries/0_stateless/01174_select_insert_isolation.reference @@ -0,0 +1,2 @@ +200 0 100 100 0 +200 0 100 100 0 diff --git a/tests/queries/0_stateless/01174_select_insert_isolation.sh b/tests/queries/0_stateless/01174_select_insert_isolation.sh new file mode 100755 index 00000000000..8872ab82c03 --- /dev/null +++ b/tests/queries/0_stateless/01174_select_insert_isolation.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# Tags: long + +# shellcheck disable=SC2015 + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +set -e + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS mt"; +$CLICKHOUSE_CLIENT --query "CREATE TABLE mt (n Int8, m Int8) ENGINE=MergeTree ORDER BY n PARTITION BY 0 < n SETTINGS old_parts_lifetime=0"; + +function thread_insert_commit() +{ + for i in {1..50}; do + $CLICKHOUSE_CLIENT --multiquery --query " + BEGIN TRANSACTION; + INSERT INTO mt VALUES /* ($i, $1) */ ($i, $1); + INSERT INTO mt VALUES /* (-$i, $1) */ (-$i, $1); + COMMIT;"; + done +} + +function thread_insert_rollback() +{ + for _ in {1..50}; do + $CLICKHOUSE_CLIENT --multiquery --query " + BEGIN TRANSACTION; + INSERT INTO mt VALUES /* (42, $1) */ (42, $1); + ROLLBACK;"; + done +} + +function thread_select() +{ + trap "exit 0" INT + while true; do + # Result of `uniq | wc -l` must be 1 if the first and the last queries got the same result + $CLICKHOUSE_CLIENT --multiquery --query " + BEGIN TRANSACTION; + SELECT arraySort(groupArray(n)), arraySort(groupArray(m)), arraySort(groupArray(_part)) FROM mt; + SELECT throwIf((SELECT sum(n) FROM mt) != 0) FORMAT Null; + SELECT throwIf((SELECT count() FROM mt) % 2 != 0) FORMAT Null; + SELECT arraySort(groupArray(n)), arraySort(groupArray(m)), arraySort(groupArray(_part)) FROM mt; + COMMIT;" | uniq | wc -l | grep -v "^1$" && $CLICKHOUSE_CLIENT -q "SELECT * FROM system.parts + WHERE database='$CLICKHOUSE_DATABASE' AND table='mt'" ||:; + done +} + +thread_insert_commit 1 & PID_1=$! +thread_insert_commit 2 & PID_2=$! +thread_insert_rollback 3 & PID_3=$! +thread_select & PID_4=$! 
+wait $PID_1 && wait $PID_2 && wait $PID_3 +kill -INT $PID_4 +wait + +$CLICKHOUSE_CLIENT --multiquery --query " +BEGIN TRANSACTION; +SELECT count(), sum(n), sum(m=1), sum(m=2), sum(m=3) FROM mt;"; + +$CLICKHOUSE_CLIENT --query "SELECT count(), sum(n), sum(m=1), sum(m=2), sum(m=3) FROM mt;" + +$CLICKHOUSE_CLIENT --query "DROP TABLE mt"; diff --git a/tests/queries/0_stateless/01183_custom_separated_format_http.sh b/tests/queries/0_stateless/01183_custom_separated_format_http.sh index f981ef5b890..8eaa22f4ecc 100755 --- a/tests/queries/0_stateless/01183_custom_separated_format_http.sh +++ b/tests/queries/0_stateless/01183_custom_separated_format_http.sh @@ -6,9 +6,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) echo 'DROP TABLE IF EXISTS mydb' | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}" -d @- echo 'CREATE TABLE mydb (datetime String, d1 String, d2 String ) ENGINE=Memory' | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}" -d @- -echo "2021-Jan^d1^d2" | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}&query=INSERT%20INTO%20mydb%20FORMAT%20CustomSeparated%20SETTINGS%20format_custom_escaping_rule%3D%27CSV%27%2C%20format_custom_field_delimiter%20%3D%20%27%5E%27" --data-binary @- -echo -n "" | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}&query=INSERT%20INTO%20mydb%20FORMAT%20CustomSeparated%20SETTINGS%20format_custom_escaping_rule%3D%27CSV%27%2C%20format_custom_field_delimiter%20%3D%20%27%5E%27" --data-binary @- +echo "2021-Jan^d1^d2" | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}&query=INSERT%20INTO%20mydb%20SETTINGS%20format_custom_escaping_rule%3D%27CSV%27%2C%20format_custom_field_delimiter%20%3D%20%27%5E%27%20FORMAT%20CustomSeparated" --data-binary @- +echo -n "" | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}&query=INSERT%20INTO%20mydb%20SETTINGS%20format_custom_escaping_rule%3D%27CSV%27%2C%20format_custom_field_delimiter%20%3D%20%27%5E%27%20FORMAT%20CustomSeparated" --data-binary @- echo 'SELECT * FROM mydb' | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}" -d @- -printf "2021-Jan^d1^d2\n%.0s" {1..999999} | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}&query=INSERT%20INTO%20mydb%20FORMAT%20CustomSeparated%20SETTINGS%20format_custom_escaping_rule%3D%27CSV%27%2C%20format_custom_field_delimiter%20%3D%20%27%5E%27" --data-binary @- +printf "2021-Jan^d1^d2\n%.0s" {1..999999} | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}&query=INSERT%20INTO%20mydb%20SETTINGS%20format_custom_escaping_rule%3D%27CSV%27%2C%20format_custom_field_delimiter%20%3D%20%27%5E%27%20FORMAT%20CustomSeparated" --data-binary @- echo 'SELECT count(*), countDistinct(datetime, d1, d2) FROM mydb' | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}" -d @- echo 'DROP TABLE mydb' | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}" -d @- diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference index 06bd6ab04e4..039e438dc0a 100644 --- a/tests/queries/0_stateless/01271_show_privileges.reference +++ b/tests/queries/0_stateless/01271_show_privileges.reference @@ -60,6 +60,7 @@ DROP [] \N ALL TRUNCATE ['TRUNCATE TABLE'] TABLE ALL OPTIMIZE ['OPTIMIZE TABLE'] TABLE ALL KILL QUERY [] GLOBAL ALL +KILL TRANSACTION [] GLOBAL ALL MOVE PARTITION BETWEEN SHARDS [] GLOBAL ALL CREATE USER [] GLOBAL ACCESS MANAGEMENT ALTER USER [] GLOBAL ACCESS MANAGEMENT @@ -133,6 +134,7 @@ ODBC [] GLOBAL SOURCES JDBC [] GLOBAL SOURCES HDFS [] GLOBAL SOURCES S3 [] GLOBAL SOURCES +HIVE [] GLOBAL SOURCES SOURCES [] \N ALL ALL ['ALL PRIVILEGES'] \N \N NONE ['USAGE','NO PRIVILEGES'] \N \N diff --git 
a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh index dde6b8ccadb..0e258bbbb09 100755 --- a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh +++ b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh @@ -88,9 +88,9 @@ from numbers(100000); -- { serverError 241; }" > /dev/null 2>&1 # fails echo "Should throw 1" -execute_insert --testmode +execute_insert echo "Should throw 2" -execute_insert --testmode --min_insert_block_size_rows=1 --min_insert_block_size_rows_for_materialized_views=$((1<<20)) +execute_insert --min_insert_block_size_rows=1 --min_insert_block_size_rows_for_materialized_views=$((1<<20)) # passes echo "Should pass 1" diff --git a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh index d5cae099f36..0de8b3a1a25 100755 --- a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh +++ b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh @@ -41,7 +41,7 @@ $CLICKHOUSE_CLIENT -n --query=" LIFETIME(MIN 1000 MAX 2000) LAYOUT(COMPLEX_KEY_SSD_CACHE(FILE_SIZE 8192 PATH '$USER_FILES_PATH/0d'));" -$CLICKHOUSE_CLIENT --testmode -nq "SELECT dictHas('01280_db.ssd_dict', 'a', tuple('1')); -- { serverError 43 }" +$CLICKHOUSE_CLIENT -nq "SELECT dictHas('01280_db.ssd_dict', 'a', tuple('1')); -- { serverError 43 }" $CLICKHOUSE_CLIENT -n --query=" SELECT 'TEST_SMALL'; @@ -65,7 +65,7 @@ $CLICKHOUSE_CLIENT -n --query=" SELECT dictGetInt32('01280_db.ssd_dict', 'b', tuple('10', toInt32(-20))); SELECT dictGetString('01280_db.ssd_dict', 'c', tuple('10', toInt32(-20)));" -$CLICKHOUSE_CLIENT --testmode -nq "SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple(toInt32(3))); -- { serverError 53 }" +$CLICKHOUSE_CLIENT -nq "SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple(toInt32(3))); -- { serverError 53 }" $CLICKHOUSE_CLIENT -n --query="DROP DICTIONARY 01280_db.ssd_dict; DROP TABLE IF EXISTS 01280_db.keys_table; diff --git a/tests/queries/0_stateless/01293_client_interactive_vertical_multiline.expect b/tests/queries/0_stateless/01293_client_interactive_vertical_multiline.expect index 5057ec44e8a..07bdbcdac76 100755 --- a/tests/queries/0_stateless/01293_client_interactive_vertical_multiline.expect +++ b/tests/queries/0_stateless/01293_client_interactive_vertical_multiline.expect @@ -27,6 +27,11 @@ expect "Row 1:" expect "1: 1" expect ":) " +send -- "SELECT 1\\G;\r" +expect "Row 1:" +expect "1: 1" +expect ":) " + send -- "SELECT 1\\\r" expect ":-] " send -- ", 2\r" @@ -41,6 +46,14 @@ expect "1: 1" expect "2: 2" expect ":) " +send -- "SELECT 1\\\r" +expect ":-] " +send -- ", 2\\G;\r" +expect "Row 1:" +expect "1: 1" +expect "2: 2" +expect ":) " + send -- "" expect eof @@ -56,6 +69,11 @@ expect "Row 1:" expect "1: 1" expect ":) " +send -- "SELECT 1\\G;\r" +expect "Row 1:" +expect "1: 1" +expect ":) " + send -- "SELECT 1; \r" expect "│ 1 │" expect ":) " @@ -65,6 +83,11 @@ expect "Row 1:" expect "1: 1" expect ":) " +send -- "SELECT 1\\G; \r" +expect "Row 1:" +expect "1: 1" +expect ":) " + send -- "SELECT 1\r" expect ":-] " send -- ";\r" @@ -78,6 +101,13 @@ expect "Row 1:" expect "1: 1" expect ":) " +send -- "SELECT 1\r" +expect ":-] " +send -- "\\G;\r" +expect "Row 1:" +expect "1: 1" +expect ":) " + send -- "SELECT 1\r" expect ":-] " send -- ", 2;\r" @@ -92,5 +122,14 @@ expect "1: 1" expect "2: 2" expect ":) " + +send -- "SELECT 
1\r" +expect ":-] " +send -- ", 2\\G;\r" +expect "Row 1:" +expect "1: 1" +expect "2: 2" +expect ":) " + send -- "" expect eof diff --git a/tests/queries/0_stateless/01293_client_interactive_vertical_singleline.expect b/tests/queries/0_stateless/01293_client_interactive_vertical_singleline.expect index 2f871ab46d8..085a1140753 100755 --- a/tests/queries/0_stateless/01293_client_interactive_vertical_singleline.expect +++ b/tests/queries/0_stateless/01293_client_interactive_vertical_singleline.expect @@ -23,6 +23,12 @@ expect "Row 1:" expect "1: 1" expect ":) " + +send -- "SELECT 1\\G;\r" +expect "Row 1:" +expect "1: 1" +expect ":) " + send -- "SELECT 1\\\r" expect ":-] " send -- ", 2\r" @@ -37,5 +43,14 @@ expect "1: 1" expect "2: 2" expect ":) " + +send -- "SELECT 1\\\r" +expect ":-] " +send -- ", 2\\G;\r" +expect "Row 1:" +expect "1: 1" +expect "2: 2" +expect ":) " + send -- "" expect eof diff --git a/tests/queries/0_stateless/01297_create_quota.reference b/tests/queries/0_stateless/01297_create_quota.reference index 375d67346be..f3f833d7619 100644 --- a/tests/queries/0_stateless/01297_create_quota.reference +++ b/tests/queries/0_stateless/01297_create_quota.reference @@ -57,10 +57,10 @@ q2_01297 local directory [] [5259492] 0 ['r1_01297','u1_01297'] [] q3_01297 local directory ['client_key','user_name'] [5259492,15778476] 0 [] [] q4_01297 local directory [] [604800] 1 [] ['u1_01297'] -- system.quota_limits -q2_01297 5259492 0 100 \N \N 11 1000 10000 1001 10001 2.5 -q3_01297 5259492 0 \N \N \N \N 1002 \N \N \N \N -q3_01297 15778476 0 100 \N \N 11 \N \N \N \N \N -q4_01297 604800 0 \N \N \N \N \N \N \N \N \N +q2_01297 5259492 0 100 \N \N 11 1000 10000 1001 10001 2.5 \N +q3_01297 5259492 0 \N \N \N \N 1002 \N \N \N \N \N +q3_01297 15778476 0 100 \N \N 11 \N \N \N \N \N \N +q4_01297 604800 0 \N \N \N \N \N \N \N \N \N \N -- query_selects query_inserts CREATE QUOTA q1_01297 KEYED BY user_name FOR INTERVAL 1 minute MAX query_selects = 1 TO r1_01297 CREATE QUOTA q2_01297 KEYED BY user_name FOR INTERVAL 1 minute MAX query_inserts = 1 TO r1_01297 diff --git a/tests/queries/0_stateless/01428_nullable_asof_join.sql b/tests/queries/0_stateless/01428_nullable_asof_join.sql index 30e5c51eb1c..e1b00158d68 100644 --- a/tests/queries/0_stateless/01428_nullable_asof_join.sql +++ b/tests/queries/0_stateless/01428_nullable_asof_join.sql @@ -109,3 +109,8 @@ FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM n ASOF JOIN (SELECT 1 as pk, toNullable(0) as dt) b ON a.dt >= b.dt AND a.pk = b.pk ORDER BY a.dt; -- { serverError 48 } + +SELECT * +FROM (SELECT NULL AS y, 1 AS x, '2020-01-01 10:10:10' :: DateTime64 AS t) AS t1 +ASOF LEFT JOIN (SELECT NULL AS y, 1 AS x, '2020-01-01 10:10:10' :: DateTime64 AS t) AS t2 +ON t1.t <= t2.t AND t1.x == t2.x FORMAT Null; diff --git a/tests/queries/0_stateless/01576_alias_column_rewrite.reference b/tests/queries/0_stateless/01576_alias_column_rewrite.reference index 11cc146dd62..68875735110 100644 --- a/tests/queries/0_stateless/01576_alias_column_rewrite.reference +++ b/tests/queries/0_stateless/01576_alias_column_rewrite.reference @@ -35,10 +35,11 @@ Expression (Projection) ReadFromMergeTree (default.test_table) Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) - Sorting - Expression (Before ORDER BY) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromMergeTree (default.test_table) + Expression (Before ORDER BY [lifted up part]) + Sorting + Expression (Before ORDER BY) + SettingQuotaAndLimits (Set 
limits and quota after reading from storage) + ReadFromMergeTree (default.test_table) optimize_aggregation_in_order Expression ((Projection + Before ORDER BY)) Aggregating diff --git a/tests/queries/0_stateless/01591_window_functions.reference b/tests/queries/0_stateless/01591_window_functions.reference index 655232fcdd4..c766bf16f19 100644 --- a/tests/queries/0_stateless/01591_window_functions.reference +++ b/tests/queries/0_stateless/01591_window_functions.reference @@ -925,10 +925,11 @@ Expression ((Projection + Before ORDER BY)) Window (Window step for window \'ORDER BY o ASC, number ASC\') Sorting (Sorting for window \'ORDER BY o ASC, number ASC\') Window (Window step for window \'ORDER BY number ASC\') - Sorting (Sorting for window \'ORDER BY number ASC\') - Expression ((Before window functions + (Projection + Before ORDER BY))) - SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromStorage (SystemNumbers) + Expression ((Before window functions + (Projection + Before ORDER BY)) [lifted up part]) + Sorting (Sorting for window \'ORDER BY number ASC\') + Expression ((Before window functions + (Projection + Before ORDER BY))) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) -- A test case for the sort comparator found by fuzzer. SELECT max(number) OVER (ORDER BY number DESC NULLS FIRST), diff --git a/tests/queries/0_stateless/01593_insert_settings.sql b/tests/queries/0_stateless/01593_insert_settings.sql index 7ef49f54049..88a58b2152e 100644 --- a/tests/queries/0_stateless/01593_insert_settings.sql +++ b/tests/queries/0_stateless/01593_insert_settings.sql @@ -2,9 +2,8 @@ drop table if exists data_01593; create table data_01593 (key Int) engine=MergeTree() order by key partition by key; insert into data_01593 select * from numbers_mt(10); --- TOO_MANY_PARTS error -insert into data_01593 select * from numbers_mt(10) settings max_partitions_per_insert_block=1; -- { serverError 252 } +insert into data_01593 select * from numbers_mt(10) settings max_partitions_per_insert_block=1; -- { serverError TOO_MANY_PARTS } -- settings for INSERT is prefered -insert into data_01593 select * from numbers_mt(10) settings max_partitions_per_insert_block=1 settings max_partitions_per_insert_block=100; +insert into data_01593 settings max_partitions_per_insert_block=100 select * from numbers_mt(10) settings max_partitions_per_insert_block=1; drop table data_01593; diff --git a/tests/queries/0_stateless/01600_remerge_sort_lowered_memory_bytes_ratio.sql b/tests/queries/0_stateless/01600_remerge_sort_lowered_memory_bytes_ratio.sql index 5de4210d3f2..6e23ab9cdb9 100644 --- a/tests/queries/0_stateless/01600_remerge_sort_lowered_memory_bytes_ratio.sql +++ b/tests/queries/0_stateless/01600_remerge_sort_lowered_memory_bytes_ratio.sql @@ -10,8 +10,8 @@ set max_block_size=40960; -- MergeSortingTransform: Re-merging intermediate ORDER BY data (20 blocks with 819200 rows) to save memory consumption -- MergeSortingTransform: Memory usage is lowered from 186.25 MiB to 95.00 MiB -- MergeSortingTransform: Re-merging is not useful (memory usage was not lowered by remerge_sort_lowered_memory_bytes_ratio=2.0) -select number k, repeat(toString(number), 11) v1, repeat(toString(number), 12) v2 from numbers(3e6) order by k limit 400e3 format Null; -- { serverError 241 } -select number k, repeat(toString(number), 11) v1, repeat(toString(number), 12) v2 from numbers(3e6) order by k limit 400e3 settings remerge_sort_lowered_memory_bytes_ratio=2. 
format Null; -- { serverError 241 } +select number k, repeat(toString(number), 11) v1, repeat(toString(number), 12) v2 from numbers(3e6) order by v1, v2 limit 400e3 format Null; -- { serverError 241 } +select number k, repeat(toString(number), 11) v1, repeat(toString(number), 12) v2 from numbers(3e6) order by v1, v2 limit 400e3 settings remerge_sort_lowered_memory_bytes_ratio=2. format Null; -- { serverError 241 } -- remerge_sort_lowered_memory_bytes_ratio 1.9 is good (need at least 1.91/0.98=1.94) -- MergeSortingTransform: Re-merging intermediate ORDER BY data (20 blocks with 819200 rows) to save memory consumption diff --git a/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql b/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql index 750809da338..7654be4eb29 100644 --- a/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql +++ b/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql @@ -22,7 +22,7 @@ OPTIMIZE TABLE adaptive_table FINAL; SELECT marks FROM system.parts WHERE table = 'adaptive_table' and database=currentDatabase() and active; -SET remote_fs_enable_cache = 0; +SET enable_filesystem_cache = 0; -- If we have computed granularity incorrectly than we will exceed this limit. SET max_memory_usage='30M'; diff --git a/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql b/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql index 7ec3153886c..36b6c97460c 100644 --- a/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql +++ b/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql @@ -3,7 +3,7 @@ drop table if exists data_01641; -- Disable cache for s3 storage tests because it increases memory usage. -set remote_fs_enable_cache=0; +set enable_filesystem_cache=0; set remote_filesystem_read_method='read'; create table data_01641 (key Int, value String) engine=MergeTree order by (key, repeat(value, 40)) settings old_parts_lifetime=0, min_bytes_for_wide_part=0; diff --git a/tests/queries/0_stateless/01655_plan_optimizations.reference b/tests/queries/0_stateless/01655_plan_optimizations.reference index 33a7ff44b74..bb9c614f728 100644 --- a/tests/queries/0_stateless/01655_plan_optimizations.reference +++ b/tests/queries/0_stateless/01655_plan_optimizations.reference @@ -142,3 +142,12 @@ Filter Filter 2 3 2 3 +> function calculation should be done after sorting and limit (if possible) +> Expression should be divided into two subexpressions and only one of them should be moved after Sorting +Expression (Before ORDER BY [lifted up part]) +FUNCTION sipHash64 +Sorting +Expression (Before ORDER BY) +FUNCTION plus +> this query should be executed without throwing an exception +0 diff --git a/tests/queries/0_stateless/01655_plan_optimizations.sh b/tests/queries/0_stateless/01655_plan_optimizations.sh index b66d788a338..0b7f004a2ce 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations.sh @@ -196,3 +196,12 @@ $CLICKHOUSE_CLIENT -q " select a, b from ( select number + 1 as a, number + 2 as b from numbers(2) union all select number + 1 as b, number + 2 as a from numbers(2) ) where a != 1 settings enable_optimize_predicate_expression = 0" + +echo "> function calculation should be done after sorting and limit (if possible)" +echo "> Expression should be divided into two subexpressions and only one of them should be moved after Sorting" +$CLICKHOUSE_CLIENT -q " + explain actions = 1 select number as n, 
sipHash64(n) from numbers(100) order by number + 1 limit 5" | + sed 's/^ *//g' | grep -o "^ *\(Expression (Before ORDER BY.*)\|Sorting\|FUNCTION \w\+\)" +echo "> this query should be executed without throwing an exception" +$CLICKHOUSE_CLIENT -q " + select throwIf(number = 5) from (select * from numbers(10)) order by number limit 1" diff --git a/tests/queries/0_stateless/01691_parser_data_type_exponential.sh b/tests/queries/0_stateless/01691_parser_data_type_exponential.sh index 2b1d34982a2..f8004f9350d 100755 --- a/tests/queries/0_stateless/01691_parser_data_type_exponential.sh +++ b/tests/queries/0_stateless/01691_parser_data_type_exponential.sh @@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh # Check that DataType parser does not have exponential complexity in the case found by fuzzer. -for _ in {1..10}; do ${CLICKHOUSE_CLIENT} -n --testmode --query "SELECT CAST(1 AS A2222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222220000000000000000000000000000000000000000000000000000000000000000000000000000002260637443813394204 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggre222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 22222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 2222222222222eFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 
222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 22222222222222222222222222222222222222222222222222222222222222222222222222222222222222222, 222222222222222ggregateFuncpion(groupBitmap222222222222222222222222222222222222222222222222222222222222222222222222000000000000000000001788596394540167623 222222222222222222ggregateFu22222222222222222222222222 222222222, UInt33)); -- { clientError 62 }"; done +for _ in {1..10}; do ${CLICKHOUSE_CLIENT} -n --query "SELECT CAST(1 AS A2222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222220000000000000000000000000000000000000000000000000000000000000000000000000000002260637443813394204 
222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggre222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 22222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 2222222222222eFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 
22222222222222222222222222222222222222222222222222222222222222222222222222222222222222222, 222222222222222ggregateFuncpion(groupBitmap222222222222222222222222222222222222222222222222222222222222222222222222000000000000000000001788596394540167623 222222222222222222ggregateFu22222222222222222222222222 222222222, UInt33)); -- { clientError 62 }"; done diff --git a/tests/queries/0_stateless/01710_minmax_count_projection.reference b/tests/queries/0_stateless/01710_minmax_count_projection.reference index b13738a66de..259d320a38a 100644 --- a/tests/queries/0_stateless/01710_minmax_count_projection.reference +++ b/tests/queries/0_stateless/01710_minmax_count_projection.reference @@ -9,6 +9,7 @@ 1 9999 3 2021-10-25 10:00:00 2021-10-27 10:00:00 3 +2021-10-25 10:00:00 2021-10-27 10:00:00 3 1 1 1 @@ -17,3 +18,5 @@ 0 2021-10-24 10:00:00 0 +1000 +1000 diff --git a/tests/queries/0_stateless/01710_minmax_count_projection.sql b/tests/queries/0_stateless/01710_minmax_count_projection.sql index 0792fe331bb..a6c04725583 100644 --- a/tests/queries/0_stateless/01710_minmax_count_projection.sql +++ b/tests/queries/0_stateless/01710_minmax_count_projection.sql @@ -50,6 +50,8 @@ drop table if exists d; create table d (dt DateTime, j int) engine MergeTree partition by (toDate(dt), ceiling(j), toDate(dt), CEILING(j)) order by tuple(); insert into d values ('2021-10-24 10:00:00', 10), ('2021-10-25 10:00:00', 10), ('2021-10-26 10:00:00', 10), ('2021-10-27 10:00:00', 10); select min(dt), max(dt), count() from d where toDate(dt) >= '2021-10-25'; +-- fuzz crash +select min(dt), max(dt), count(toDate(dt) >= '2021-10-25') from d where toDate(dt) >= '2021-10-25'; select count() from d group by toDate(dt); -- fuzz crash @@ -59,3 +61,15 @@ SELECT min(dt) FROM d PREWHERE ((0.9998999834060669 AND 1023) AND 255) <= ceil(j SELECT count('') AND NULL FROM d PREWHERE ceil(j) <= NULL; drop table d; + +-- count variant optimization + +drop table if exists test; +create table test (id Int64, d Int64, projection dummy(select * order by id)) engine MergeTree order by id; +insert into test select number, number from numbers(1e3); + +select count(if(d=4, d, 1)) from test settings force_optimize_projection = 1; +select count(d/3) from test settings force_optimize_projection = 1; +select count(if(d=4, Null, 1)) from test settings force_optimize_projection = 1; -- { serverError 584 } + +drop table test; diff --git a/tests/queries/0_stateless/01801_s3_cluster.reference b/tests/queries/0_stateless/01801_s3_cluster.reference index 31c97f14fa3..0448ff3933b 100644 --- a/tests/queries/0_stateless/01801_s3_cluster.reference +++ b/tests/queries/0_stateless/01801_s3_cluster.reference @@ -2,30 +2,6 @@ 0 0 0 0 0 0 1 2 3 -10 11 12 -13 14 15 -16 17 18 -20 21 22 -23 24 25 -26 27 28 -4 5 6 -7 8 9 -0 0 0 -0 0 0 -0 0 0 -1 2 3 -10 11 12 -13 14 15 -16 17 18 -20 21 22 -23 24 25 -26 27 28 -4 5 6 -7 8 9 -0 0 0 -0 0 0 -0 0 0 -1 2 3 4 5 6 7 8 9 10 11 12 @@ -38,14 +14,26 @@ 0 0 0 0 0 0 1 2 3 +4 5 6 +7 8 9 10 11 12 13 14 15 16 17 18 20 21 22 23 24 25 26 27 28 +0 0 0 +0 0 0 +0 0 0 +1 2 3 4 5 6 7 8 9 +10 11 12 +13 14 15 +16 17 18 +20 21 22 +23 24 25 +26 27 28 0 0 0 0 0 0 0 0 0 @@ -62,14 +50,26 @@ 0 0 0 0 0 0 1 2 3 +4 5 6 +7 8 9 10 11 12 13 14 15 16 17 18 20 21 22 23 24 25 26 27 28 +0 0 0 +0 0 0 +0 0 0 +1 2 3 4 5 6 7 8 9 +10 11 12 +13 14 15 +16 17 18 +20 21 22 +23 24 25 +26 27 28 0 0 0 0 0 0 0 0 0 diff --git a/tests/queries/0_stateless/01825_type_json_9.reference b/tests/queries/0_stateless/01825_type_json_9.reference new file mode 100644 index 00000000000..a426b09a100 
--- /dev/null +++ b/tests/queries/0_stateless/01825_type_json_9.reference @@ -0,0 +1 @@ +Tuple(foo Int8, k1 Int8, k2 Int8) diff --git a/tests/queries/0_stateless/01825_type_json_9.sql b/tests/queries/0_stateless/01825_type_json_9.sql new file mode 100644 index 00000000000..8fa4b335578 --- /dev/null +++ b/tests/queries/0_stateless/01825_type_json_9.sql @@ -0,0 +1,16 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json; + +SET allow_experimental_object_type = 1; + +CREATE TABLE t_json(id UInt64, obj JSON) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_json format JSONEachRow {"id": 1, "obj": {"foo": 1, "k1": 2}}; +INSERT INTO t_json format JSONEachRow {"id": 2, "obj": {"foo": 1, "k2": 2}}; + +OPTIMIZE TABLE t_json FINAL; + +SELECT any(toTypeName(obj)) from t_json; + +DROP TABLE IF EXISTS t_json; diff --git a/tests/queries/0_stateless/01825_type_json_empty_string.reference b/tests/queries/0_stateless/01825_type_json_empty_string.reference new file mode 100644 index 00000000000..7e3d2e54336 --- /dev/null +++ b/tests/queries/0_stateless/01825_type_json_empty_string.reference @@ -0,0 +1,4 @@ +1 (0,'') +2 (1,'v1') +3 (0,'') +4 (2,'') diff --git a/tests/queries/0_stateless/01825_type_json_empty_string.sql b/tests/queries/0_stateless/01825_type_json_empty_string.sql new file mode 100644 index 00000000000..b3f2a7cb120 --- /dev/null +++ b/tests/queries/0_stateless/01825_type_json_empty_string.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json_empty_str; +SET allow_experimental_object_type = 1; + +CREATE TABLE t_json_empty_str(id UInt32, o JSON) ENGINE = Memory; + +INSERT INTO t_json_empty_str VALUES (1, ''), (2, '{"k1": 1, "k2": "v1"}'), (3, '{}'), (4, '{"k1": 2}'); + +SELECT * FROM t_json_empty_str ORDER BY id; + +DROP TABLE t_json_empty_str; diff --git a/tests/queries/0_stateless/01825_type_json_ephemeral.reference b/tests/queries/0_stateless/01825_type_json_ephemeral.reference new file mode 100644 index 00000000000..67d2a24d3fe --- /dev/null +++ b/tests/queries/0_stateless/01825_type_json_ephemeral.reference @@ -0,0 +1 @@ +PushEvent some-repo (('https://avatars.githubusercontent.com/u/123213213?','github-actions','',123123123,'github-actions[bot]','https://api.github.com/users/github-actions[bot]'),'2022-01-04 07:00:00',(1001001010101,'some-repo','https://api.github.com/repos/some-repo'),'PushEvent') diff --git a/tests/queries/0_stateless/01825_type_json_ephemeral.sql b/tests/queries/0_stateless/01825_type_json_ephemeral.sql new file mode 100644 index 00000000000..4485510e419 --- /dev/null +++ b/tests/queries/0_stateless/01825_type_json_ephemeral.sql @@ -0,0 +1,19 @@ +-- Tags: no-fasttest + +SET allow_experimental_object_type = 1; + +DROP TABLE IF EXISTS t_github_json; + +CREATE table t_github_json +( + event_type LowCardinality(String) DEFAULT JSONExtractString(message_raw, 'type'), + repo_name LowCardinality(String) DEFAULT JSONExtractString(message_raw, 'repo', 'name'), + message JSON DEFAULT message_raw, + message_raw String EPHEMERAL +) ENGINE = MergeTree ORDER BY (event_type, repo_name); + +INSERT INTO t_github_json (message_raw) FORMAT JSONEachRow {"message_raw": "{\"type\":\"PushEvent\", \"created_at\": \"2022-01-04 07:00:00\", 
\"actor\":{\"avatar_url\":\"https://avatars.githubusercontent.com/u/123213213?\",\"display_login\":\"github-actions\",\"gravatar_id\":\"\",\"id\":123123123,\"login\":\"github-actions[bot]\",\"url\":\"https://api.github.com/users/github-actions[bot]\"},\"repo\":{\"id\":1001001010101,\"name\":\"some-repo\",\"url\":\"https://api.github.com/repos/some-repo\"}}"} + +SELECT * FROM t_github_json ORDER BY event_type, repo_name; + +DROP TABLE t_github_json; diff --git a/tests/queries/0_stateless/01825_type_json_parallel_insert.sql b/tests/queries/0_stateless/01825_type_json_parallel_insert.sql index f54004a6630..93d1eecfbd7 100644 --- a/tests/queries/0_stateless/01825_type_json_parallel_insert.sql +++ b/tests/queries/0_stateless/01825_type_json_parallel_insert.sql @@ -1,4 +1,4 @@ --- Tags: long +-- Tags: long, no-backward-compatibility-check:22.3.2.1 DROP TABLE IF EXISTS t_json_parallel; SET allow_experimental_object_type = 1, max_insert_threads = 20, max_threads = 20; diff --git a/tests/queries/0_stateless/01825_type_json_partitions.reference b/tests/queries/0_stateless/01825_type_json_partitions.reference new file mode 100644 index 00000000000..5a7ba251572 --- /dev/null +++ b/tests/queries/0_stateless/01825_type_json_partitions.reference @@ -0,0 +1,2 @@ +{"id":1,"obj":{"k1":"v1","k2":""}} +{"id":2,"obj":{"k1":"","k2":"v2"}} diff --git a/tests/queries/0_stateless/01825_type_json_partitions.sql b/tests/queries/0_stateless/01825_type_json_partitions.sql new file mode 100644 index 00000000000..27804e7edae --- /dev/null +++ b/tests/queries/0_stateless/01825_type_json_partitions.sql @@ -0,0 +1,15 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json_partitions; + +SET allow_experimental_object_type = 1; +SET output_format_json_named_tuples_as_objects = 1; + +CREATE TABLE t_json_partitions (id UInt32, obj JSON) +ENGINE MergeTree ORDER BY id PARTITION BY id; + +INSERT INTO t_json_partitions FORMAT JSONEachRow {"id": 1, "obj": {"k1": "v1"}} {"id": 2, "obj": {"k2": "v2"}}; + +SELECT * FROM t_json_partitions ORDER BY id FORMAT JSONEachRow; + +DROP TABLE t_json_partitions; diff --git a/tests/queries/0_stateless/01825_type_json_schema_inference.reference b/tests/queries/0_stateless/01825_type_json_schema_inference.reference index c2c18b5a2ff..6f1a65c6af3 100644 --- a/tests/queries/0_stateless/01825_type_json_schema_inference.reference +++ b/tests/queries/0_stateless/01825_type_json_schema_inference.reference @@ -5,4 +5,4 @@ Tuple(k1 Int8, k2 Tuple(k3 String, k4 Nested(k5 Int8, k6 Int8)), some Int8) {"id":"2","obj":"bbb","s":"bar"} {"map":{"k1":1,"k2":2},"obj":{"k1":1,"k2.k3":2},"map_type":"Map(String, Nullable(Float64))","obj_type":"Object('json')"} {"obj":{"k1":1,"k2":2},"map":{"k1":"1","k2":"2"}} -Tuple(k1 Float64, k2 Float64) +Tuple(k1 Int8, k2 Int8) diff --git a/tests/queries/0_stateless/01880_materialized_view_to_table_type_check.reference b/tests/queries/0_stateless/01880_materialized_view_to_table_type_check.reference new file mode 100644 index 00000000000..5498a268179 --- /dev/null +++ b/tests/queries/0_stateless/01880_materialized_view_to_table_type_check.reference @@ -0,0 +1,5 @@ +----------test--------: +----------test--------: +100 \0\0\0\0\0\0\0 +101 \0\0\0\0\0\0\0 +102 \0\0\0\0\0\0\0 diff --git a/tests/queries/0_stateless/01880_materialized_view_to_table_type_check.sql b/tests/queries/0_stateless/01880_materialized_view_to_table_type_check.sql new file mode 100644 index 00000000000..2da9884ba8e --- /dev/null +++ b/tests/queries/0_stateless/01880_materialized_view_to_table_type_check.sql @@ 
-0,0 +1,23 @@ +DROP TABLE IF EXISTS test_mv; +DROP TABLE IF EXISTS test; +DROP TABLE IF EXISTS test_input; + +CREATE TABLE test_input(id Int32) ENGINE=MergeTree() order by id; + +CREATE TABLE test(`id` Int32, `pv` AggregateFunction(sum, Int32)) ENGINE = AggregatingMergeTree() ORDER BY id; + +CREATE MATERIALIZED VIEW test_mv to test(`id` Int32, `pv` AggregateFunction(sum, Int32)) as SELECT id, sumState(1) as pv from test_input group by id; -- { serverError 70 } + +INSERT INTO test_input SELECT toInt32(number % 1000) AS id FROM numbers(10); +select '----------test--------:'; +select * from test; + +create MATERIALIZED VIEW test_mv to test(`id` Int32, `pv` AggregateFunction(sum, Int32)) as SELECT id, sumState(toInt32(1)) as pv from test_input group by id; +INSERT INTO test_input SELECT toInt32(number % 1000) AS id FROM numbers(100,3); + +select '----------test--------:'; +select * from test; + +DROP TABLE test_mv; +DROP TABLE test; +DROP TABLE test_input; diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh index 23df052a8d6..a29d0661621 100755 --- a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh +++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh @@ -1,6 +1,8 @@ #!/usr/bin/env bash # Tags: long, zookeeper, no-parallel +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=error + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01926_order_by_desc_limit.sql b/tests/queries/0_stateless/01926_order_by_desc_limit.sql index 9f65cf73252..86468b4fcd6 100644 --- a/tests/queries/0_stateless/01926_order_by_desc_limit.sql +++ b/tests/queries/0_stateless/01926_order_by_desc_limit.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS order_by_desc; -SET remote_fs_enable_cache=0; +SET enable_filesystem_cache=0; CREATE TABLE order_by_desc (u UInt32, s String) ENGINE MergeTree ORDER BY u PARTITION BY u % 100 diff --git a/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh b/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh index b846136ae58..972ff3ba73f 100755 --- a/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh +++ b/tests/queries/0_stateless/02006_client_test_hint_no_such_error_name.sh @@ -5,4 +5,4 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --testmode -n -q 'select 1 -- { clientError FOOBAR }' |& grep -o 'No error code with name:.*' +$CLICKHOUSE_CLIENT -n -q 'select 1 -- { clientError FOOBAR }' |& grep -o 'No error code with name:.*' diff --git a/tests/queries/0_stateless/02008_tuple_to_name_value_pairs.sql b/tests/queries/0_stateless/02008_tuple_to_name_value_pairs.sql index 9204975b579..59987a86590 100644 --- a/tests/queries/0_stateless/02008_tuple_to_name_value_pairs.sql +++ b/tests/queries/0_stateless/02008_tuple_to_name_value_pairs.sql @@ -4,7 +4,7 @@ DROP TABLE IF EXISTS test02008; CREATE TABLE test02008 ( col Tuple( a Tuple(key1 int, key2 int), - b Tuple(key1 int, key3 int) + b Tuple(key1 int, key2 int) ) ) ENGINE=Memory(); INSERT INTO test02008 VALUES (tuple(tuple(1, 2), tuple(3, 4))); diff --git a/tests/queries/0_stateless/02010_lc_native.python b/tests/queries/0_stateless/02010_lc_native.python index 71965512e64..e6d6f9e1317 100755 --- a/tests/queries/0_stateless/02010_lc_native.python +++ b/tests/queries/0_stateless/02010_lc_native.python @@ -143,7 +143,7 @@ def sendQuery(s, query): writeStringBinary('', ba) # No interserver secret writeVarUInt(2, ba) # Stage - Complete ba.append(0) # No compression - writeStringBinary(query + ' settings input_format_defaults_for_omitted_fields=0', ba) # query, finally + writeStringBinary(query, ba) # query, finally s.sendall(ba) @@ -205,7 +205,7 @@ def insertValidLowCardinalityRow(): s.connect((CLICKHOUSE_HOST, CLICKHOUSE_PORT)) sendHello(s) receiveHello(s) - sendQuery(s, 'insert into {}.tab format TSV'.format(CLICKHOUSE_DATABASE)) + sendQuery(s, 'insert into {}.tab settings input_format_defaults_for_omitted_fields=0 format TSV'.format(CLICKHOUSE_DATABASE)) # external tables sendEmptyBlock(s) @@ -241,7 +241,7 @@ def insertLowCardinalityRowWithIndexOverflow(): s.connect((CLICKHOUSE_HOST, CLICKHOUSE_PORT)) sendHello(s) receiveHello(s) - sendQuery(s, 'insert into {}.tab format TSV'.format(CLICKHOUSE_DATABASE)) + sendQuery(s, 'insert into {}.tab settings input_format_defaults_for_omitted_fields=0 format TSV'.format(CLICKHOUSE_DATABASE)) # external tables sendEmptyBlock(s) @@ -275,7 +275,7 @@ def insertLowCardinalityRowWithIncorrectDictType(): s.connect((CLICKHOUSE_HOST, CLICKHOUSE_PORT)) sendHello(s) receiveHello(s) - sendQuery(s, 'insert into {}.tab format TSV'.format(CLICKHOUSE_DATABASE)) + sendQuery(s, 'insert into {}.tab settings input_format_defaults_for_omitted_fields=0 format TSV'.format(CLICKHOUSE_DATABASE)) # external tables sendEmptyBlock(s) @@ -308,7 +308,7 @@ def insertLowCardinalityRowWithIncorrectAdditionalKeys(): s.connect((CLICKHOUSE_HOST, CLICKHOUSE_PORT)) sendHello(s) receiveHello(s) - sendQuery(s, 'insert into {}.tab format TSV'.format(CLICKHOUSE_DATABASE)) + sendQuery(s, 'insert into {}.tab settings input_format_defaults_for_omitted_fields=0 format TSV'.format(CLICKHOUSE_DATABASE)) # external tables sendEmptyBlock(s) diff --git a/tests/queries/0_stateless/02030_capnp_format.sh b/tests/queries/0_stateless/02030_capnp_format.sh index aa2fe6c1b35..cdc1587bccd 100755 --- a/tests/queries/0_stateless/02030_capnp_format.sh +++ b/tests/queries/0_stateless/02030_capnp_format.sh @@ -19,7 +19,8 @@ cp -r $CLIENT_SCHEMADIR/02030_* $SCHEMADIR/$SERVER_SCHEMADIR/ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS capnp_simple_types"; $CLICKHOUSE_CLIENT --query="CREATE TABLE capnp_simple_types (int8 Int8, uint8 UInt8, int16 Int16, uint16 UInt16, int32 Int32, uint32 UInt32, int64 Int64, uint64 UInt64, float32 Float32, float64 Float64, 
string String, fixed FixedString(5), data String, date Date, datetime DateTime, datetime64 DateTime64(3)) ENGINE=Memory" $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_simple_types values (-1, 1, -1000, 1000, -10000000, 1000000, -1000000000, 1000000000, 123.123, 123123123.123123123, 'Some string', 'fixed', 'Some data', '2000-01-06', '2000-06-01 19:42:42', '2000-04-01 11:21:33.123')" -$CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_simple_types FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_simple_types:Message'" | $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_simple_types FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_simple_types:Message'" +$CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_simple_types FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_simple_types:Message'" | \ + $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_simple_types SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_simple_types:Message' FORMAT CapnProto" $CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_simple_types" $CLICKHOUSE_CLIENT --query="DROP TABLE capnp_simple_types" @@ -27,7 +28,8 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE capnp_simple_types" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS capnp_tuples" $CLICKHOUSE_CLIENT --query="CREATE TABLE capnp_tuples (value UInt64, tuple1 Tuple(one UInt64, two Tuple(three UInt64, four UInt64)), tuple2 Tuple(nested1 Tuple(nested2 Tuple(x UInt64)))) ENGINE=Memory"; $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_tuples VALUES (1, (2, (3, 4)), (((5))))" -$CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_tuples FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_tuples:Message'" | $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_tuples FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_tuples:Message'" +$CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_tuples FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_tuples:Message'" | \ + $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_tuples SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_tuples:Message' FORMAT CapnProto" $CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_tuples" $CLICKHOUSE_CLIENT --query="DROP TABLE capnp_tuples" @@ -35,7 +37,8 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE capnp_tuples" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS capnp_lists" $CLICKHOUSE_CLIENT --query="CREATE TABLE capnp_lists (value UInt64, list1 Array(UInt64), list2 Array(Array(Array(UInt64)))) ENGINE=Memory"; $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_lists VALUES (1, [1, 2, 3], [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], []], []])" -$CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_lists FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_lists:Message'" | $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_lists FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_lists:Message'" +$CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_lists FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_lists:Message'" | \ + $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_lists SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_lists:Message' FORMAT CapnProto" $CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_lists" $CLICKHOUSE_CLIENT --query="DROP TABLE capnp_lists" @@ -43,7 +46,8 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE capnp_lists" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS capnp_nested_lists_and_tuples" $CLICKHOUSE_CLIENT --query="CREATE TABLE capnp_nested_lists_and_tuples (value 
UInt64, nested Tuple(a Tuple(b UInt64, c Array(Array(UInt64))), d Array(Tuple(e Array(Array(Tuple(f UInt64, g UInt64))), h Array(Tuple(k Array(UInt64))))))) ENGINE=Memory"; $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_nested_lists_and_tuples VALUES (1, ((2, [[3, 4], [5, 6], []]), [([[(7, 8), (9, 10)], [(11, 12), (13, 14)], []], [([15, 16, 17]), ([])])]))" -$CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_nested_lists_and_tuples FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_nested_lists_and_tuples:Message'" | $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_nested_lists_and_tuples FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_nested_lists_and_tuples:Message'" +$CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_nested_lists_and_tuples FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_nested_lists_and_tuples:Message'" | \ + $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_nested_lists_and_tuples SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_nested_lists_and_tuples:Message' FORMAT CapnProto" $CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_nested_lists_and_tuples" $CLICKHOUSE_CLIENT --query="DROP TABLE capnp_nested_lists_and_tuples" @@ -51,7 +55,8 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE capnp_nested_lists_and_tuples" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS capnp_nested_table" $CLICKHOUSE_CLIENT --query="CREATE TABLE capnp_nested_table (nested Nested(value UInt64, array Array(UInt64), tuple Tuple(one UInt64, two UInt64))) ENGINE=Memory"; $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_nested_table VALUES ([1, 2, 3], [[4, 5, 6], [], [7, 8]], [(9, 10), (11, 12), (13, 14)])" -$CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_nested_table FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_nested_table:Message'" | $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_nested_table FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_nested_table:Message'" +$CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_nested_table FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_nested_table:Message'" | \ + $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_nested_table SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_nested_table:Message' FORMAT CapnProto" $CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_nested_table" $CLICKHOUSE_CLIENT --query="DROP TABLE capnp_nested_table" @@ -59,7 +64,8 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE capnp_nested_table" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS capnp_nullable" $CLICKHOUSE_CLIENT --query="CREATE TABLE capnp_nullable (nullable Nullable(UInt64), array Array(Nullable(UInt64)), tuple Tuple(nullable Nullable(UInt64))) ENGINE=Memory"; $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_nullable VALUES (1, [1, Null, 2], (1)), (Null, [Null, Null, 42], (Null))" -$CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_nullable FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_nullable:Message'" | $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_nullable FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_nullable:Message'" +$CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_nullable FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_nullable:Message'" | \ + $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_nullable SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_nullable:Message' FORMAT CapnProto" $CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_nullable" $CLICKHOUSE_CLIENT --query="DROP TABLE 
capnp_nullable" @@ -78,7 +84,8 @@ $CLICKHOUSE_CLIENT --query="SELECT * FROM file('data.capnp', 'CapnProto', 'value $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS capnp_low_cardinality" $CLICKHOUSE_CLIENT --query="CREATE TABLE capnp_low_cardinality (lc1 LowCardinality(String), lc2 LowCardinality(Nullable(String)), lc3 Array(LowCardinality(Nullable(String)))) ENGINE=Memory" $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_low_cardinality VALUES ('one', 'two', ['one', Null, 'two', Null]), ('two', Null, [Null])" -$CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_low_cardinality FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_low_cardinality:Message'" | $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_low_cardinality FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_low_cardinality:Message'" +$CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_low_cardinality FORMAT CapnProto SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_low_cardinality:Message'" | \ + $CLICKHOUSE_CLIENT --query="INSERT INTO capnp_low_cardinality SETTINGS format_schema='$CLIENT_SCHEMADIR/02030_capnp_low_cardinality:Message' FORMAT CapnProto" $CLICKHOUSE_CLIENT --query="SELECT * FROM capnp_low_cardinality" $CLICKHOUSE_CLIENT --query="DROP TABLE capnp_low_cardinality" diff --git a/tests/queries/0_stateless/02050_client_profile_events.reference b/tests/queries/0_stateless/02050_client_profile_events.reference index 2451417ddf0..99b0fa6c981 100644 --- a/tests/queries/0_stateless/02050_client_profile_events.reference +++ b/tests/queries/0_stateless/02050_client_profile_events.reference @@ -1,5 +1,18 @@ +do not print any ProfileEvents packets 0 -100000 +print only last (and also number of rows to provide more info in case of failures) [ 0 ] SelectedRows: 131010 (increment) +regression test for incorrect filtering out snapshots +0 +regression test for overlap profile events snapshots between queries +[ 0 ] SelectedRows: 1 (increment) +[ 0 ] SelectedRows: 1 (increment) +regression test for overlap profile events snapshots between queries (clickhouse-local) +[ 0 ] SelectedRows: 1 (increment) +[ 0 ] SelectedRows: 1 (increment) +print everything OK +print each 100 ms +OK +check that ProfileEvents is new for each query OK diff --git a/tests/queries/0_stateless/02050_client_profile_events.sh b/tests/queries/0_stateless/02050_client_profile_events.sh index f8bcea0d1bb..dce0c80525a 100755 --- a/tests/queries/0_stateless/02050_client_profile_events.sh +++ b/tests/queries/0_stateless/02050_client_profile_events.sh @@ -4,13 +4,30 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -# do not print any ProfileEvents packets +echo 'do not print any ProfileEvents packets' $CLICKHOUSE_CLIENT -q 'select * from numbers(1e5) format Null' |& grep -c 'SelectedRows' -# print only last (and also number of rows to provide more info in case of failures) -$CLICKHOUSE_CLIENT --max_block_size=65505 --print-profile-events --profile-events-delay-ms=-1 -q 'select * from numbers(1e5)' 2> >(grep -o -e '\[ 0 \] SelectedRows: .*$' -e Exception) 1> >(wc -l) -# print everything + +echo 'print only last (and also number of rows to provide more info in case of failures)' +$CLICKHOUSE_CLIENT --max_block_size=65505 --print-profile-events --profile-events-delay-ms=-1 -q 'select * from numbers(1e5)' |& grep -o -e '\[ 0 \] SelectedRows: .*$' -e Exception + +echo 'regression test for incorrect filtering out snapshots' +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' >& /dev/null +echo $? + +echo 'regression test for overlap profile events snapshots between queries' +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' + +echo 'regression test for overlap profile events snapshots between queries (clickhouse-local)' +$CLICKHOUSE_LOCAL --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' + +echo 'print everything' profile_events="$($CLICKHOUSE_CLIENT --max_block_size 1 --print-profile-events -q 'select sleep(1) from numbers(2) format Null' |& grep -c 'SelectedRows')" test "$profile_events" -gt 1 && echo OK || echo "FAIL ($profile_events)" -# print each 100 ms + +echo 'print each 100 ms' profile_events="$($CLICKHOUSE_CLIENT --max_block_size 1 --print-profile-events --profile-events-delay-ms=100 -q 'select sleep(1) from numbers(2) format Null' |& grep -c 'SelectedRows')" test "$profile_events" -gt 1 && echo OK || echo "FAIL ($profile_events)" + +echo 'check that ProfileEvents is new for each query' +sleep_function_calls=$($CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select sleep(1); select 1' |& grep -c 'SleepFunctionCalls') +test "$sleep_function_calls" -eq 1 && echo OK || echo "FAIL ($sleep_function_calls)" diff --git a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.reference b/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.reference index d7d3ee8f362..72d9eb2928a 100644 --- a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.reference +++ b/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.reference @@ -1,8 +1,8 @@ -1 -1 -10 -10 -100 -100 -10000 -10000 +0 00000 +0 00000 +9 99999 +9 99999 +99 9999999999 +99 9999999999 +9999 99999999999999999999 +9999 99999999999999999999 diff --git a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2 b/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2 index 465aa22beb3..53d970496b2 100644 --- a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2 +++ b/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2 @@ -11,8 +11,8 @@ settings as select number, repeat(toString(number), 5) from numbers({{ rows_in_table }}); -- avoid any optimizations with ignore(*) -select count(ignore(*)) from data_02052_{{ rows_in_table }}_wide{{ wide }} settings max_read_buffer_size=1, max_threads=1; -select count(ignore(*)) from data_02052_{{ rows_in_table }}_wide{{ 
wide }} settings max_read_buffer_size=0, max_threads=1; -- { serverError CANNOT_READ_ALL_DATA } +select * apply max from data_02052_{{ rows_in_table }}_wide{{ wide }} settings max_read_buffer_size=1, max_threads=1; +select * apply max from data_02052_{{ rows_in_table }}_wide{{ wide }} settings max_read_buffer_size=0, max_threads=1; -- { serverError CANNOT_READ_ALL_DATA } drop table data_02052_{{ rows_in_table }}_wide{{ wide }}; {% endfor %} diff --git a/tests/queries/0_stateless/02117_custom_separated_with_names_and_types.sh b/tests/queries/0_stateless/02117_custom_separated_with_names_and_types.sh index 1285758866d..400bf2a56fa 100755 --- a/tests/queries/0_stateless/02117_custom_separated_with_names_and_types.sh +++ b/tests/queries/0_stateless/02117_custom_separated_with_names_and_types.sh @@ -14,7 +14,8 @@ for format in CustomSeparated CustomSeparatedWithNames CustomSeparatedWithNamesA do echo $format $CLICKHOUSE_CLIENT -q "SELECT number AS x, number + 1 AS y, 'hello' AS s FROM numbers(5) FORMAT $format $CUSTOM_SETTINGS" - $CLICKHOUSE_CLIENT -q "SELECT number AS x, number + 1 AS y, 'hello' AS s FROM numbers(5) FORMAT $format $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT -q "INSERT INTO test_02117 FORMAT $format $CUSTOM_SETTINGS" + $CLICKHOUSE_CLIENT -q "SELECT number AS x, number + 1 AS y, 'hello' AS s FROM numbers(5) FORMAT $format $CUSTOM_SETTINGS" | \ + $CLICKHOUSE_CLIENT -q "INSERT INTO test_02117 $CUSTOM_SETTINGS FORMAT $format" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" done @@ -23,66 +24,80 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE test_02117" $CLICKHOUSE_CLIENT -q "CREATE TABLE test_02117 (x UInt32, y String DEFAULT 'default', z Date) engine=Memory()" -$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, 'text' AS y, toDate('2020-01-01') AS z FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, 'text' AS y, toDate('2020-01-01') AS z FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | \ + $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 $CUSTOM_SETTINGS FORMAT CustomSeparatedWithNames" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" -$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, 'text' AS y, toDate('2020-01-01') AS z FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, 'text' AS y, toDate('2020-01-01') AS z FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | \ + $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 $CUSTOM_SETTINGS FORMAT CustomSeparatedWithNamesAndTypes" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" -$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, 'text' AS y, toDate('2020-01-01') AS z FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=0 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, 'text' AS y, toDate('2020-01-01') AS z FORMAT CustomSeparatedWithNames 
$CUSTOM_SETTINGS" | \ + $CLICKHOUSE_CLIENT --input_format_with_names_use_header=0 -q "INSERT INTO test_02117 $CUSTOM_SETTINGS FORMAT CustomSeparatedWithNames" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" -$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, 'text' AS y, toDate('2020-01-01') AS z FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=0 --input_format_with_types_use_header=0 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, 'text' AS y, toDate('2020-01-01') AS z FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | \ + $CLICKHOUSE_CLIENT --input_format_with_names_use_header=0 --input_format_with_types_use_header=0 -q "INSERT INTO test_02117 $CUSTOM_SETTINGS FORMAT CustomSeparatedWithNamesAndTypes" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" -$CLICKHOUSE_CLIENT -q "SELECT 'text' AS y, toDate('2020-01-01') AS z, toUInt32(1) AS x FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT 'text' AS y, toDate('2020-01-01') AS z, toUInt32(1) AS x FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | \ + $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 $CUSTOM_SETTINGS FORMAT CustomSeparatedWithNames" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" -$CLICKHOUSE_CLIENT -q "SELECT 'text' AS y, toDate('2020-01-01') AS z, toUInt32(1) AS x FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT 'text' AS y, toDate('2020-01-01') AS z, toUInt32(1) AS x FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | \ + $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 $CUSTOM_SETTINGS FORMAT CustomSeparatedWithNamesAndTypes" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" -$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | \ + $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 $CUSTOM_SETTINGS FORMAT CustomSeparatedWithNames" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" -$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | \ + $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 $CUSTOM_SETTINGS 
FORMAT CustomSeparatedWithNamesAndTypes" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" -$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=0 --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | \ + $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=0 --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 $CUSTOM_SETTINGS FORMAT CustomSeparatedWithNames" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" -$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=0 --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | \ + $CLICKHOUSE_CLIENT --input_format_defaults_for_omitted_fields=0 --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 $CUSTOM_SETTINGS FORMAT CustomSeparatedWithNamesAndTypes" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" -$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, [[1, 2, 3], [4, 5], []] as a FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, [[1, 2, 3], [4, 5], []] as a FORMAT CustomSeparatedWithNames $CUSTOM_SETTINGS" | \ + $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_with_names_use_header=1 -q "INSERT INTO test_02117 $CUSTOM_SETTINGS FORMAT CustomSeparatedWithNames" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" -$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, [[1, 2, 3], [4, 5], []] as a FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" +$CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, [[1, 2, 3], [4, 5], []] as a FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" | \ + $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 $CUSTOM_SETTINGS FORMAT CustomSeparatedWithNamesAndTypes" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02117" $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_02117" TMP_FILE=$CURDIR/test_02117 $CLICKHOUSE_CLIENT -q "SELECT 'text' AS x, toDate('2020-01-01') AS y, toUInt32(1) AS z FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" > $TMP_FILE -cat $TMP_FILE | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' +cat 
$TMP_FILE | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 $CUSTOM_SETTINGS FORMAT CustomSeparatedWithNamesAndTypes" 2>&1 | \ + grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' $CLICKHOUSE_CLIENT -q "SELECT toUInt32(1) AS x, 'text' as z, toDate('2020-01-01') AS y FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" > $TMP_FILE -cat $TMP_FILE | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 FORMAT CustomSeparatedWithNamesAndTypes $CUSTOM_SETTINGS" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' +cat $TMP_FILE | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02117 $CUSTOM_SETTINGS FORMAT CustomSeparatedWithNamesAndTypes" 2>&1 | \ + grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' $CLICKHOUSE_CLIENT -q "DROP TABLE test_02117" rm $TMP_FILE diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index a6a184b3d22..246b8ef6d3b 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -12,7 +12,7 @@ CREATE TABLE system.data_type_families\n(\n `name` String,\n `case_insensi CREATE TABLE system.databases\n(\n `name` String,\n `engine` String,\n `data_path` String,\n `metadata_path` String,\n `uuid` UUID,\n `comment` String,\n `database` String\n)\nENGINE = SystemDatabases()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.detached_parts\n(\n `database` String,\n `table` String,\n `partition_id` Nullable(String),\n `name` String,\n `disk` String,\n `reason` Nullable(String),\n `min_block_number` Nullable(Int64),\n `max_block_number` Nullable(Int64),\n `level` Nullable(UInt32)\n)\nENGINE = SystemDetachedParts()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.dictionaries\n(\n `database` String,\n `name` String,\n `uuid` UUID,\n `status` Enum8(\'NOT_LOADED\' = 0, \'LOADED\' = 1, \'FAILED\' = 2, \'LOADING\' = 3, \'FAILED_AND_RELOADING\' = 4, \'LOADED_AND_RELOADING\' = 5, \'NOT_EXIST\' = 6),\n `origin` String,\n `type` String,\n `key.names` Array(String),\n `key.types` Array(String),\n `attribute.names` Array(String),\n `attribute.types` Array(String),\n `bytes_allocated` UInt64,\n `query_count` UInt64,\n `hit_rate` Float64,\n `found_rate` Float64,\n `element_count` UInt64,\n `load_factor` Float64,\n `source` String,\n `lifetime_min` UInt64,\n `lifetime_max` UInt64,\n `loading_start_time` DateTime,\n `last_successful_update_time` DateTime,\n `loading_duration` Float32,\n `last_exception` String,\n `comment` String\n)\nENGINE = SystemDictionaries()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' -CREATE TABLE system.disks\n(\n `name` String,\n `path` String,\n `free_space` UInt64,\n `total_space` UInt64,\n `keep_free_space` UInt64,\n `type` String\n)\nENGINE = SystemDisks()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.disks\n(\n `name` String,\n `path` String,\n `free_space` UInt64,\n `total_space` UInt64,\n `keep_free_space` UInt64,\n `type` String,\n `cache_path` String\n)\nENGINE = SystemDisks()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.distributed_ddl_queue\n(\n `entry` String,\n `entry_version` Nullable(UInt8),\n `initiator_host` Nullable(String),\n `initiator_port` 
Nullable(UInt16),\n `cluster` String,\n `query` String,\n `settings` Map(String, String),\n `query_create_time` DateTime,\n `host` Nullable(String),\n `port` Nullable(UInt16),\n `status` Nullable(Enum8(\'Inactive\' = 0, \'Active\' = 1, \'Finished\' = 2, \'Removing\' = 3, \'Unknown\' = 4)),\n `exception_code` Nullable(UInt16),\n `exception_text` Nullable(String),\n `query_finish_time` Nullable(DateTime),\n `query_duration_ms` Nullable(UInt64)\n)\nENGINE = SystemDDLWorkerQueue()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.distribution_queue\n(\n `database` String,\n `table` String,\n `data_path` String,\n `is_blocked` UInt8,\n `error_count` UInt64,\n `data_files` UInt64,\n `data_compressed_bytes` UInt64,\n `broken_data_files` UInt64,\n `broken_data_compressed_bytes` UInt64,\n `last_exception` String\n)\nENGINE = SystemDistributionQueue()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.enabled_roles\n(\n `role_name` String,\n `with_admin_option` UInt8,\n `is_current` UInt8,\n `is_default` UInt8\n)\nENGINE = SystemEnabledRoles()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' @@ -20,7 +20,7 @@ CREATE TABLE system.errors\n(\n `name` String,\n `code` Int32,\n `value CREATE TABLE system.events\n(\n `event` String,\n `value` UInt64,\n `description` String\n)\nENGINE = SystemEvents()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.formats\n(\n `name` String,\n `is_input` UInt8,\n `is_output` UInt8\n)\nENGINE = SystemFormats()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.functions\n(\n `name` String,\n `is_aggregate` UInt8,\n `case_insensitive` UInt8,\n `alias_to` String,\n `create_query` String,\n `origin` Enum8(\'System\' = 0, \'SQLUserDefined\' = 1, \'ExecutableUserDefined\' = 2)\n)\nENGINE = SystemFunctions()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' -CREATE TABLE system.grants\n(\n `user_name` Nullable(String),\n `role_name` Nullable(String),\n `access_type` Enum16(\'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, 
\'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'MOVE PARTITION BETWEEN SHARDS\' = 62, \'CREATE USER\' = 63, \'ALTER USER\' = 64, \'DROP USER\' = 65, \'CREATE ROLE\' = 66, \'ALTER ROLE\' = 67, \'DROP ROLE\' = 68, \'ROLE ADMIN\' = 69, \'CREATE ROW POLICY\' = 70, \'ALTER ROW POLICY\' = 71, \'DROP ROW POLICY\' = 72, \'CREATE QUOTA\' = 73, \'ALTER QUOTA\' = 74, \'DROP QUOTA\' = 75, \'CREATE SETTINGS PROFILE\' = 76, \'ALTER SETTINGS PROFILE\' = 77, \'DROP SETTINGS PROFILE\' = 78, \'SHOW USERS\' = 79, \'SHOW ROLES\' = 80, \'SHOW ROW POLICIES\' = 81, \'SHOW QUOTAS\' = 82, \'SHOW SETTINGS PROFILES\' = 83, \'SHOW ACCESS\' = 84, \'ACCESS MANAGEMENT\' = 85, \'SYSTEM SHUTDOWN\' = 86, \'SYSTEM DROP DNS CACHE\' = 87, \'SYSTEM DROP MARK CACHE\' = 88, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 89, \'SYSTEM DROP MMAP CACHE\' = 90, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 91, \'SYSTEM DROP CACHE\' = 92, \'SYSTEM RELOAD CONFIG\' = 93, \'SYSTEM RELOAD SYMBOLS\' = 94, \'SYSTEM RELOAD DICTIONARY\' = 95, \'SYSTEM RELOAD MODEL\' = 96, \'SYSTEM RELOAD FUNCTION\' = 97, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 98, \'SYSTEM RELOAD\' = 99, \'SYSTEM RESTART DISK\' = 100, \'SYSTEM MERGES\' = 101, \'SYSTEM TTL MERGES\' = 102, \'SYSTEM FETCHES\' = 103, \'SYSTEM MOVES\' = 104, \'SYSTEM DISTRIBUTED SENDS\' = 105, \'SYSTEM REPLICATED SENDS\' = 106, \'SYSTEM SENDS\' = 107, \'SYSTEM REPLICATION QUEUES\' = 108, \'SYSTEM DROP REPLICA\' = 109, \'SYSTEM SYNC REPLICA\' = 110, \'SYSTEM RESTART REPLICA\' = 111, \'SYSTEM RESTORE REPLICA\' = 112, \'SYSTEM FLUSH DISTRIBUTED\' = 113, \'SYSTEM FLUSH LOGS\' = 114, \'SYSTEM FLUSH\' = 115, \'SYSTEM THREAD FUZZER\' = 116, \'SYSTEM\' = 117, \'dictGet\' = 118, \'addressToLine\' = 119, \'addressToLineWithInlines\' = 120, \'addressToSymbol\' = 121, \'demangle\' = 122, \'INTROSPECTION\' = 123, \'FILE\' = 124, \'URL\' = 125, \'REMOTE\' = 126, \'MONGO\' = 127, \'MYSQL\' = 128, \'POSTGRES\' = 129, \'SQLITE\' = 130, \'ODBC\' = 131, \'JDBC\' = 132, \'HDFS\' = 133, \'S3\' = 134, \'SOURCES\' = 135, \'ALL\' = 136, \'NONE\' = 137),\n `database` Nullable(String),\n `table` Nullable(String),\n `column` Nullable(String),\n `is_partial_revoke` UInt8,\n `grant_option` UInt8\n)\nENGINE = SystemGrants()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.grants\n(\n `user_name` Nullable(String),\n `role_name` Nullable(String),\n `access_type` Enum16(\'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER 
VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'KILL TRANSACTION\' = 62, \'MOVE PARTITION BETWEEN SHARDS\' = 63, \'CREATE USER\' = 64, \'ALTER USER\' = 65, \'DROP USER\' = 66, \'CREATE ROLE\' = 67, \'ALTER ROLE\' = 68, \'DROP ROLE\' = 69, \'ROLE ADMIN\' = 70, \'CREATE ROW POLICY\' = 71, \'ALTER ROW POLICY\' = 72, \'DROP ROW POLICY\' = 73, \'CREATE QUOTA\' = 74, \'ALTER QUOTA\' = 75, \'DROP QUOTA\' = 76, \'CREATE SETTINGS PROFILE\' = 77, \'ALTER SETTINGS PROFILE\' = 78, \'DROP SETTINGS PROFILE\' = 79, \'SHOW USERS\' = 80, \'SHOW ROLES\' = 81, \'SHOW ROW POLICIES\' = 82, \'SHOW QUOTAS\' = 83, \'SHOW SETTINGS PROFILES\' = 84, \'SHOW ACCESS\' = 85, \'ACCESS MANAGEMENT\' = 86, \'SYSTEM SHUTDOWN\' = 87, \'SYSTEM DROP DNS CACHE\' = 88, \'SYSTEM DROP MARK CACHE\' = 89, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 90, \'SYSTEM DROP MMAP CACHE\' = 91, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 92, \'SYSTEM DROP CACHE\' = 93, \'SYSTEM RELOAD CONFIG\' = 94, \'SYSTEM RELOAD SYMBOLS\' = 95, \'SYSTEM RELOAD DICTIONARY\' = 96, \'SYSTEM RELOAD MODEL\' = 97, \'SYSTEM RELOAD FUNCTION\' = 98, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 99, \'SYSTEM RELOAD\' = 100, \'SYSTEM RESTART DISK\' = 101, \'SYSTEM MERGES\' = 102, \'SYSTEM TTL MERGES\' = 103, \'SYSTEM FETCHES\' = 104, \'SYSTEM MOVES\' = 105, \'SYSTEM DISTRIBUTED SENDS\' = 106, \'SYSTEM REPLICATED SENDS\' = 107, \'SYSTEM SENDS\' = 108, \'SYSTEM REPLICATION QUEUES\' = 109, \'SYSTEM DROP REPLICA\' = 110, \'SYSTEM SYNC REPLICA\' = 111, \'SYSTEM RESTART REPLICA\' = 112, \'SYSTEM RESTORE REPLICA\' = 113, \'SYSTEM FLUSH DISTRIBUTED\' = 114, \'SYSTEM FLUSH LOGS\' = 115, \'SYSTEM FLUSH\' = 116, \'SYSTEM THREAD FUZZER\' = 117, \'SYSTEM\' = 118, \'dictGet\' = 119, \'addressToLine\' = 120, \'addressToLineWithInlines\' = 121, \'addressToSymbol\' = 122, \'demangle\' = 123, \'INTROSPECTION\' = 124, \'FILE\' = 125, \'URL\' = 126, \'REMOTE\' = 127, \'MONGO\' = 128, \'MYSQL\' = 129, \'POSTGRES\' = 130, \'SQLITE\' = 131, \'ODBC\' = 132, \'JDBC\' = 133, \'HDFS\' = 134, \'S3\' = 135, \'HIVE\' = 136, \'SOURCES\' = 137, \'ALL\' = 138, \'NONE\' = 139),\n `database` Nullable(String),\n `table` Nullable(String),\n `column` Nullable(String),\n `is_partial_revoke` UInt8,\n `grant_option` UInt8\n)\nENGINE = SystemGrants()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.graphite_retentions\n(\n `config_name` String,\n `rule_type` String,\n `regexp` String,\n `function` String,\n `age` UInt64,\n `precision` UInt64,\n `priority` UInt16,\n `is_default` UInt8,\n `Tables.database` Array(String),\n `Tables.table` Array(String)\n)\nENGINE = SystemGraphite()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.licenses\n(\n `library_name` String,\n `license_type` String,\n `license_path` String,\n `license_text` String\n)\nENGINE = SystemLicenses()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.macros\n(\n `macro` String,\n `substitution` String\n)\nENGINE = SystemMacros()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' @@ -33,16 +33,16 @@ CREATE TABLE system.numbers\n(\n `number` UInt64\n)\nENGINE = SystemNumbers() CREATE TABLE system.numbers_mt\n(\n `number` 
UInt64\n)\nENGINE = SystemNumbers()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.one\n(\n `dummy` UInt8\n)\nENGINE = SystemOne()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.part_moves_between_shards\n(\n `database` String,\n `table` String,\n `task_name` String,\n `task_uuid` UUID,\n `create_time` DateTime,\n `part_name` String,\n `part_uuid` UUID,\n `to_shard` String,\n `dst_part_name` String,\n `update_time` DateTime,\n `state` String,\n `rollback` UInt8,\n `num_tries` UInt32,\n `last_exception` String\n)\nENGINE = SystemShardMoves()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' -CREATE TABLE system.parts\n(\n `partition` String,\n `name` String,\n `uuid` UUID,\n `part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `secondary_indices_compressed_bytes` UInt64,\n `secondary_indices_uncompressed_bytes` UInt64,\n `secondary_indices_marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `is_frozen` UInt8,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `hash_of_all_files` String,\n `hash_of_uncompressed_files` String,\n `uncompressed_hash_of_compressed_files` String,\n `delete_ttl_info_min` DateTime,\n `delete_ttl_info_max` DateTime,\n `move_ttl_info.expression` Array(String),\n `move_ttl_info.min` Array(DateTime),\n `move_ttl_info.max` Array(DateTime),\n `default_compression_codec` String,\n `recompression_ttl_info.expression` Array(String),\n `recompression_ttl_info.min` Array(DateTime),\n `recompression_ttl_info.max` Array(DateTime),\n `group_by_ttl_info.expression` Array(String),\n `group_by_ttl_info.min` Array(DateTime),\n `group_by_ttl_info.max` Array(DateTime),\n `rows_where_ttl_info.expression` Array(String),\n `rows_where_ttl_info.min` Array(DateTime),\n `rows_where_ttl_info.max` Array(DateTime),\n `projections` Array(String),\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemParts()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.parts\n(\n `partition` String,\n `name` String,\n `uuid` UUID,\n `part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `secondary_indices_compressed_bytes` UInt64,\n `secondary_indices_uncompressed_bytes` UInt64,\n `secondary_indices_marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `is_frozen` UInt8,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `hash_of_all_files` String,\n `hash_of_uncompressed_files` String,\n `uncompressed_hash_of_compressed_files` String,\n `delete_ttl_info_min` DateTime,\n `delete_ttl_info_max` 
DateTime,\n `move_ttl_info.expression` Array(String),\n `move_ttl_info.min` Array(DateTime),\n `move_ttl_info.max` Array(DateTime),\n `default_compression_codec` String,\n `recompression_ttl_info.expression` Array(String),\n `recompression_ttl_info.min` Array(DateTime),\n `recompression_ttl_info.max` Array(DateTime),\n `group_by_ttl_info.expression` Array(String),\n `group_by_ttl_info.min` Array(DateTime),\n `group_by_ttl_info.max` Array(DateTime),\n `rows_where_ttl_info.expression` Array(String),\n `rows_where_ttl_info.min` Array(DateTime),\n `rows_where_ttl_info.max` Array(DateTime),\n `projections` Array(String),\n `visible` UInt8,\n `creation_tid` Tuple(UInt64, UInt64, UUID),\n `removal_tid` Tuple(UInt64, UInt64, UUID),\n `creation_csn` UInt64,\n `removal_csn` UInt64,\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemParts()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.parts_columns\n(\n `partition` String,\n `name` String,\n `uuid` UUID,\n `part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `column` String,\n `type` String,\n `column_position` UInt64,\n `default_kind` String,\n `default_expression` String,\n `column_bytes_on_disk` UInt64,\n `column_data_compressed_bytes` UInt64,\n `column_data_uncompressed_bytes` UInt64,\n `column_marks_bytes` UInt64,\n `serialization_kind` String,\n `subcolumns.names` Array(String),\n `subcolumns.types` Array(String),\n `subcolumns.serializations` Array(String),\n `subcolumns.bytes_on_disk` Array(UInt64),\n `subcolumns.data_compressed_bytes` Array(UInt64),\n `subcolumns.data_uncompressed_bytes` Array(UInt64),\n `subcolumns.marks_bytes` Array(UInt64),\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemPartsColumns()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' -CREATE TABLE system.privileges\n(\n `privilege` Enum16(\'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 
40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'MOVE PARTITION BETWEEN SHARDS\' = 62, \'CREATE USER\' = 63, \'ALTER USER\' = 64, \'DROP USER\' = 65, \'CREATE ROLE\' = 66, \'ALTER ROLE\' = 67, \'DROP ROLE\' = 68, \'ROLE ADMIN\' = 69, \'CREATE ROW POLICY\' = 70, \'ALTER ROW POLICY\' = 71, \'DROP ROW POLICY\' = 72, \'CREATE QUOTA\' = 73, \'ALTER QUOTA\' = 74, \'DROP QUOTA\' = 75, \'CREATE SETTINGS PROFILE\' = 76, \'ALTER SETTINGS PROFILE\' = 77, \'DROP SETTINGS PROFILE\' = 78, \'SHOW USERS\' = 79, \'SHOW ROLES\' = 80, \'SHOW ROW POLICIES\' = 81, \'SHOW QUOTAS\' = 82, \'SHOW SETTINGS PROFILES\' = 83, \'SHOW ACCESS\' = 84, \'ACCESS MANAGEMENT\' = 85, \'SYSTEM SHUTDOWN\' = 86, \'SYSTEM DROP DNS CACHE\' = 87, \'SYSTEM DROP MARK CACHE\' = 88, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 89, \'SYSTEM DROP MMAP CACHE\' = 90, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 91, \'SYSTEM DROP CACHE\' = 92, \'SYSTEM RELOAD CONFIG\' = 93, \'SYSTEM RELOAD SYMBOLS\' = 94, \'SYSTEM RELOAD DICTIONARY\' = 95, \'SYSTEM RELOAD MODEL\' = 96, \'SYSTEM RELOAD FUNCTION\' = 97, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 98, \'SYSTEM RELOAD\' = 99, \'SYSTEM RESTART DISK\' = 100, \'SYSTEM MERGES\' = 101, \'SYSTEM TTL MERGES\' = 102, \'SYSTEM FETCHES\' = 103, \'SYSTEM MOVES\' = 104, \'SYSTEM DISTRIBUTED SENDS\' = 105, \'SYSTEM REPLICATED SENDS\' = 106, \'SYSTEM SENDS\' = 107, \'SYSTEM REPLICATION QUEUES\' = 108, \'SYSTEM DROP REPLICA\' = 109, \'SYSTEM SYNC REPLICA\' = 110, \'SYSTEM RESTART REPLICA\' = 111, \'SYSTEM RESTORE REPLICA\' = 112, \'SYSTEM FLUSH DISTRIBUTED\' = 113, \'SYSTEM FLUSH LOGS\' = 114, \'SYSTEM FLUSH\' = 115, \'SYSTEM THREAD FUZZER\' = 116, \'SYSTEM\' = 117, \'dictGet\' = 118, \'addressToLine\' = 119, \'addressToLineWithInlines\' = 120, \'addressToSymbol\' = 121, \'demangle\' = 122, \'INTROSPECTION\' = 123, \'FILE\' = 124, \'URL\' = 125, \'REMOTE\' = 126, \'MONGO\' = 127, \'MYSQL\' = 128, \'POSTGRES\' = 129, \'SQLITE\' = 130, \'ODBC\' = 131, \'JDBC\' = 132, \'HDFS\' = 133, \'S3\' = 134, \'SOURCES\' = 135, \'ALL\' = 136, \'NONE\' = 137),\n `aliases` Array(String),\n `level` Nullable(Enum8(\'GLOBAL\' = 0, \'DATABASE\' = 1, \'TABLE\' = 2, \'DICTIONARY\' = 3, \'VIEW\' = 4, \'COLUMN\' = 5)),\n `parent_group` Nullable(Enum16(\'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER 
CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'MOVE PARTITION BETWEEN SHARDS\' = 62, \'CREATE USER\' = 63, \'ALTER USER\' = 64, \'DROP USER\' = 65, \'CREATE ROLE\' = 66, \'ALTER ROLE\' = 67, \'DROP ROLE\' = 68, \'ROLE ADMIN\' = 69, \'CREATE ROW POLICY\' = 70, \'ALTER ROW POLICY\' = 71, \'DROP ROW POLICY\' = 72, \'CREATE QUOTA\' = 73, \'ALTER QUOTA\' = 74, \'DROP QUOTA\' = 75, \'CREATE SETTINGS PROFILE\' = 76, \'ALTER SETTINGS PROFILE\' = 77, \'DROP SETTINGS PROFILE\' = 78, \'SHOW USERS\' = 79, \'SHOW ROLES\' = 80, \'SHOW ROW POLICIES\' = 81, \'SHOW QUOTAS\' = 82, \'SHOW SETTINGS PROFILES\' = 83, \'SHOW ACCESS\' = 84, \'ACCESS MANAGEMENT\' = 85, \'SYSTEM SHUTDOWN\' = 86, \'SYSTEM DROP DNS CACHE\' = 87, \'SYSTEM DROP MARK CACHE\' = 88, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 89, \'SYSTEM DROP MMAP CACHE\' = 90, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 91, \'SYSTEM DROP CACHE\' = 92, \'SYSTEM RELOAD CONFIG\' = 93, \'SYSTEM RELOAD SYMBOLS\' = 94, \'SYSTEM RELOAD DICTIONARY\' = 95, \'SYSTEM RELOAD MODEL\' = 96, \'SYSTEM RELOAD FUNCTION\' = 97, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 98, \'SYSTEM RELOAD\' = 99, \'SYSTEM RESTART DISK\' = 100, \'SYSTEM MERGES\' = 101, \'SYSTEM TTL MERGES\' = 102, \'SYSTEM FETCHES\' = 103, \'SYSTEM MOVES\' = 104, \'SYSTEM DISTRIBUTED SENDS\' = 105, \'SYSTEM REPLICATED SENDS\' = 106, \'SYSTEM SENDS\' = 107, \'SYSTEM REPLICATION QUEUES\' = 108, \'SYSTEM DROP REPLICA\' = 109, \'SYSTEM SYNC REPLICA\' = 110, \'SYSTEM RESTART REPLICA\' = 111, \'SYSTEM RESTORE REPLICA\' = 112, \'SYSTEM FLUSH DISTRIBUTED\' = 113, \'SYSTEM FLUSH LOGS\' = 114, \'SYSTEM FLUSH\' = 115, \'SYSTEM THREAD FUZZER\' = 116, \'SYSTEM\' = 117, \'dictGet\' = 118, \'addressToLine\' = 119, \'addressToLineWithInlines\' = 120, \'addressToSymbol\' = 121, \'demangle\' = 122, \'INTROSPECTION\' = 123, \'FILE\' = 124, \'URL\' = 125, \'REMOTE\' = 126, \'MONGO\' = 127, \'MYSQL\' = 128, \'POSTGRES\' = 129, \'SQLITE\' = 130, \'ODBC\' = 131, \'JDBC\' = 132, \'HDFS\' = 133, \'S3\' = 134, \'SOURCES\' = 135, \'ALL\' = 136, \'NONE\' = 137))\n)\nENGINE = SystemPrivileges()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.privileges\n(\n `privilege` Enum16(\'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP 
PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'KILL TRANSACTION\' = 62, \'MOVE PARTITION BETWEEN SHARDS\' = 63, \'CREATE USER\' = 64, \'ALTER USER\' = 65, \'DROP USER\' = 66, \'CREATE ROLE\' = 67, \'ALTER ROLE\' = 68, \'DROP ROLE\' = 69, \'ROLE ADMIN\' = 70, \'CREATE ROW POLICY\' = 71, \'ALTER ROW POLICY\' = 72, \'DROP ROW POLICY\' = 73, \'CREATE QUOTA\' = 74, \'ALTER QUOTA\' = 75, \'DROP QUOTA\' = 76, \'CREATE SETTINGS PROFILE\' = 77, \'ALTER SETTINGS PROFILE\' = 78, \'DROP SETTINGS PROFILE\' = 79, \'SHOW USERS\' = 80, \'SHOW ROLES\' = 81, \'SHOW ROW POLICIES\' = 82, \'SHOW QUOTAS\' = 83, \'SHOW SETTINGS PROFILES\' = 84, \'SHOW ACCESS\' = 85, \'ACCESS MANAGEMENT\' = 86, \'SYSTEM SHUTDOWN\' = 87, \'SYSTEM DROP DNS CACHE\' = 88, \'SYSTEM DROP MARK CACHE\' = 89, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 90, \'SYSTEM DROP MMAP CACHE\' = 91, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 92, \'SYSTEM DROP CACHE\' = 93, \'SYSTEM RELOAD CONFIG\' = 94, \'SYSTEM RELOAD SYMBOLS\' = 95, \'SYSTEM RELOAD DICTIONARY\' = 96, \'SYSTEM RELOAD MODEL\' = 97, \'SYSTEM RELOAD FUNCTION\' = 98, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 99, \'SYSTEM RELOAD\' = 100, \'SYSTEM RESTART DISK\' = 101, \'SYSTEM MERGES\' = 102, \'SYSTEM TTL MERGES\' = 103, \'SYSTEM FETCHES\' = 104, \'SYSTEM MOVES\' = 105, \'SYSTEM DISTRIBUTED SENDS\' = 106, \'SYSTEM REPLICATED SENDS\' = 107, \'SYSTEM SENDS\' = 108, \'SYSTEM REPLICATION QUEUES\' = 109, \'SYSTEM DROP REPLICA\' = 110, \'SYSTEM SYNC REPLICA\' = 111, \'SYSTEM RESTART REPLICA\' = 112, \'SYSTEM RESTORE REPLICA\' = 113, \'SYSTEM FLUSH DISTRIBUTED\' = 114, \'SYSTEM FLUSH LOGS\' = 115, \'SYSTEM FLUSH\' = 116, \'SYSTEM THREAD FUZZER\' = 117, \'SYSTEM\' = 118, \'dictGet\' = 119, \'addressToLine\' = 120, \'addressToLineWithInlines\' = 121, \'addressToSymbol\' = 122, \'demangle\' = 123, \'INTROSPECTION\' = 124, \'FILE\' = 125, \'URL\' = 126, \'REMOTE\' = 127, \'MONGO\' = 128, \'MYSQL\' = 129, \'POSTGRES\' = 130, \'SQLITE\' = 131, \'ODBC\' = 132, \'JDBC\' = 133, \'HDFS\' = 134, \'S3\' = 135, \'HIVE\' = 136, \'SOURCES\' = 137, \'ALL\' = 138, \'NONE\' = 139),\n `aliases` Array(String),\n `level` Nullable(Enum8(\'GLOBAL\' = 0, \'DATABASE\' = 1, \'TABLE\' = 2, \'DICTIONARY\' = 3, \'VIEW\' = 4, \'COLUMN\' = 5)),\n `parent_group` Nullable(Enum16(\'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, 
\'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'KILL TRANSACTION\' = 62, \'MOVE PARTITION BETWEEN SHARDS\' = 63, \'CREATE USER\' = 64, \'ALTER USER\' = 65, \'DROP USER\' = 66, \'CREATE ROLE\' = 67, \'ALTER ROLE\' = 68, \'DROP ROLE\' = 69, \'ROLE ADMIN\' = 70, \'CREATE ROW POLICY\' = 71, \'ALTER ROW POLICY\' = 72, \'DROP ROW POLICY\' = 73, \'CREATE QUOTA\' = 74, \'ALTER QUOTA\' = 75, \'DROP QUOTA\' = 76, \'CREATE SETTINGS PROFILE\' = 77, \'ALTER SETTINGS PROFILE\' = 78, \'DROP SETTINGS PROFILE\' = 79, \'SHOW USERS\' = 80, \'SHOW ROLES\' = 81, \'SHOW ROW POLICIES\' = 82, \'SHOW QUOTAS\' = 83, \'SHOW SETTINGS PROFILES\' = 84, \'SHOW ACCESS\' = 85, \'ACCESS MANAGEMENT\' = 86, \'SYSTEM SHUTDOWN\' = 87, \'SYSTEM DROP DNS CACHE\' = 88, \'SYSTEM DROP MARK CACHE\' = 89, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 90, \'SYSTEM DROP MMAP CACHE\' = 91, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 92, \'SYSTEM DROP CACHE\' = 93, \'SYSTEM RELOAD CONFIG\' = 94, \'SYSTEM RELOAD SYMBOLS\' = 95, \'SYSTEM RELOAD DICTIONARY\' = 96, \'SYSTEM RELOAD MODEL\' = 97, \'SYSTEM RELOAD FUNCTION\' = 98, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 99, \'SYSTEM RELOAD\' = 100, \'SYSTEM RESTART DISK\' = 101, \'SYSTEM MERGES\' = 102, \'SYSTEM TTL MERGES\' = 103, \'SYSTEM FETCHES\' = 104, \'SYSTEM MOVES\' = 105, \'SYSTEM DISTRIBUTED SENDS\' = 106, \'SYSTEM REPLICATED SENDS\' = 107, \'SYSTEM SENDS\' = 108, \'SYSTEM REPLICATION QUEUES\' = 109, \'SYSTEM DROP REPLICA\' = 110, \'SYSTEM SYNC REPLICA\' = 111, \'SYSTEM RESTART REPLICA\' = 112, \'SYSTEM RESTORE REPLICA\' = 113, \'SYSTEM FLUSH DISTRIBUTED\' = 114, \'SYSTEM FLUSH LOGS\' = 115, \'SYSTEM FLUSH\' = 116, \'SYSTEM THREAD FUZZER\' = 117, \'SYSTEM\' = 118, \'dictGet\' = 119, \'addressToLine\' = 120, \'addressToLineWithInlines\' = 121, \'addressToSymbol\' = 122, \'demangle\' = 123, \'INTROSPECTION\' = 124, \'FILE\' = 125, \'URL\' = 126, \'REMOTE\' = 127, \'MONGO\' = 128, \'MYSQL\' = 129, \'POSTGRES\' = 130, \'SQLITE\' = 131, \'ODBC\' = 132, \'JDBC\' = 133, \'HDFS\' = 134, \'S3\' = 135, \'HIVE\' = 136, \'SOURCES\' = 137, \'ALL\' = 138, \'NONE\' = 139))\n)\nENGINE = SystemPrivileges()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.processes\n(\n `is_initial_query` UInt8,\n `user` String,\n `query_id` String,\n `address` IPv6,\n `port` UInt16,\n `initial_user` String,\n `initial_query_id` String,\n `initial_address` IPv6,\n 
`initial_port` UInt16,\n `interface` UInt8,\n `os_user` String,\n `client_hostname` String,\n `client_name` String,\n `client_revision` UInt64,\n `client_version_major` UInt64,\n `client_version_minor` UInt64,\n `client_version_patch` UInt64,\n `http_method` UInt8,\n `http_user_agent` String,\n `http_referer` String,\n `forwarded_for` String,\n `quota_key` String,\n `distributed_depth` UInt64,\n `elapsed` Float64,\n `is_cancelled` UInt8,\n `read_rows` UInt64,\n `read_bytes` UInt64,\n `total_rows_approx` UInt64,\n `written_rows` UInt64,\n `written_bytes` UInt64,\n `memory_usage` Int64,\n `peak_memory_usage` Int64,\n `query` String,\n `thread_ids` Array(UInt64),\n `ProfileEvents` Map(String, UInt64),\n `Settings` Map(String, String),\n `current_database` String,\n `ProfileEvents.Names` Array(String),\n `ProfileEvents.Values` Array(UInt64),\n `Settings.Names` Array(String),\n `Settings.Values` Array(String)\n)\nENGINE = SystemProcesses()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.projection_parts\n(\n `partition` String,\n `name` String,\n `part_type` String,\n `parent_name` String,\n `parent_uuid` UUID,\n `parent_part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `parent_marks` UInt64,\n `parent_rows` UInt64,\n `parent_bytes_on_disk` UInt64,\n `parent_data_compressed_bytes` UInt64,\n `parent_data_uncompressed_bytes` UInt64,\n `parent_marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `is_frozen` UInt8,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `hash_of_all_files` String,\n `hash_of_uncompressed_files` String,\n `uncompressed_hash_of_compressed_files` String,\n `delete_ttl_info_min` DateTime,\n `delete_ttl_info_max` DateTime,\n `move_ttl_info.expression` Array(String),\n `move_ttl_info.min` Array(DateTime),\n `move_ttl_info.max` Array(DateTime),\n `default_compression_codec` String,\n `recompression_ttl_info.expression` Array(String),\n `recompression_ttl_info.min` Array(DateTime),\n `recompression_ttl_info.max` Array(DateTime),\n `group_by_ttl_info.expression` Array(String),\n `group_by_ttl_info.min` Array(DateTime),\n `group_by_ttl_info.max` Array(DateTime),\n `rows_where_ttl_info.expression` Array(String),\n `rows_where_ttl_info.min` Array(DateTime),\n `rows_where_ttl_info.max` Array(DateTime),\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemProjectionParts()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.projection_parts_columns\n(\n `partition` String,\n `name` String,\n `part_type` String,\n `parent_name` String,\n `parent_uuid` UUID,\n `parent_part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `parent_marks` UInt64,\n `parent_rows` UInt64,\n `parent_bytes_on_disk` UInt64,\n `parent_data_compressed_bytes` UInt64,\n `parent_data_uncompressed_bytes` UInt64,\n `parent_marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n 
`refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `column` String,\n `type` String,\n `column_position` UInt64,\n `default_kind` String,\n `default_expression` String,\n `column_bytes_on_disk` UInt64,\n `column_data_compressed_bytes` UInt64,\n `column_data_uncompressed_bytes` UInt64,\n `column_marks_bytes` UInt64,\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemProjectionPartsColumns()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' -CREATE TABLE system.quota_limits\n(\n `quota_name` String,\n `duration` UInt32,\n `is_randomized_interval` UInt8,\n `max_queries` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `max_read_rows` Nullable(UInt64),\n `max_read_bytes` Nullable(UInt64),\n `max_execution_time` Nullable(Float64)\n)\nENGINE = SystemQuotaLimits()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' -CREATE TABLE system.quota_usage\n(\n `quota_name` String,\n `quota_key` String,\n `start_time` Nullable(DateTime),\n `end_time` Nullable(DateTime),\n `duration` Nullable(UInt32),\n `queries` Nullable(UInt64),\n `max_queries` Nullable(UInt64),\n `query_selects` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `query_inserts` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `errors` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `result_rows` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `result_bytes` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `read_rows` Nullable(UInt64),\n `max_read_rows` Nullable(UInt64),\n `read_bytes` Nullable(UInt64),\n `max_read_bytes` Nullable(UInt64),\n `execution_time` Nullable(Float64),\n `max_execution_time` Nullable(Float64)\n)\nENGINE = SystemQuotaUsage()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.quota_limits\n(\n `quota_name` String,\n `duration` UInt32,\n `is_randomized_interval` UInt8,\n `max_queries` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `max_read_rows` Nullable(UInt64),\n `max_read_bytes` Nullable(UInt64),\n `max_execution_time` Nullable(Float64),\n `max_written_bytes` Nullable(UInt64)\n)\nENGINE = SystemQuotaLimits()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.quota_usage\n(\n `quota_name` String,\n `quota_key` String,\n `start_time` Nullable(DateTime),\n `end_time` Nullable(DateTime),\n `duration` Nullable(UInt32),\n `queries` Nullable(UInt64),\n `max_queries` Nullable(UInt64),\n `query_selects` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `query_inserts` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `errors` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `result_rows` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `result_bytes` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `read_rows` Nullable(UInt64),\n `max_read_rows` Nullable(UInt64),\n `read_bytes` Nullable(UInt64),\n `max_read_bytes` 
Nullable(UInt64),\n `execution_time` Nullable(Float64),\n `max_execution_time` Nullable(Float64),\n `written_bytes` Nullable(UInt64),\n `max_written_bytes` Nullable(UInt64)\n)\nENGINE = SystemQuotaUsage()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.quotas\n(\n `name` String,\n `id` UUID,\n `storage` String,\n `keys` Array(Enum8(\'user_name\' = 1, \'ip_address\' = 2, \'forwarded_ip_address\' = 3, \'client_key\' = 4)),\n `durations` Array(UInt32),\n `apply_to_all` UInt8,\n `apply_to_list` Array(String),\n `apply_to_except` Array(String)\n)\nENGINE = SystemQuotas()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' -CREATE TABLE system.quotas_usage\n(\n `quota_name` String,\n `quota_key` String,\n `is_current` UInt8,\n `start_time` Nullable(DateTime),\n `end_time` Nullable(DateTime),\n `duration` Nullable(UInt32),\n `queries` Nullable(UInt64),\n `max_queries` Nullable(UInt64),\n `query_selects` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `query_inserts` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `errors` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `result_rows` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `result_bytes` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `read_rows` Nullable(UInt64),\n `max_read_rows` Nullable(UInt64),\n `read_bytes` Nullable(UInt64),\n `max_read_bytes` Nullable(UInt64),\n `execution_time` Nullable(Float64),\n `max_execution_time` Nullable(Float64)\n)\nENGINE = SystemQuotasUsage()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.quotas_usage\n(\n `quota_name` String,\n `quota_key` String,\n `is_current` UInt8,\n `start_time` Nullable(DateTime),\n `end_time` Nullable(DateTime),\n `duration` Nullable(UInt32),\n `queries` Nullable(UInt64),\n `max_queries` Nullable(UInt64),\n `query_selects` Nullable(UInt64),\n `max_query_selects` Nullable(UInt64),\n `query_inserts` Nullable(UInt64),\n `max_query_inserts` Nullable(UInt64),\n `errors` Nullable(UInt64),\n `max_errors` Nullable(UInt64),\n `result_rows` Nullable(UInt64),\n `max_result_rows` Nullable(UInt64),\n `result_bytes` Nullable(UInt64),\n `max_result_bytes` Nullable(UInt64),\n `read_rows` Nullable(UInt64),\n `max_read_rows` Nullable(UInt64),\n `read_bytes` Nullable(UInt64),\n `max_read_bytes` Nullable(UInt64),\n `execution_time` Nullable(Float64),\n `max_execution_time` Nullable(Float64),\n `written_bytes` Nullable(UInt64),\n `max_written_bytes` Nullable(UInt64)\n)\nENGINE = SystemQuotasUsage()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.replicas\n(\n `database` String,\n `table` String,\n `engine` String,\n `is_leader` UInt8,\n `can_become_leader` UInt8,\n `is_readonly` UInt8,\n `is_session_expired` UInt8,\n `future_parts` UInt32,\n `parts_to_check` UInt32,\n `zookeeper_path` String,\n `replica_name` String,\n `replica_path` String,\n `columns_version` Int32,\n `queue_size` UInt32,\n `inserts_in_queue` UInt32,\n `merges_in_queue` UInt32,\n `part_mutations_in_queue` UInt32,\n `queue_oldest_time` DateTime,\n `inserts_oldest_time` DateTime,\n `merges_oldest_time` DateTime,\n `part_mutations_oldest_time` DateTime,\n `oldest_part_to_get` String,\n `oldest_part_to_merge_to` String,\n `oldest_part_to_mutate_to` String,\n `log_max_index` UInt64,\n `log_pointer` UInt64,\n `last_queue_update` DateTime,\n `absolute_delay` UInt64,\n `total_replicas` UInt8,\n `active_replicas` UInt8,\n `last_queue_update_exception` String,\n `zookeeper_exception` String,\n `replica_is_active` Map(String, 
UInt8)\n)\nENGINE = SystemReplicas()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.replicated_fetches\n(\n `database` String,\n `table` String,\n `elapsed` Float64,\n `progress` Float64,\n `result_part_name` String,\n `result_part_path` String,\n `partition_id` String,\n `total_size_bytes_compressed` UInt64,\n `bytes_read_compressed` UInt64,\n `source_replica_path` String,\n `source_replica_hostname` String,\n `source_replica_port` UInt16,\n `interserver_scheme` String,\n `URI` String,\n `to_detached` UInt8,\n `thread_id` UInt64\n)\nENGINE = SystemReplicatedFetches()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.replicated_merge_tree_settings\n(\n `name` String,\n `value` String,\n `changed` UInt8,\n `description` String,\n `type` String\n)\nENGINE = SystemReplicatedMergeTreeSettings()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' diff --git a/tests/queries/0_stateless/02127_plus_before_float.sh b/tests/queries/0_stateless/02127_plus_before_float.sh index b464bedb837..2f0195410eb 100755 --- a/tests/queries/0_stateless/02127_plus_before_float.sh +++ b/tests/queries/0_stateless/02127_plus_before_float.sh @@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT -q "create table test_02127 (x Float32, y Float64) engine=Mem for escaping_rule in Quoted JSON Escaped CSV Raw do -echo -e "+42.42\t+42.42" | $CLICKHOUSE_CLIENT -q "insert into test_02127 format CustomSeparated settings format_custom_escaping_rule='$escaping_rule'" +echo -e "+42.42\t+42.42" | $CLICKHOUSE_CLIENT -q "insert into test_02127 settings format_custom_escaping_rule='$escaping_rule' format CustomSeparated" done diff --git a/tests/queries/0_stateless/02129_skip_quoted_fields.sh b/tests/queries/0_stateless/02129_skip_quoted_fields.sh index c1baeb5b8f2..ac702d3c750 100755 --- a/tests/queries/0_stateless/02129_skip_quoted_fields.sh +++ b/tests/queries/0_stateless/02129_skip_quoted_fields.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT -q "drop table if exists test_02129" $CLICKHOUSE_CLIENT -q "create table test_02129 (x UInt64, y UInt64) engine=Memory()" -QUERY="insert into test_02129 format CustomSeparatedWithNames settings input_format_skip_unknown_fields=1, format_custom_escaping_rule='Quoted'" +QUERY="insert into test_02129 settings input_format_skip_unknown_fields=1, format_custom_escaping_rule='Quoted' format CustomSeparatedWithNames" # Skip string echo -e "'x'\t'trash'\t'y'\n1\t'Some string'\t42" | $CLICKHOUSE_CLIENT -q "$QUERY" diff --git a/tests/queries/0_stateless/02134_async_inserts_formats.sh b/tests/queries/0_stateless/02134_async_inserts_formats.sh index bd102fefe9f..631809e5dc2 100755 --- a/tests/queries/0_stateless/02134_async_inserts_formats.sh +++ b/tests/queries/0_stateless/02134_async_inserts_formats.sh @@ -9,23 +9,23 @@ url="${CLICKHOUSE_URL}&async_insert=1&wait_for_async_insert=1" ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS async_inserts" ${CLICKHOUSE_CLIENT} -q "CREATE TABLE async_inserts (id UInt32, s String) ENGINE = MergeTree ORDER BY id" -${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO async_inserts FORMAT CustomSeparated settings format_custom_escaping_rule='CSV', format_custom_field_delimiter=',' +${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO async_inserts settings format_custom_escaping_rule='CSV', format_custom_field_delimiter=',' FORMAT CustomSeparated 1,\"a\" 2,\"b\" " & -${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO async_inserts FORMAT CustomSeparated settings format_custom_escaping_rule='CSV', format_custom_field_delimiter=',' 
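(Note on the recurring change in the test hunks above and below: the per-query SETTINGS clause is moved ahead of FORMAT, because the data for an INSERT follows the FORMAT clause, so settings written after it were not being picked up as intended. A minimal sketch of the resulting clause order, using an illustrative throwaway table and a setting that already appears in these tests; the table name and sample rows are assumptions for the example only:

CREATE TABLE t (x UInt32, s String) ENGINE = Memory;

-- SETTINGS comes before FORMAT and applies to this INSERT only;
-- the lines after FORMAT are parsed as CSVWithNames data.
INSERT INTO t SETTINGS input_format_skip_unknown_fields = 1 FORMAT CSVWithNames
"x","junk","s"
1,"ignored","a"
2,"ignored","b"
)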
+${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO async_inserts settings format_custom_escaping_rule='CSV', format_custom_field_delimiter=',' FORMAT CustomSeparated 3,\"a\" 4,\"b\" " & -${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO async_inserts FORMAT CustomSeparatedWithNames settings format_custom_escaping_rule='CSV', format_custom_field_delimiter=',' +${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO async_inserts settings format_custom_escaping_rule='CSV', format_custom_field_delimiter=',' FORMAT CustomSeparatedWithNames \"id\",\"s\" 5,\"a\" 6,\"b\" " & -${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO async_inserts FORMAT CustomSeparatedWithNames settings format_custom_escaping_rule='CSV', format_custom_field_delimiter=',' +${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO async_inserts settings format_custom_escaping_rule='CSV', format_custom_field_delimiter=',' FORMAT CustomSeparatedWithNames \"id\",\"s\" 7,\"a\" 8,\"b\" diff --git a/tests/queries/0_stateless/02147_arrow_duplicate_columns.sh b/tests/queries/0_stateless/02147_arrow_duplicate_columns.sh index 938b45fee98..548b2ca868b 100755 --- a/tests/queries/0_stateless/02147_arrow_duplicate_columns.sh +++ b/tests/queries/0_stateless/02147_arrow_duplicate_columns.sh @@ -26,6 +26,6 @@ GZDATA="H4sIAHTzuWEAA9VTuw3CMBB9+RCsyIULhFIwAC0SJQWZACkNi1CAxCCMwCCMQMEIKdkgPJ8P ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS t1" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE t1 ( x Int64, y Int64, z Int64 ) ENGINE = Memory" -echo ${GZDATA} | base64 --decode | gunzip | ${CLICKHOUSE_CLIENT} -q "INSERT INTO t1 FORMAT Arrow settings input_format_arrow_allow_missing_columns = true" 2>&1 | grep -qF "DUPLICATE_COLUMN" && echo 'OK' || echo 'FAIL' ||: +echo ${GZDATA} | base64 --decode | gunzip | ${CLICKHOUSE_CLIENT} -q "INSERT INTO t1 settings input_format_arrow_allow_missing_columns = true FORMAT Arrow" 2>&1 | grep -qF "DUPLICATE_COLUMN" && echo 'OK' || echo 'FAIL' ||: ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t1" diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference index 9e24b7c6ea6..67a043d6646 100644 --- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference +++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference @@ -7,13 +7,15 @@ ExpressionTransform (Limit) Limit - (Sorting) - MergingSortedTransform 2 → 1 - (Expression) - ExpressionTransform × 2 - (SettingQuotaAndLimits) - (ReadFromMergeTree) - MergeTreeInOrder × 2 0 → 1 + (Expression) + ExpressionTransform + (Sorting) + MergingSortedTransform 2 → 1 + (Expression) + ExpressionTransform × 2 + (SettingQuotaAndLimits) + (ReadFromMergeTree) + MergeTreeInOrder × 2 0 → 1 2020-10-01 9 2020-10-01 9 2020-10-01 9 @@ -23,16 +25,18 @@ ExpressionTransform ExpressionTransform (Limit) Limit - (Sorting) - MergingSortedTransform 2 → 1 - (Expression) - ExpressionTransform × 2 - (SettingQuotaAndLimits) - (ReadFromMergeTree) - ReverseTransform - MergeTreeReverse 0 → 1 - ReverseTransform - MergeTreeReverse 0 → 1 + (Expression) + ExpressionTransform + (Sorting) + MergingSortedTransform 2 → 1 + (Expression) + ExpressionTransform × 2 + (SettingQuotaAndLimits) + (ReadFromMergeTree) + ReverseTransform + MergeTreeReverse 0 → 1 + ReverseTransform + MergeTreeReverse 0 → 1 2020-10-01 9 2020-10-01 9 2020-10-01 9 @@ -42,15 +46,17 @@ ExpressionTransform ExpressionTransform (Limit) Limit - (Sorting) - FinishSortingTransform - PartialSortingTransform - MergingSortedTransform 2 → 1 - (Expression) - 
ExpressionTransform × 2 - (SettingQuotaAndLimits) - (ReadFromMergeTree) - MergeTreeInOrder × 2 0 → 1 + (Expression) + ExpressionTransform + (Sorting) + FinishSortingTransform + PartialSortingTransform + MergingSortedTransform 2 → 1 + (Expression) + ExpressionTransform × 2 + (SettingQuotaAndLimits) + (ReadFromMergeTree) + MergeTreeInOrder × 2 0 → 1 2020-10-11 0 2020-10-11 0 2020-10-11 0 diff --git a/tests/queries/0_stateless/02149_schema_inference.reference b/tests/queries/0_stateless/02149_schema_inference.reference index f46e3bee101..2d7dd5caca7 100644 --- a/tests/queries/0_stateless/02149_schema_inference.reference +++ b/tests/queries/0_stateless/02149_schema_inference.reference @@ -1,17 +1,17 @@ TSV -c1 Nullable(String) +c1 Nullable(Float64) c2 Nullable(String) -c3 Nullable(String) -c4 Nullable(String) -42 Some string [1, 2, 3, 4] (1, 2, 3) -42 abcd [] (4, 5, 6) +c3 Array(Nullable(Float64)) +c4 Tuple(Nullable(Float64), Nullable(Float64), Nullable(Float64)) +42 Some string [1,2,3,4] (1,2,3) +42 abcd [] (4,5,6) TSVWithNames -number Nullable(String) +number Nullable(Float64) string Nullable(String) -array Nullable(String) -tuple Nullable(String) -42 Some string [1, 2, 3, 4] (1, 2, 3) -42 abcd [] (4, 5, 6) +array Array(Nullable(Float64)) +tuple Tuple(Nullable(Float64), Nullable(Float64), Nullable(Float64)) +42 Some string [1,2,3,4] (1,2,3) +42 abcd [] (4,5,6) CSV c1 Nullable(Float64) c2 Nullable(String) @@ -38,49 +38,49 @@ JSONCompactEachRow c1 Nullable(Float64) c2 Array(Tuple(Nullable(Float64), Nullable(String))) c3 Map(String, Nullable(Float64)) -c4 Nullable(UInt8) -42.42 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} 1 +c4 Nullable(Bool) +42.42 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} true c1 Nullable(Float64) c2 Array(Tuple(Nullable(Float64), Nullable(String))) c3 Map(String, Nullable(Float64)) -c4 Nullable(UInt8) +c4 Nullable(Bool) \N [(1,'String'),(2,NULL)] {'key':NULL,'key2':24} \N -32 [(2,'String 2'),(3,'hello')] {'key3':4242,'key4':2424} 1 +32 [(2,'String 2'),(3,'hello')] {'key3':4242,'key4':2424} true JSONCompactEachRowWithNames a Nullable(Float64) b Array(Tuple(Nullable(Float64), Nullable(String))) c Map(String, Nullable(Float64)) -d Nullable(UInt8) -42.42 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} 1 +d Nullable(Bool) +42.42 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} true JSONEachRow -d Nullable(UInt8) +a Nullable(Float64) b Array(Tuple(Nullable(Float64), Nullable(String))) c Map(String, Nullable(Float64)) +d Nullable(Bool) +42.42 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} true a Nullable(Float64) -1 [(1,'String'),(2,'abcd')] {'key':42,'key2':24} 42.42 -d Nullable(UInt8) b Array(Tuple(Nullable(Float64), Nullable(String))) c Map(String, Nullable(Float64)) -a Nullable(Float64) +d Nullable(Bool) \N [(1,'String'),(2,NULL)] {'key':NULL,'key2':24} \N -1 [(2,'String 2'),(3,'hello')] {'key3':4242,'key4':2424} 32 +32 [(2,'String 2'),(3,'hello')] {'key3':4242,'key4':2424} true +a Nullable(Float64) b Nullable(String) c Array(Nullable(Float64)) -a Nullable(Float64) -s1 [] 1 -\N [2] 2 -\N [] \N -\N [] \N -\N [3] \N +1 s1 [] +2 \N [2] +\N \N [] +\N \N [] +\N \N [3] TSKV +a Nullable(Float64) b Nullable(String) -c Nullable(String) -a Nullable(String) -s1 \N 1 -} [2] 2 -\N \N \N -\N \N \N -\N [3] \N +c Array(Nullable(Float64)) +1 s1 [] +2 } [2] +\N \N [] +\N \N [] +\N \N [3] Values c1 Nullable(Float64) c2 Nullable(String) @@ -96,7 +96,7 @@ c5 Tuple(Array(Nullable(Float64)), Array(Tuple(Nullable(Float64), Nullable(Strin 42.42 \N [1,NULL,3] (1,NULL) ([1,2],[(3,'4'),(5,'6')]) \N 
Some string [10] (1,2) ([],[]) Regexp -c1 Nullable(String) +c1 Nullable(Float64) c2 Nullable(String) c3 Nullable(String) 42 Some string 1 [([1, 2, 3], String 1), ([], String 1)] diff --git a/tests/queries/0_stateless/02149_schema_inference_formats_with_schema.reference b/tests/queries/0_stateless/02149_schema_inference_formats_with_schema.reference index d3d2d86d696..b0ec4bef499 100644 --- a/tests/queries/0_stateless/02149_schema_inference_formats_with_schema.reference +++ b/tests/queries/0_stateless/02149_schema_inference_formats_with_schema.reference @@ -1,137 +1,137 @@ Arrow -int8 Int8 -uint8 UInt8 -int16 Int16 -uint16 UInt16 -int32 Int32 -uint32 UInt32 -int64 Int64 -uint64 UInt64 +int8 Nullable(Int8) +uint8 Nullable(UInt8) +int16 Nullable(Int16) +uint16 Nullable(UInt16) +int32 Nullable(Int32) +uint32 Nullable(UInt32) +int64 Nullable(Int64) +uint64 Nullable(UInt64) 0 0 0 0 0 0 0 0 -1 1 -1 1 -1 1 -1 1 -float32 Float32 -float64 Float64 -decimal32 Decimal(9, 5) -decimal64 Decimal(18, 5) +float32 Nullable(Float32) +float64 Nullable(Float64) +decimal32 Nullable(Decimal(9, 5)) +decimal64 Nullable(Decimal(18, 5)) 0 0 0 0 1.2 0.7692307692307692 3.33333 333.33333 -date UInt16 -date32 Date32 +date Nullable(UInt16) +date32 Nullable(Date32) 0 1970-01-01 1 1970-01-02 -str String -fixed_string String +str Nullable(String) +fixed_string Nullable(String) Str: 0 100 Str: 1 200 -array Array(UInt64) -tuple Tuple(`tuple.0` UInt64, `tuple.1` String) -map Map(String, UInt64) +array Array(Nullable(UInt64)) +tuple Tuple(Nullable(UInt64), Nullable(String)) +map Map(String, Nullable(UInt64)) [0,1] (0,'0') {'0':0} [1,2] (1,'1') {'1':1} -nested1 Array(Tuple(`nested1.0` Array(UInt64), `nested1.1` Map(String, UInt64))) -nested2 Tuple(`nested2.0` Tuple(`nested2.0.0` Array(Array(UInt64)), `nested2.0.1` Map(UInt64, Array(Tuple(`nested2.0.1.0` UInt64, `nested2.0.1.1` String)))), `nested2.1` UInt8) +nested1 Array(Tuple(Array(Nullable(UInt64)), Map(String, Nullable(UInt64)))) +nested2 Tuple(Tuple(Array(Array(Nullable(UInt64))), Map(UInt64, Array(Tuple(Nullable(UInt64), Nullable(String))))), Nullable(UInt8)) [([0,1],{'42':0}),([],{}),([42],{'42':42})] (([[0],[1],[]],{0:[(0,'42'),(1,'42')]}),42) [([1,2],{'42':1}),([],{}),([42],{'42':42})] (([[1],[2],[]],{1:[(1,'42'),(2,'42')]}),42) ArrowStream -int8 Int8 -uint8 UInt8 -int16 Int16 -uint16 UInt16 -int32 Int32 -uint32 UInt32 -int64 Int64 -uint64 UInt64 +int8 Nullable(Int8) +uint8 Nullable(UInt8) +int16 Nullable(Int16) +uint16 Nullable(UInt16) +int32 Nullable(Int32) +uint32 Nullable(UInt32) +int64 Nullable(Int64) +uint64 Nullable(UInt64) 0 0 0 0 0 0 0 0 -1 1 -1 1 -1 1 -1 1 -float32 Float32 -float64 Float64 -decimal32 Decimal(9, 5) -decimal64 Decimal(18, 5) +float32 Nullable(Float32) +float64 Nullable(Float64) +decimal32 Nullable(Decimal(9, 5)) +decimal64 Nullable(Decimal(18, 5)) 0 0 0 0 1.2 0.7692307692307692 3.33333 333.33333 -date UInt16 -date32 Date32 +date Nullable(UInt16) +date32 Nullable(Date32) 0 1970-01-01 1 1970-01-02 -str String -fixed_string String +str Nullable(String) +fixed_string Nullable(String) Str: 0 100 Str: 1 200 -array Array(UInt64) -tuple Tuple(`tuple.0` UInt64, `tuple.1` String) -map Map(String, UInt64) +array Array(Nullable(UInt64)) +tuple Tuple(Nullable(UInt64), Nullable(String)) +map Map(String, Nullable(UInt64)) [0,1] (0,'0') {'0':0} [1,2] (1,'1') {'1':1} -nested1 Array(Tuple(`nested1.0` Array(UInt64), `nested1.1` Map(String, UInt64))) -nested2 Tuple(`nested2.0` Tuple(`nested2.0.0` Array(Array(UInt64)), `nested2.0.1` Map(UInt64, 
Array(Tuple(`nested2.0.1.0` UInt64, `nested2.0.1.1` String)))), `nested2.1` UInt8) +nested1 Array(Tuple(Array(Nullable(UInt64)), Map(String, Nullable(UInt64)))) +nested2 Tuple(Tuple(Array(Array(Nullable(UInt64))), Map(UInt64, Array(Tuple(Nullable(UInt64), Nullable(String))))), Nullable(UInt8)) [([0,1],{'42':0}),([],{}),([42],{'42':42})] (([[0],[1],[]],{0:[(0,'42'),(1,'42')]}),42) [([1,2],{'42':1}),([],{}),([42],{'42':42})] (([[1],[2],[]],{1:[(1,'42'),(2,'42')]}),42) Parquet -int8 Int8 -uint8 UInt8 -int16 Int16 -uint16 UInt16 -int32 Int32 -uint32 Int64 -int64 Int64 -uint64 UInt64 +int8 Nullable(Int8) +uint8 Nullable(UInt8) +int16 Nullable(Int16) +uint16 Nullable(UInt16) +int32 Nullable(Int32) +uint32 Nullable(Int64) +int64 Nullable(Int64) +uint64 Nullable(UInt64) 0 0 0 0 0 0 0 0 -1 1 -1 1 -1 1 -1 1 -float32 Float32 -float64 Float64 -decimal32 Decimal(9, 5) -decimal64 Decimal(18, 5) +float32 Nullable(Float32) +float64 Nullable(Float64) +decimal32 Nullable(Decimal(9, 5)) +decimal64 Nullable(Decimal(18, 5)) 0 0 0 0 1.2 0.7692307692307692 3.33333 333.33333 -date UInt16 -date32 Date32 +date Nullable(UInt16) +date32 Nullable(Date32) 0 1970-01-01 1 1970-01-02 -str String -fixed_string String +str Nullable(String) +fixed_string Nullable(String) Str: 0 100 Str: 1 200 -array Array(UInt64) -tuple Tuple(`tuple.0` UInt64, `tuple.1` String) -map Map(String, UInt64) +array Array(Nullable(UInt64)) +tuple Tuple(Nullable(UInt64), Nullable(String)) +map Map(String, Nullable(UInt64)) [0,1] (0,'0') {'0':0} [1,2] (1,'1') {'1':1} -nested1 Array(Tuple(`nested1.0` Array(UInt64), `nested1.1` Map(String, UInt64))) -nested2 Tuple(`nested2.0` Tuple(`nested2.0.0` Array(Array(UInt64)), `nested2.0.1` Map(UInt64, Array(Tuple(`nested2.0.1.0` UInt64, `nested2.0.1.1` String)))), `nested2.1` UInt8) +nested1 Array(Tuple(Array(Nullable(UInt64)), Map(String, Nullable(UInt64)))) +nested2 Tuple(Tuple(Array(Array(Nullable(UInt64))), Map(UInt64, Array(Tuple(Nullable(UInt64), Nullable(String))))), Nullable(UInt8)) [([0,1],{'42':0}),([],{}),([42],{'42':42})] (([[0],[1],[]],{0:[(0,'42'),(1,'42')]}),42) [([1,2],{'42':1}),([],{}),([42],{'42':42})] (([[1],[2],[]],{1:[(1,'42'),(2,'42')]}),42) ORC -int8 Int8 -uint8 Int8 -int16 Int16 -uint16 Int16 -int32 Int32 -uint32 Int32 -int64 Int64 -uint64 Int64 +int8 Nullable(Int8) +uint8 Nullable(Int8) +int16 Nullable(Int16) +uint16 Nullable(Int16) +int32 Nullable(Int32) +uint32 Nullable(Int32) +int64 Nullable(Int64) +uint64 Nullable(Int64) 0 0 0 0 0 0 0 0 -1 1 -1 1 -1 1 -1 1 -float32 Float32 -float64 Float64 -decimal32 Decimal(9, 5) -decimal64 Decimal(18, 5) +float32 Nullable(Float32) +float64 Nullable(Float64) +decimal32 Nullable(Decimal(9, 5)) +decimal64 Nullable(Decimal(18, 5)) 0 0 0 0 1.2 0.7692307692307692 3.33333 333.33333 -date Date32 -date32 Date32 +date Nullable(Date32) +date32 Nullable(Date32) 1970-01-01 1970-01-01 1970-01-02 1970-01-02 -str String -fixed_string String +str Nullable(String) +fixed_string Nullable(String) Str: 0 100 Str: 1 200 -array Array(Int64) -tuple Tuple(`tuple.0` Int64, `tuple.1` String) -map Map(String, Int64) +array Array(Nullable(Int64)) +tuple Tuple(Nullable(Int64), Nullable(String)) +map Map(String, Nullable(Int64)) [0,1] (0,'0') {'0':0} [1,2] (1,'1') {'1':1} -nested1 Array(Tuple(`nested1.0` Array(Int64), `nested1.1` Map(String, Int64))) -nested2 Tuple(`nested2.0` Tuple(`nested2.0.0` Array(Array(Int64)), `nested2.0.1` Map(Int64, Array(Tuple(`nested2.0.1.0` Int64, `nested2.0.1.1` String)))), `nested2.1` Int8) +nested1 Array(Tuple(Array(Nullable(Int64)), Map(String, 
Nullable(Int64)))) +nested2 Tuple(Tuple(Array(Array(Nullable(Int64))), Map(Int64, Array(Tuple(Nullable(Int64), Nullable(String))))), Nullable(Int8)) [([0,1],{'42':0}),([],{}),([42],{'42':42})] (([[0],[1],[]],{0:[(0,'42'),(1,'42')]}),42) [([1,2],{'42':1}),([],{}),([42],{'42':42})] (([[1],[2],[]],{1:[(1,'42'),(2,'42')]}),42) Native diff --git a/tests/queries/0_stateless/02155_csv_with_strings_with_slash.sh b/tests/queries/0_stateless/02155_csv_with_strings_with_slash.sh index ab2577e6138..08d380bf559 100755 --- a/tests/queries/0_stateless/02155_csv_with_strings_with_slash.sh +++ b/tests/queries/0_stateless/02155_csv_with_strings_with_slash.sh @@ -10,13 +10,13 @@ ${CLICKHOUSE_CLIENT} --query="create table test_02155_csv (A Int64, S String, D echo "input_format_null_as_default = 1" -cat $CUR_DIR/data_csv/csv_with_slash.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_02155_csv FORMAT CSV SETTINGS input_format_null_as_default = 1" +cat $CUR_DIR/data_csv/csv_with_slash.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_02155_csv SETTINGS input_format_null_as_default = 1 FORMAT CSV" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM test_02155_csv" ${CLICKHOUSE_CLIENT} --query="TRUNCATE TABLE test_02155_csv" echo "input_format_null_as_default = 0" -cat $CUR_DIR/data_csv/csv_with_slash.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_02155_csv FORMAT CSV SETTINGS input_format_null_as_default = 0" +cat $CUR_DIR/data_csv/csv_with_slash.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_02155_csv SETTINGS input_format_null_as_default = 0 FORMAT CSV" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM test_02155_csv" diff --git a/tests/queries/0_stateless/02165_insert_from_infile.reference b/tests/queries/0_stateless/02165_insert_from_infile.reference index 2a00a8faa31..f8c205ecc0f 100644 --- a/tests/queries/0_stateless/02165_insert_from_infile.reference +++ b/tests/queries/0_stateless/02165_insert_from_infile.reference @@ -1,5 +1,5 @@ -INSERT INTO test FROM INFILE data.file SELECT x +INSERT INTO test FROM INFILE \'data.file\' SELECT x FROM input(\'x UInt32\') -INSERT INTO test FROM INFILE data.file WITH number AS x +INSERT INTO test FROM INFILE \'data.file\' WITH number AS x SELECT number FROM input(\'number UInt32\') diff --git a/tests/queries/0_stateless/02166_arrow_dictionary_inference.reference b/tests/queries/0_stateless/02166_arrow_dictionary_inference.reference index 46f448cfba7..20f3368e446 100644 --- a/tests/queries/0_stateless/02166_arrow_dictionary_inference.reference +++ b/tests/queries/0_stateless/02166_arrow_dictionary_inference.reference @@ -1 +1 @@ -x LowCardinality(UInt64) +x LowCardinality(Nullable(UInt64)) diff --git a/tests/queries/0_stateless/02166_arrow_dictionary_inference.sh b/tests/queries/0_stateless/02166_arrow_dictionary_inference.sh index e560dc10d2c..7d313b571d9 100755 --- a/tests/queries/0_stateless/02166_arrow_dictionary_inference.sh +++ b/tests/queries/0_stateless/02166_arrow_dictionary_inference.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q "insert into table function file('arrow.dict', 'Arrow', 'x LowCardinality(UInt64)') select number from numbers(10) settings output_format_arrow_low_cardinality_as_dictionary=1" +$CLICKHOUSE_CLIENT -q "insert into table function file('arrow.dict', 'Arrow', 'x LowCardinality(UInt64)') select number from numbers(10) settings output_format_arrow_low_cardinality_as_dictionary=1, engine_file_truncate_on_insert=1" $CLICKHOUSE_CLIENT -q "desc file('arrow.dict', 'Arrow')" diff --git a/tests/queries/0_stateless/02174_cte_scalar_cache_mv.reference b/tests/queries/0_stateless/02174_cte_scalar_cache_mv.reference index 246706164df..055c88160ad 100644 --- a/tests/queries/0_stateless/02174_cte_scalar_cache_mv.reference +++ b/tests/queries/0_stateless/02174_cte_scalar_cache_mv.reference @@ -18,7 +18,7 @@ 89 89 89 89 5 94 94 94 94 5 99 99 99 99 5 -02177_MV 7 80 22 +02177_MV 3 80 26 10 40 70 diff --git a/tests/queries/0_stateless/02174_cte_scalar_cache_mv.sql b/tests/queries/0_stateless/02174_cte_scalar_cache_mv.sql index 4d4447c7f31..742d72fe2b2 100644 --- a/tests/queries/0_stateless/02174_cte_scalar_cache_mv.sql +++ b/tests/queries/0_stateless/02174_cte_scalar_cache_mv.sql @@ -39,13 +39,13 @@ SYSTEM FLUSH LOGS; -- The main query should have a cache miss and 3 global hits -- The MV is executed 20 times (100 / 5) and each run does 1 miss and 4 hits to the LOCAL cache -- In addition to this, to prepare the MV, there is an extra preparation to get the list of columns via --- InterpreterSelectQuery, which adds 1 miss and 4 global hits (since it uses the global cache) +-- InterpreterSelectQuery, which adds 5 miss (since we don't use cache for preparation) -- So in total we have: -- Main query: 1 miss, 3 global --- Preparation: 1 miss, 4 global +-- Preparation: 5 miss -- Blocks (20): 20 miss, 0 global, 80 local hits --- TOTAL: 22 miss, 7 global, 80 local +-- TOTAL: 26 miss, 3 global, 80 local SELECT '02177_MV', ProfileEvents['ScalarSubqueriesGlobalCacheHit'] as scalar_cache_global_hit, diff --git a/tests/queries/0_stateless/02180_insert_into_values_settings.sql b/tests/queries/0_stateless/02180_insert_into_values_settings.sql index 0a1468070c1..a499ab15f26 100644 --- a/tests/queries/0_stateless/02180_insert_into_values_settings.sql +++ b/tests/queries/0_stateless/02180_insert_into_values_settings.sql @@ -1,4 +1,4 @@ drop table if exists t; create table t (x Bool) engine=Memory(); -insert into t values settings bool_true_representation='да' ('да'); +insert into t settings bool_true_representation='да' values ('да'); drop table t; diff --git a/tests/queries/0_stateless/02188_table_function_format.reference b/tests/queries/0_stateless/02188_table_function_format.reference index ab568fb9fe5..403a4044544 100644 --- a/tests/queries/0_stateless/02188_table_function_format.reference +++ b/tests/queries/0_stateless/02188_table_function_format.reference @@ -1,52 +1,52 @@ -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World -111 Hello -123 World +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 
123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 +Hello 111 +World 123 1 2 [1,2,3] [['abc'],[],['d','e']] c1 Nullable(Float64) c2 Nullable(Float64) c3 Array(Nullable(Float64)) c4 Array(Array(Nullable(String))) -111 Hello -123 World -111 Hello -131 Hello -123 World -b Nullable(Float64) +Hello 111 +World 123 +Hello 111 +Hello 131 +World 123 a Nullable(String) +b Nullable(Float64) diff --git a/tests/queries/0_stateless/02193_async_insert_tcp_client_1.sql b/tests/queries/0_stateless/02193_async_insert_tcp_client_1.sql index 795a27883e6..7e68beb4b6f 100644 --- a/tests/queries/0_stateless/02193_async_insert_tcp_client_1.sql +++ b/tests/queries/0_stateless/02193_async_insert_tcp_client_1.sql @@ -1,12 +1,14 @@ +SET log_queries = 1; + DROP TABLE IF EXISTS t_async_insert_02193_1; CREATE TABLE t_async_insert_02193_1 (id UInt32, s String) ENGINE = Memory; -INSERT INTO t_async_insert_02193_1 FORMAT CSV SETTINGS async_insert = 1 +INSERT INTO t_async_insert_02193_1 SETTINGS async_insert = 1 FORMAT CSV 1,aaa ; -INSERT INTO t_async_insert_02193_1 FORMAT Values SETTINGS async_insert = 1 (2, 'bbb'); +INSERT INTO t_async_insert_02193_1 SETTINGS async_insert = 1 FORMAT Values (2, 'bbb'); SET async_insert = 1; diff --git a/tests/queries/0_stateless/02193_async_insert_tcp_client_2.sh b/tests/queries/0_stateless/02193_async_insert_tcp_client_2.sh index e620b21ac72..8aeb53d3b87 100755 --- a/tests/queries/0_stateless/02193_async_insert_tcp_client_2.sh +++ b/tests/queries/0_stateless/02193_async_insert_tcp_client_2.sh @@ -9,8 +9,8 @@ ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_async_insert_02193_2" ${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_async_insert_02193_2 (id UInt32, s String) ENGINE = Memory" -${CLICKHOUSE_CLIENT} -q "INSERT INTO t_async_insert_02193_2 FORMAT CSV SETTINGS async_insert = 1 1,aaa" -${CLICKHOUSE_CLIENT} -q "INSERT INTO t_async_insert_02193_2 FORMAT Values SETTINGS async_insert = 1 (2, 'bbb')" +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_async_insert_02193_2 SETTINGS async_insert = 1 FORMAT CSV 1,aaa" +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_async_insert_02193_2 SETTINGS async_insert = 1 FORMAT Values (2, 'bbb')" ${CLICKHOUSE_CLIENT} -q "INSERT INTO t_async_insert_02193_2 VALUES (3, 'ccc')" --async_insert=1 ${CLICKHOUSE_CLIENT} -q 'INSERT INTO t_async_insert_02193_2 FORMAT JSONEachRow {"id": 4, "s": "ddd"}' --async_insert=1 diff --git a/tests/queries/0_stateless/02205_ephemeral_1.reference b/tests/queries/0_stateless/02205_ephemeral_1.reference index 6e98ffd2495..ba39033668f 100644 --- a/tests/queries/0_stateless/02205_ephemeral_1.reference +++ b/tests/queries/0_stateless/02205_ephemeral_1.reference @@ -6,3 +6,11 @@ z UInt32 DEFAULT 5 17 5 7 5 21 5 +x UInt32 DEFAULT y +y UInt32 EPHEMERAL 0 +z UInt32 DEFAULT 5 +1 2 +0 2 +0 5 +7 5 +21 5 diff --git a/tests/queries/0_stateless/02205_ephemeral_1.sql b/tests/queries/0_stateless/02205_ephemeral_1.sql index 5d0565cbfc0..7a996ee3a8f 100644 --- a/tests/queries/0_stateless/02205_ephemeral_1.sql +++ b/tests/queries/0_stateless/02205_ephemeral_1.sql @@ -38,3 +38,43 @@ SELECT * FROM t_ephemeral_02205_1; DROP TABLE IF EXISTS t_ephemeral_02205_1; + +# Test without default +CREATE TABLE t_ephemeral_02205_1 (x UInt32 DEFAULT y, y UInt32 EPHEMERAL, z UInt32 DEFAULT 5) ENGINE = Memory; + +DESCRIBE t_ephemeral_02205_1; + +# Test INSERT without columns list - should participate 
only ordinary columns (x, z) +INSERT INTO t_ephemeral_02205_1 VALUES (1, 2); +# SELECT * should only return ordinary columns (x, z) - ephemeral is not stored in the table +SELECT * FROM t_ephemeral_02205_1; + +TRUNCATE TABLE t_ephemeral_02205_1; + +INSERT INTO t_ephemeral_02205_1 VALUES (DEFAULT, 2); +SELECT * FROM t_ephemeral_02205_1; + +TRUNCATE TABLE t_ephemeral_02205_1; + +# Test INSERT using ephemerals default +INSERT INTO t_ephemeral_02205_1 (x, y) VALUES (DEFAULT, DEFAULT); +SELECT * FROM t_ephemeral_02205_1; + +TRUNCATE TABLE t_ephemeral_02205_1; + +# Test INSERT using explicit ephemerals value +INSERT INTO t_ephemeral_02205_1 (x, y) VALUES (DEFAULT, 7); +SELECT * FROM t_ephemeral_02205_1; + +# Test ALTER TABLE DELETE +ALTER TABLE t_ephemeral_02205_1 DELETE WHERE x = 7; +SELECT * FROM t_ephemeral_02205_1; + +TRUNCATE TABLE t_ephemeral_02205_1; + +# Test INSERT into column, defaulted to ephemeral, but explicitly provided with value +INSERT INTO t_ephemeral_02205_1 (x, y) VALUES (21, 7); +SELECT * FROM t_ephemeral_02205_1; + +DROP TABLE IF EXISTS t_ephemeral_02205_1; + diff --git a/tests/queries/0_stateless/02210_processors_profile_log.reference b/tests/queries/0_stateless/02210_processors_profile_log.reference new file mode 100644 index 00000000000..a056b445bbd --- /dev/null +++ b/tests/queries/0_stateless/02210_processors_profile_log.reference @@ -0,0 +1,38 @@ +-- { echo } +EXPLAIN PIPELINE SELECT sleep(1); +(Expression) +ExpressionTransform + (SettingQuotaAndLimits) + (ReadFromStorage) + SourceFromSingleChunk 0 → 1 +SELECT sleep(1) SETTINGS log_processors_profiles=true, log_queries=1, log_queries_min_type='QUERY_FINISH'; +0 +SYSTEM FLUSH LOGS; +WITH + ( + SELECT query_id + FROM system.query_log + WHERE current_database = currentDatabase() AND Settings['log_processors_profiles']='1' + ) AS query_id_ +SELECT + name, + multiIf( + -- ExpressionTransform executes sleep(), + -- so IProcessor::work() will spend 1 sec. + name = 'ExpressionTransform', elapsed_us>1e6, + -- SourceFromSingleChunk, that feed data to ExpressionTransform, + -- will feed first block and then wait in PortFull. + name = 'SourceFromSingleChunk', output_wait_elapsed_us>1e6, + -- NullSource/LazyOutputFormatLazyOutputFormat are the outputs + -- so they cannot starts to execute before sleep(1) will be executed. + input_wait_elapsed_us>1e6) + elapsed +FROM system.processors_profile_log +WHERE query_id = query_id_ +ORDER BY name; +ExpressionTransform 1 +LazyOutputFormat 1 +LimitsCheckingTransform 1 +NullSource 1 +NullSource 1 +SourceFromSingleChunk 1 diff --git a/tests/queries/0_stateless/02210_processors_profile_log.sql b/tests/queries/0_stateless/02210_processors_profile_log.sql new file mode 100644 index 00000000000..160f8009262 --- /dev/null +++ b/tests/queries/0_stateless/02210_processors_profile_log.sql @@ -0,0 +1,28 @@ +-- { echo } +EXPLAIN PIPELINE SELECT sleep(1); + +SELECT sleep(1) SETTINGS log_processors_profiles=true, log_queries=1, log_queries_min_type='QUERY_FINISH'; +SYSTEM FLUSH LOGS; + +WITH + ( + SELECT query_id + FROM system.query_log + WHERE current_database = currentDatabase() AND Settings['log_processors_profiles']='1' + ) AS query_id_ +SELECT + name, + multiIf( + -- ExpressionTransform executes sleep(), + -- so IProcessor::work() will spend 1 sec. + name = 'ExpressionTransform', elapsed_us>1e6, + -- SourceFromSingleChunk, that feed data to ExpressionTransform, + -- will feed first block and then wait in PortFull. 
+ name = 'SourceFromSingleChunk', output_wait_elapsed_us>1e6, + -- NullSource/LazyOutputFormatLazyOutputFormat are the outputs + -- so they cannot starts to execute before sleep(1) will be executed. + input_wait_elapsed_us>1e6) + elapsed +FROM system.processors_profile_log +WHERE query_id = query_id_ +ORDER BY name; diff --git a/tests/queries/0_stateless/02211_shcema_inference_from_stdin.reference b/tests/queries/0_stateless/02211_shcema_inference_from_stdin.reference index d176e0ee1ed..6920aa16198 100644 --- a/tests/queries/0_stateless/02211_shcema_inference_from_stdin.reference +++ b/tests/queries/0_stateless/02211_shcema_inference_from_stdin.reference @@ -9,7 +9,7 @@ x Nullable(Float64) 7 8 9 -c1 Nullable(String) -c2 Nullable(String) -c3 Nullable(String) +c1 Nullable(Float64) +c2 Nullable(Float64) +c3 Nullable(Float64) 1 2 3 diff --git a/tests/queries/0_stateless/02221_parallel_replicas_bug.sh b/tests/queries/0_stateless/02221_parallel_replicas_bug.sh index b4ac6817a54..cce32bf8272 100755 --- a/tests/queries/0_stateless/02221_parallel_replicas_bug.sh +++ b/tests/queries/0_stateless/02221_parallel_replicas_bug.sh @@ -4,4 +4,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --allow_experimental_parallel_reading_from_replicas=1 -nmT < "$CURDIR"/01099_parallel_distributed_insert_select.sql > /dev/null +${CLICKHOUSE_CLIENT} --allow_experimental_parallel_reading_from_replicas=1 -nm < "$CURDIR"/01099_parallel_distributed_insert_select.sql > /dev/null diff --git a/tests/queries/0_stateless/02222_create_table_without_columns_metadata.reference b/tests/queries/0_stateless/02222_create_table_without_columns_metadata.reference index 9e9e0082cb3..f32b0eb8a92 100644 --- a/tests/queries/0_stateless/02222_create_table_without_columns_metadata.reference +++ b/tests/queries/0_stateless/02222_create_table_without_columns_metadata.reference @@ -1,3 +1,3 @@ -CREATE TABLE default.test\n(\n `y` Nullable(String),\n `x` Nullable(Float64)\n)\nENGINE = File(\'JSONEachRow\', \'data.jsonl\') +CREATE TABLE default.test\n(\n `x` Nullable(Float64),\n `y` Nullable(String)\n)\nENGINE = File(\'JSONEachRow\', \'data.jsonl\') OK OK diff --git a/tests/queries/0_stateless/02226_s3_with_cache.reference b/tests/queries/0_stateless/02226_s3_with_cache.reference index 214addac2d6..4041f51b3f9 100644 --- a/tests/queries/0_stateless/02226_s3_with_cache.reference +++ b/tests/queries/0_stateless/02226_s3_with_cache.reference @@ -1,2 +1,4 @@ SELECT 1, * FROM test LIMIT 10 FORMAT Null; 1 0 1 SELECT 2, * FROM test LIMIT 10 FORMAT Null; 0 1 0 +0 +SELECT 3, * FROM test LIMIT 10 FORMAT Null; 1 1 0 diff --git a/tests/queries/0_stateless/02226_s3_with_cache.sql b/tests/queries/0_stateless/02226_s3_with_cache.sql index b3126a419df..d470f2ef140 100644 --- a/tests/queries/0_stateless/02226_s3_with_cache.sql +++ b/tests/queries/0_stateless/02226_s3_with_cache.sql @@ -1,7 +1,9 @@ -- Tags: no-parallel, no-fasttest, long SET max_memory_usage='20G'; +SET enable_filesystem_cache_on_write_operations = 0; +DROP TABLE IF EXISTS test; CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache'; INSERT INTO test SELECT * FROM generateRandom('key UInt32, value String') LIMIT 10000; @@ -41,4 +43,27 @@ SET remote_filesystem_read_method='threadpool'; SELECT * FROM test WHERE value LIKE '%abc%' ORDER BY value LIMIT 10 FORMAT Null; +SET enable_filesystem_cache_on_write_operations = 1; + +TRUNCATE TABLE test; +SELECT count() FROM test; + +SYSTEM DROP 
FILESYSTEM CACHE; + +INSERT INTO test SELECT * FROM generateRandom('key UInt32, value String') LIMIT 10000; + +SELECT 3, * FROM test LIMIT 10 FORMAT Null; + +SYSTEM FLUSH LOGS; +SELECT query, + ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read, + ProfileEvents['RemoteFSCacheReadBytes'] > 0 as remote_fs_cache_read, + ProfileEvents['RemoteFSCacheDownloadBytes'] > 0 as remote_fs_read_and_download +FROM system.query_log +WHERE query LIKE 'SELECT 3, * FROM test LIMIT%' +AND type = 'QueryFinish' +AND current_database = currentDatabase() +ORDER BY query_start_time DESC +LIMIT 1; + DROP TABLE test; diff --git a/tests/queries/0_stateless/02233_interpolate_1.reference b/tests/queries/0_stateless/02233_interpolate_1.reference new file mode 100644 index 00000000000..64f5a8308bf --- /dev/null +++ b/tests/queries/0_stateless/02233_interpolate_1.reference @@ -0,0 +1,240 @@ +0 0 +0.5 0 +1 original 1 +1.5 0 +2 0 +2.5 0 +3 0 +3.5 0 +4 original 4 +4.5 0 +5 0 +5.5 0 +6 0 +6.5 0 +7 original 7 +7.5 0 +8 0 +8.5 0 +9 0 +9.5 0 +10 0 +10.5 0 +11 0 +11.5 0 +0 42 +0.5 42 +1 original 1 +1.5 42 +2 42 +2.5 42 +3 42 +3.5 42 +4 original 4 +4.5 42 +5 42 +5.5 42 +6 42 +6.5 42 +7 original 7 +7.5 42 +8 42 +8.5 42 +9 42 +9.5 42 +10 42 +10.5 42 +11 42 +11.5 42 +0 0 +0.5 0 +1 original 1 +1.5 1 +2 1 +2.5 1 +3 1 +3.5 1 +4 original 4 +4.5 4 +5 4 +5.5 4 +6 4 +6.5 4 +7 original 7 +7.5 7 +8 7 +8.5 7 +9 7 +9.5 7 +10 7 +10.5 7 +11 7 +11.5 7 +0 1 +0.5 2 +1 original 1 +1.5 2 +2 3 +2.5 4 +3 5 +3.5 6 +4 original 4 +4.5 5 +5 6 +5.5 7 +6 8 +6.5 9 +7 original 7 +7.5 8 +8 9 +8.5 10 +9 11 +9.5 12 +10 13 +10.5 14 +11 15 +11.5 16 +0 1 +0.5 2 +1 original 2 +1.5 3 +2 4 +2.5 5 +3 6 +3.5 7 +4 original 5 +4.5 6 +5 7 +5.5 8 +6 9 +6.5 10 +7 original 8 +7.5 9 +8 10 +8.5 11 +9 12 +9.5 13 +10 14 +10.5 15 +11 16 +11.5 17 + 0 + 0 +original 1 + 3 + 3 + 3 + 3 + 3 +original 4 + 9 + 9 + 9 + 9 + 9 +original 7 + 15 + 15 + 15 + 15 + 15 + 15 + 15 + 15 + 15 +0 0 +0.5 0 +1 original 1 +1.5 3 +2 3 +2.5 3 +3 3 +3.5 3 +4 original 4 +4.5 9 +5 9 +5.5 9 +6 9 +6.5 9 +7 original 7 +7.5 15 +8 15 +8.5 15 +9 15 +9.5 15 +10 15 +10.5 15 +11 15 +11.5 15 +0 1 +0.5 2 +1 original 1 +1.5 2 +2 3 +2.5 4 +3 5 +3.5 6 +4 original 4 +4.5 5 +5 6 +5.5 7 +6 8 +6.5 9 +7 original 7 +7.5 8 +8 9 +8.5 10 +9 11 +9.5 12 +10 13 +10.5 14 +11 15 +11.5 16 +0 \N +0.5 \N +1 original \N +1.5 \N +2 \N +2.5 \N +3 \N +3.5 \N +4 original \N +4.5 \N +5 \N +5.5 \N +6 \N +6.5 \N +7 original \N +7.5 \N +8 \N +8.5 \N +9 \N +9.5 \N +10 \N +10.5 \N +11 \N +11.5 \N +0 \N +0.5 \N +1 original \N +1.5 \N +2 \N +2.5 \N +3 \N +3.5 \N +4 original \N +4.5 \N +5 \N +5.5 \N +6 \N +6.5 \N +7 original \N +7.5 \N +8 \N +8.5 \N +9 \N +9.5 \N +10 \N +10.5 \N +11 \N +11.5 \N diff --git a/tests/queries/0_stateless/02233_interpolate_1.sql b/tests/queries/0_stateless/02233_interpolate_1.sql new file mode 100644 index 00000000000..b11385e17b6 --- /dev/null +++ b/tests/queries/0_stateless/02233_interpolate_1.sql @@ -0,0 +1,72 @@ +# Test WITH FILL without INTERPOLATE +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5; + +# Test INTERPOLATE with const +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS 42); + +# Test INTERPOLATE with field value +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS 
source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS inter); + +# Test INTERPOLATE with expression +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS inter + 1); + +# Test INTERPOLATE with incompatible const - should produce error +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS 'inter'); -- { serverError 6 } + +# Test INTERPOLATE with incompatible expression - should produce error +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS inter||'inter'); -- { serverError 44 } + +# Test INTERPOLATE with column from WITH FILL expression - should produce error +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (n AS n); -- { serverError 475 } + +# Test INTERPOLATE with inconsistent column - should produce error +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS source); -- { serverError 32 } + +# Test INTERPOLATE with aliased column +SELECT n, source, inter + 1 AS inter_p FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter FROM numbers(10) WHERE (number % 3) = 1 +) ORDER BY n ASC WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE ( inter_p AS inter_p + 1 ); + +# Test INTERPOLATE with column not present in select +SELECT source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter, number + 1 AS inter2 FROM numbers(10) WHERE (number % 3) = 1 +) ORDER BY n ASC WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE ( inter AS inter2 + inter ); + +# Test INTERPOLATE in sub-select +SELECT n, source, inter FROM ( + SELECT n, source, inter, inter2 FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter, number + 1 AS inter2 FROM numbers(10) WHERE (number % 3) = 1 + ) ORDER BY n ASC WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE ( inter AS inter + inter2 ) +); + +# Test INTERPOLATE with aggregates +SELECT n, any(source), sum(inter) AS inter_s FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter FROM numbers(10) WHERE (number % 3) = 1 +) GROUP BY n +ORDER BY n ASC WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE ( inter_s AS inter_s + 1 ); + +# Test INTERPOLATE with Nullable in result +SELECT n, source, inter + NULL AS inter_p FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter FROM numbers(10) WHERE (number % 3) = 1 +) ORDER BY n ASC WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE ( inter_p AS inter_p + 1 ); + +# Test INTERPOLATE with Nullable in source +SELECT n, source, inter AS inter_p FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number + NULL AS inter FROM numbers(10) WHERE (number % 3) = 1 +) ORDER BY n ASC WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE ( 
inter_p AS inter_p + 1 ); diff --git a/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh b/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh index 6abe1e30334..f736751726d 100755 --- a/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh +++ b/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh @@ -6,5 +6,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_LOCAL --query="SELECT n" 2>&1 | grep -q "Code: 47. DB::Exception: Missing columns:" && echo 'OK' || echo 'FAIL' ||: -$CLICKHOUSE_LOCAL --testmode --query="SELECT n -- { serverError 47 }" - +$CLICKHOUSE_LOCAL --query="SELECT n -- { serverError 47 }" diff --git a/tests/queries/0_stateless/02235_remote_fs_cache_stress.reference b/tests/queries/0_stateless/02235_remote_fs_cache_stress.reference new file mode 100644 index 00000000000..1fa6905307d --- /dev/null +++ b/tests/queries/0_stateless/02235_remote_fs_cache_stress.reference @@ -0,0 +1,32 @@ +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK +OK diff --git a/tests/queries/0_stateless/02235_remote_fs_cache_stress.sh b/tests/queries/0_stateless/02235_remote_fs_cache_stress.sh new file mode 100755 index 00000000000..a5c0ee6ecff --- /dev/null +++ b/tests/queries/0_stateless/02235_remote_fs_cache_stress.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long, no-random-settings + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + + +${CLICKHOUSE_CLIENT} --multiquery --multiline --query=""" + +DROP TABLE IF EXISTS t_01411; +DROP TABLE IF EXISTS t_01411_num; +drop table if exists lc_dict_reading; + +CREATE TABLE t_01411( + str LowCardinality(String), + arr Array(LowCardinality(String)) default [str] +) ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO t_01411 (str) SELECT concat('asdf', toString(number % 10000)) FROM numbers(100000); + +CREATE TABLE t_01411_num( + num UInt8, + arr Array(LowCardinality(Int64)) default [num] +) ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO t_01411_num (num) SELECT number % 1000 FROM numbers(100000); + +create table lc_dict_reading (val UInt64, str StringWithDictionary, pat String) engine = MergeTree order by val; +insert into lc_dict_reading select number, if(number < 8192 * 4, number % 100, number) as s, s from system.numbers limit 100000; +""" + +function go() +{ + +${CLICKHOUSE_CLIENT} --multiquery --multiline --query=""" + +select sum(toUInt64(str)), sum(toUInt64(pat)) from lc_dict_reading where val < 8129 or val > 8192 * 4; + +SELECT count() FROM t_01411 WHERE str = 'asdf337'; +SELECT count() FROM t_01411 WHERE arr[1] = 'asdf337'; +SELECT count() FROM t_01411 WHERE has(arr, 'asdf337'); +SELECT count() FROM t_01411 WHERE indexOf(arr, 'asdf337') > 0; + +SELECT count() FROM t_01411 WHERE arr[1] = str; +SELECT count() FROM t_01411 WHERE has(arr, str); +SELECT count() FROM t_01411 WHERE indexOf(arr, str) > 0; + +SELECT count() FROM t_01411_num WHERE num = 42; +SELECT count() FROM t_01411_num WHERE arr[1] = 42; +SELECT count() FROM t_01411_num WHERE has(arr, 42); +SELECT count() FROM t_01411_num WHERE indexOf(arr, 42) > 0; + +SELECT count() FROM t_01411_num WHERE arr[1] = num; +SELECT count() FROM t_01411_num WHERE has(arr, num); +SELECT count() FROM t_01411_num WHERE indexOf(arr, num) > 0; +SELECT count() FROM t_01411_num WHERE indexOf(arr, num % 337) > 0; + +SELECT indexOf(['a', 'b', 'c'], toLowCardinality('a')); +SELECT 
indexOf(['a', 'b', NULL], toLowCardinality('a')); +""" +} + +for i in `seq 1 32`; do go | grep -q "Exception" && echo 'FAIL' || echo 'OK' ||: & done + +wait + +${CLICKHOUSE_CLIENT} --multiquery --multiline --query=""" +DROP TABLE IF EXISTS t_01411; +DROP TABLE IF EXISTS t_01411_num; +""" diff --git a/tests/queries/0_stateless/02236_explain_pipeline_join.reference b/tests/queries/0_stateless/02236_explain_pipeline_join.reference new file mode 100644 index 00000000000..ed993e2a1e7 --- /dev/null +++ b/tests/queries/0_stateless/02236_explain_pipeline_join.reference @@ -0,0 +1,19 @@ +(Expression) +ExpressionTransform + (Join) + JoiningTransform 2 → 1 + (Expression) + ExpressionTransform + (SettingQuotaAndLimits) + (Limit) + Limit + (ReadFromStorage) + Numbers 0 → 1 + (Expression) + FillingRightJoinSide + ExpressionTransform + (SettingQuotaAndLimits) + (Limit) + Limit + (ReadFromStorage) + Numbers 0 → 1 diff --git a/tests/queries/0_stateless/02236_explain_pipeline_join.sql b/tests/queries/0_stateless/02236_explain_pipeline_join.sql new file mode 100644 index 00000000000..de885ed74ee --- /dev/null +++ b/tests/queries/0_stateless/02236_explain_pipeline_join.sql @@ -0,0 +1,10 @@ +EXPLAIN PIPELINE +SELECT * FROM +( + SELECT * FROM system.numbers LIMIT 10 +) t1 +ALL LEFT JOIN +( + SELECT * FROM system.numbers LIMIT 10 +) t2 +USING number; diff --git a/tests/queries/0_stateless/02240_get_type_serialization_streams.reference b/tests/queries/0_stateless/02240_get_type_serialization_streams.reference new file mode 100644 index 00000000000..3537720214f --- /dev/null +++ b/tests/queries/0_stateless/02240_get_type_serialization_streams.reference @@ -0,0 +1,8 @@ +['{ArraySizes}','{ArrayElements, Regular}'] +['{ArraySizes}','{ArrayElements, TupleElement(keys, escape_tuple_delimiter = true), Regular}','{ArrayElements, TupleElement(values, escape_tuple_delimiter = true), Regular}'] +['{TupleElement(1, escape_tuple_delimiter = true), Regular}','{TupleElement(2, escape_tuple_delimiter = true), Regular}','{TupleElement(3, escape_tuple_delimiter = true), Regular}'] +['{DictionaryKeys, Regular}','{DictionaryIndexes}'] +['{NullMap}','{NullableElements, Regular}'] +['{ArraySizes}','{ArrayElements, Regular}'] +['{ArraySizes}','{ArrayElements, TupleElement(keys, escape_tuple_delimiter = true), Regular}','{ArrayElements, TupleElement(values, escape_tuple_delimiter = true), Regular}'] +['{TupleElement(1, escape_tuple_delimiter = true), Regular}','{TupleElement(2, escape_tuple_delimiter = true), Regular}','{TupleElement(3, escape_tuple_delimiter = true), Regular}','{TupleElement(4, escape_tuple_delimiter = true), Regular}'] diff --git a/tests/queries/0_stateless/02240_get_type_serialization_streams.sql b/tests/queries/0_stateless/02240_get_type_serialization_streams.sql new file mode 100644 index 00000000000..72a66269e22 --- /dev/null +++ b/tests/queries/0_stateless/02240_get_type_serialization_streams.sql @@ -0,0 +1,8 @@ +select getTypeSerializationStreams('Array(Int8)'); +select getTypeSerializationStreams('Map(String, Int64)'); +select getTypeSerializationStreams('Tuple(String, Int64, Float64)'); +select getTypeSerializationStreams('LowCardinality(String)'); +select getTypeSerializationStreams('Nullable(String)'); +select getTypeSerializationStreams([1,2,3]); +select getTypeSerializationStreams(map('a', 1, 'b', 2)); +select getTypeSerializationStreams(tuple('a', 1, 'b', 2)); diff --git a/tests/queries/0_stateless/02240_protobuflist_format_persons.sh b/tests/queries/0_stateless/02240_protobuflist_format_persons.sh index 
dec14b54eb2..637e01b9e63 100755 --- a/tests/queries/0_stateless/02240_protobuflist_format_persons.sh +++ b/tests/queries/0_stateless/02240_protobuflist_format_persons.sh @@ -72,7 +72,7 @@ $CURDIR/helpers/protobuf_length_delimited_encoder.py --decode_and_check --format echo echo "Roundtrip:" $CLICKHOUSE_CLIENT --query "CREATE TABLE roundtrip_persons_02240 AS persons_02240" -$CLICKHOUSE_CLIENT --query "INSERT INTO roundtrip_persons_02240 FORMAT ProtobufList SETTINGS format_schema='$SCHEMADIR/02240_protobuflist1_format_persons:Person'" < "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "INSERT INTO roundtrip_persons_02240 SETTINGS format_schema='$SCHEMADIR/02240_protobuflist1_format_persons:Person' FORMAT ProtobufList" < "$BINARY_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM roundtrip_persons_02240 ORDER BY name" rm "$BINARY_FILE_PATH" @@ -86,7 +86,7 @@ $CURDIR/helpers/protobuf_length_delimited_encoder.py --decode_and_check --format echo echo "Roundtrip:" $CLICKHOUSE_CLIENT --query "CREATE TABLE alt_persons_02240 AS persons_02240" -$CLICKHOUSE_CLIENT --query "INSERT INTO alt_persons_02240 FORMAT ProtobufList SETTINGS format_schema='$SCHEMADIR/02240_protobuflist2_format_persons:AltPerson'" < "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "INSERT INTO alt_persons_02240 SETTINGS format_schema='$SCHEMADIR/02240_protobuflist2_format_persons:AltPerson' FORMAT ProtobufList" < "$BINARY_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM alt_persons_02240 ORDER BY name" rm "$BINARY_FILE_PATH" @@ -100,7 +100,7 @@ $CURDIR/helpers/protobuf_length_delimited_encoder.py --decode_and_check --format # echo echo "Roundtrip:" $CLICKHOUSE_CLIENT --query "CREATE TABLE str_persons_02240 AS persons_02240" -$CLICKHOUSE_CLIENT --query "INSERT INTO str_persons_02240 FORMAT ProtobufList SETTINGS format_schema='$SCHEMADIR/02240_protobuflist3_format_persons:StrPerson'" < "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "INSERT INTO str_persons_02240 SETTINGS format_schema='$SCHEMADIR/02240_protobuflist3_format_persons:StrPerson' FORMAT ProtobufList" < "$BINARY_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM str_persons_02240 ORDER BY name" rm "$BINARY_FILE_PATH" @@ -114,7 +114,7 @@ $CURDIR/helpers/protobuf_length_delimited_encoder.py --decode_and_check --format echo echo "Roundtrip:" $CLICKHOUSE_CLIENT --query "CREATE TABLE syntax2_persons_02240 AS persons_02240" -$CLICKHOUSE_CLIENT --query "INSERT INTO syntax2_persons_02240 FORMAT ProtobufList SETTINGS format_schema='$SCHEMADIR/02240_protobuflist_format_persons_syntax2:Syntax2Person'" < "$BINARY_FILE_PATH" +$CLICKHOUSE_CLIENT --query "INSERT INTO syntax2_persons_02240 SETTINGS format_schema='$SCHEMADIR/02240_protobuflist_format_persons_syntax2:Syntax2Person' FORMAT ProtobufList" < "$BINARY_FILE_PATH" $CLICKHOUSE_CLIENT --query "SELECT * FROM syntax2_persons_02240 ORDER BY name" rm "$BINARY_FILE_PATH" diff --git a/tests/queries/0_stateless/02240_system_remote_filesystem_cache.reference b/tests/queries/0_stateless/02240_system_remote_filesystem_cache.reference new file mode 100644 index 00000000000..8bcb7e1dd42 --- /dev/null +++ b/tests/queries/0_stateless/02240_system_remote_filesystem_cache.reference @@ -0,0 +1,19 @@ +-- { echo } + +SYSTEM DROP FILESYSTEM CACHE; +SET enable_filesystem_cache_on_write_operations=0; +DROP TABLE IF EXISTS test; +CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760; +INSERT INTO test SELECT number, toString(number) FROM numbers(100); 
+SELECT * FROM test FORMAT Null; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size; +0 0 1 +0 79 80 +0 745 746 +SYSTEM DROP FILESYSTEM CACHE; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache; +SELECT * FROM test FORMAT Null; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache; +0 745 746 +SYSTEM DROP FILESYSTEM CACHE; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache; diff --git a/tests/queries/0_stateless/02240_system_remote_filesystem_cache.sql b/tests/queries/0_stateless/02240_system_remote_filesystem_cache.sql new file mode 100644 index 00000000000..aa469779130 --- /dev/null +++ b/tests/queries/0_stateless/02240_system_remote_filesystem_cache.sql @@ -0,0 +1,18 @@ +-- Tags: no-parallel, no-fasttest, no-s3-storage + +-- { echo } + +SYSTEM DROP FILESYSTEM CACHE; +SET enable_filesystem_cache_on_write_operations=0; +DROP TABLE IF EXISTS test; +CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760; +INSERT INTO test SELECT number, toString(number) FROM numbers(100); + +SELECT * FROM test FORMAT Null; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size; +SYSTEM DROP FILESYSTEM CACHE; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache; +SELECT * FROM test FORMAT Null; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache; +SYSTEM DROP FILESYSTEM CACHE; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache; diff --git a/tests/queries/0_stateless/02240_tskv_schema_inference_bug.reference b/tests/queries/0_stateless/02240_tskv_schema_inference_bug.reference index a8abc33648e..d0ced74f8f6 100644 --- a/tests/queries/0_stateless/02240_tskv_schema_inference_bug.reference +++ b/tests/queries/0_stateless/02240_tskv_schema_inference_bug.reference @@ -1,8 +1,8 @@ +a Nullable(Float64) b Nullable(String) -c Nullable(String) -a Nullable(String) -s1 \N 1 -} [2] 2 -\N \N \N -\N \N \N -\N [3] \N +c Array(Nullable(Float64)) +1 s1 [] +2 } [2] +\N \N [] +\N \N [] +\N \N [3] diff --git a/tests/queries/0_stateless/02241_parquet_bad_column.sh b/tests/queries/0_stateless/02241_parquet_bad_column.sh index 9efd11cbbe1..cfe8c2d0dbe 100755 --- a/tests/queries/0_stateless/02241_parquet_bad_column.sh +++ b/tests/queries/0_stateless/02241_parquet_bad_column.sh @@ -22,7 +22,7 @@ for case_insensitive in "true" "false"; do original_width Nullable(UInt32), original_height Nullable(UInt32)) engine=Memory" - cat $CUR_DIR/data_parquet_bad_column/metadata_0.parquet | $CLICKHOUSE_CLIENT -q "insert into test_02241 format Parquet SETTINGS input_format_parquet_case_insensitive_column_matching=$case_insensitive" + cat $CUR_DIR/data_parquet_bad_column/metadata_0.parquet | $CLICKHOUSE_CLIENT -q "insert into test_02241 SETTINGS input_format_parquet_case_insensitive_column_matching=$case_insensitive format Parquet" $CLICKHOUSE_CLIENT -q "select count() from test_02241" $CLICKHOUSE_CLIENT -q "drop table test_02241" diff --git a/tests/queries/0_stateless/02241_remote_filesystem_cache_on_insert.reference b/tests/queries/0_stateless/02241_remote_filesystem_cache_on_insert.reference new file mode 100644 index 00000000000..b2269c16264 --- /dev/null +++ 
b/tests/queries/0_stateless/02241_remote_filesystem_cache_on_insert.reference @@ -0,0 +1,75 @@ +-- { echo } + +SET enable_filesystem_cache_on_write_operations=1; +DROP TABLE IF EXISTS test; +CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760; +SYSTEM DROP FILESYSTEM CACHE; +SELECT file_segment_range_begin, file_segment_range_end, size, state FROM (SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path) WHERE endsWith(local_path, 'data.bin') FORMAT Vertical; +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path; +0 +SELECT count() FROM system.filesystem_cache; +0 +INSERT INTO test SELECT number, toString(number) FROM numbers(100); +SELECT file_segment_range_begin, file_segment_range_end, size, state FROM (SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path) WHERE endsWith(local_path, 'data.bin') FORMAT Vertical; +Row 1: +────── +file_segment_range_begin: 0 +file_segment_range_end: 745 +size: 746 +state: DOWNLOADED +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path; +7 +SELECT count() FROM system.filesystem_cache; +7 +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0; +0 +SELECT * FROM test FORMAT Null; +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0; +2 +SELECT * FROM test FORMAT Null; +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0; +2 +SELECT count() size FROM system.filesystem_cache; +7 +SYSTEM DROP FILESYSTEM CACHE; +INSERT INTO test SELECT number, toString(number) FROM numbers(100, 200); +SELECT file_segment_range_begin, file_segment_range_end, size, state FROM (SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path) WHERE endsWith(local_path, 'data.bin') FORMAT Vertical; +Row 1: +────── +file_segment_range_begin: 0 +file_segment_range_end: 1659 +size: 1660 +state: DOWNLOADED +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path; +7 +SELECT count() FROM system.filesystem_cache; +7 +SELECT count() FROM system.filesystem_cache; +7 +INSERT INTO test SELECT number, toString(number) FROM numbers(100) SETTINGS enable_filesystem_cache_on_write_operations=0; +SELECT count() FROM system.filesystem_cache; +7 +INSERT INTO test SELECT number, toString(number) FROM numbers(100); +INSERT INTO test SELECT number, toString(number) FROM 
numbers(300, 10000); +SELECT count() FROM system.filesystem_cache; +21 +OPTIMIZE TABLE test FINAL; +SELECT count() FROM system.filesystem_cache; +27 +SET mutations_sync=2; +ALTER TABLE test UPDATE value = 'kek' WHERE key = 100; +SELECT count() FROM system.filesystem_cache; +28 +INSERT INTO test SELECT number, toString(number) FROM numbers(5000000); +SYSTEM FLUSH LOGS; +SELECT query, ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read +FROM system.query_log +WHERE query LIKE 'SELECT number, toString(number) FROM numbers(5000000)%' +AND type = 'QueryFinish' +AND current_database = currentDatabase() +ORDER BY query_start_time DESC +LIMIT 1; +SELECT count() FROM test; +5010500 +SELECT count() FROM test WHERE value LIKE '%010%'; +18816 diff --git a/tests/queries/0_stateless/02241_remote_filesystem_cache_on_insert.sql b/tests/queries/0_stateless/02241_remote_filesystem_cache_on_insert.sql new file mode 100644 index 00000000000..c3ab1de3693 --- /dev/null +++ b/tests/queries/0_stateless/02241_remote_filesystem_cache_on_insert.sql @@ -0,0 +1,64 @@ +-- Tags: no-parallel, no-fasttest, no-s3-storage + +-- { echo } + +SET enable_filesystem_cache_on_write_operations=1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760; + +SYSTEM DROP FILESYSTEM CACHE; + +SELECT file_segment_range_begin, file_segment_range_end, size, state FROM (SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path) WHERE endsWith(local_path, 'data.bin') FORMAT Vertical; +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path; +SELECT count() FROM system.filesystem_cache; + +INSERT INTO test SELECT number, toString(number) FROM numbers(100); + +SELECT file_segment_range_begin, file_segment_range_end, size, state FROM (SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path) WHERE endsWith(local_path, 'data.bin') FORMAT Vertical; +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path; +SELECT count() FROM system.filesystem_cache; + +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0; + +SELECT * FROM test FORMAT Null; +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0; + +SELECT * FROM test FORMAT Null; +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0; + +SELECT count() size FROM system.filesystem_cache; + +SYSTEM DROP FILESYSTEM CACHE; + +INSERT INTO test SELECT number, toString(number) FROM numbers(100, 200); + +SELECT file_segment_range_begin, file_segment_range_end, size, state FROM (SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM 
system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path) WHERE endsWith(local_path, 'data.bin') FORMAT Vertical; +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path; +SELECT count() FROM system.filesystem_cache; + +SELECT count() FROM system.filesystem_cache; +INSERT INTO test SELECT number, toString(number) FROM numbers(100) SETTINGS enable_filesystem_cache_on_write_operations=0; +SELECT count() FROM system.filesystem_cache; + +INSERT INTO test SELECT number, toString(number) FROM numbers(100); +INSERT INTO test SELECT number, toString(number) FROM numbers(300, 10000); +SELECT count() FROM system.filesystem_cache; +OPTIMIZE TABLE test FINAL; +SELECT count() FROM system.filesystem_cache; + +SET mutations_sync=2; +ALTER TABLE test UPDATE value = 'kek' WHERE key = 100; +SELECT count() FROM system.filesystem_cache; + +INSERT INTO test SELECT number, toString(number) FROM numbers(5000000); +SYSTEM FLUSH LOGS; +SELECT query, ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read +FROM system.query_log +WHERE query LIKE 'SELECT number, toString(number) FROM numbers(5000000)%' +AND type = 'QueryFinish' +AND current_database = currentDatabase() +ORDER BY query_start_time DESC +LIMIT 1; +SELECT count() FROM test; +SELECT count() FROM test WHERE value LIKE '%010%'; diff --git a/tests/queries/0_stateless/02242_arrow_orc_parquet_nullable_schema_inference.reference b/tests/queries/0_stateless/02242_arrow_orc_parquet_nullable_schema_inference.reference new file mode 100644 index 00000000000..debc5c58936 --- /dev/null +++ b/tests/queries/0_stateless/02242_arrow_orc_parquet_nullable_schema_inference.reference @@ -0,0 +1,40 @@ +Arrow +x Nullable(UInt64) +arr1 Array(Nullable(UInt64)) +arr2 Array(Array(Nullable(String))) +arr3 Array(Tuple(Nullable(String), Nullable(UInt64))) +0 [0,1] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,0)] +\N [NULL,2] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,1)] +2 [2,3] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,2)] +\N [NULL,4] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,3)] +4 [4,5] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,4)] +ArrowStream +x Nullable(UInt64) +arr1 Array(Nullable(UInt64)) +arr2 Array(Array(Nullable(String))) +arr3 Array(Tuple(Nullable(String), Nullable(UInt64))) +0 [0,1] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,0)] +\N [NULL,2] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,1)] +2 [2,3] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,2)] +\N [NULL,4] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,3)] +4 [4,5] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,4)] +Parquet +x Nullable(UInt64) +arr1 Array(Nullable(UInt64)) +arr2 Array(Array(Nullable(String))) +arr3 Array(Tuple(Nullable(String), Nullable(UInt64))) +0 [0,1] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,0)] +\N [NULL,2] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,1)] +2 [2,3] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,2)] +\N [NULL,4] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,3)] +4 [4,5] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,4)] +ORC +x Nullable(Int64) +arr1 
Array(Nullable(Int64)) +arr2 Array(Array(Nullable(String))) +arr3 Array(Tuple(Nullable(String), Nullable(Int64))) +0 [0,1] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,0)] +\N [NULL,2] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,1)] +2 [2,3] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,2)] +\N [NULL,4] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,3)] +4 [4,5] [[NULL,'String'],[NULL],[]] [(NULL,NULL),('String',NULL),(NULL,4)] diff --git a/tests/queries/0_stateless/02242_arrow_orc_parquet_nullable_schema_inference.sh b/tests/queries/0_stateless/02242_arrow_orc_parquet_nullable_schema_inference.sh new file mode 100755 index 00000000000..1b6999e3f09 --- /dev/null +++ b/tests/queries/0_stateless/02242_arrow_orc_parquet_nullable_schema_inference.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Tags: no-parallel, no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + + +USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +FILE_NAME=test_02242.data +DATA_FILE=$USER_FILES_PATH/$FILE_NAME + +for format in Arrow ArrowStream Parquet ORC +do + echo $format + $CLICKHOUSE_CLIENT -q "select number % 2 ? NULL : number as x, [number % 2 ? NULL : number, number + 1] as arr1, [[NULL, 'String'], [NULL], []] as arr2, [(NULL, NULL), ('String', NULL), (NULL, number)] as arr3 from numbers(5) format $format" > $DATA_FILE + $CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', '$format')" + $CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', '$format')" +done + +rm $DATA_FILE diff --git a/tests/queries/0_stateless/02242_case_insensitive_column_matching.sh b/tests/queries/0_stateless/02242_case_insensitive_column_matching.sh index 8ebf2952ab3..42652615d7d 100755 --- a/tests/queries/0_stateless/02242_case_insensitive_column_matching.sh +++ b/tests/queries/0_stateless/02242_case_insensitive_column_matching.sh @@ -9,7 +9,7 @@ echo "Parquet" DATA_FILE=$CUR_DIR/data_parquet/case_insensitive_column_matching.parquet ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_load (iD String, scOre Int32) ENGINE = Memory" -cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO parquet_load FORMAT Parquet SETTINGS input_format_parquet_case_insensitive_column_matching=true" +cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO parquet_load SETTINGS input_format_parquet_case_insensitive_column_matching=true FORMAT Parquet" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load" ${CLICKHOUSE_CLIENT} --query="drop table parquet_load" @@ -17,7 +17,7 @@ echo "ORC" DATA_FILE=$CUR_DIR/data_orc/case_insensitive_column_matching.orc ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS orc_load" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE orc_load (iD String, sCorE Int32) ENGINE = Memory" -cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO orc_load FORMAT ORC SETTINGS input_format_orc_case_insensitive_column_matching=true" +cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO orc_load SETTINGS input_format_orc_case_insensitive_column_matching=true FORMAT ORC" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM orc_load" ${CLICKHOUSE_CLIENT} --query="drop table orc_load" @@ -25,6 +25,6 @@ echo "Arrow" DATA_FILE=$CUR_DIR/data_arrow/case_insensitive_column_matching.arrow ${CLICKHOUSE_CLIENT} --query="DROP 
TABLE IF EXISTS arrow_load" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE arrow_load (iD String, sCorE Int32) ENGINE = Memory" -cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO arrow_load FORMAT Arrow SETTINGS input_format_arrow_case_insensitive_column_matching=true" +cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO arrow_load SETTINGS input_format_arrow_case_insensitive_column_matching=true FORMAT Arrow" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM arrow_load" ${CLICKHOUSE_CLIENT} --query="drop table arrow_load" diff --git a/tests/queries/0_stateless/02242_case_insensitive_nested.sh b/tests/queries/0_stateless/02242_case_insensitive_nested.sh index c22f5695dc3..05d7bf4fc8e 100755 --- a/tests/queries/0_stateless/02242_case_insensitive_nested.sh +++ b/tests/queries/0_stateless/02242_case_insensitive_nested.sh @@ -17,7 +17,7 @@ for ((i = 0; i < 3; i++)) do echo ${formats[i]} ${CLICKHOUSE_CLIENT} --query="TRUNCATE TABLE nested_table" - cat $CUR_DIR/data_orc_arrow_parquet_nested/nested_table.${format_files[i]} | ${CLICKHOUSE_CLIENT} -q "INSERT INTO nested_table FORMAT ${formats[i]} SETTINGS input_format_${format_files[i]}_import_nested = 1, input_format_${format_files[i]}_case_insensitive_column_matching = true" + cat $CUR_DIR/data_orc_arrow_parquet_nested/nested_table.${format_files[i]} | ${CLICKHOUSE_CLIENT} -q "INSERT INTO nested_table SETTINGS input_format_${format_files[i]}_import_nested = 1, input_format_${format_files[i]}_case_insensitive_column_matching = true FORMAT ${formats[i]}" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM nested_table" diff --git a/tests/queries/0_stateless/02242_optimize_to_subcolumns_no_storage.sql b/tests/queries/0_stateless/02242_optimize_to_subcolumns_no_storage.sql index e6e4663c5aa..8f8485eb58f 100644 --- a/tests/queries/0_stateless/02242_optimize_to_subcolumns_no_storage.sql +++ b/tests/queries/0_stateless/02242_optimize_to_subcolumns_no_storage.sql @@ -1,3 +1,4 @@ +-- Tags: no-backward-compatibility-check:22.3.2.1 SET optimize_functions_to_subcolumns = 1; SELECT count(*) FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3 WHERE (n1.number = n2.number) AND (n2.number = n3.number); diff --git a/tests/queries/0_stateless/02243_arrow_read_null_type_to_nullable_column.reference b/tests/queries/0_stateless/02243_arrow_read_null_type_to_nullable_column.reference new file mode 100644 index 00000000000..f599e28b8ab --- /dev/null +++ b/tests/queries/0_stateless/02243_arrow_read_null_type_to_nullable_column.reference @@ -0,0 +1 @@ +10 diff --git a/tests/queries/0_stateless/02243_arrow_read_null_type_to_nullable_column.sh b/tests/queries/0_stateless/02243_arrow_read_null_type_to_nullable_column.sh new file mode 100755 index 00000000000..cc8db7fb316 --- /dev/null +++ b/tests/queries/0_stateless/02243_arrow_read_null_type_to_nullable_column.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "drop table if exists test_02243" +$CLICKHOUSE_CLIENT -q "create table test_02243 (image_path Nullable(String), + caption Nullable(String), + NSFW Nullable(String), + similarity Nullable(Float64), + LICENSE Nullable(String), + url Nullable(String), + key Nullable(UInt64), + shard_id Nullable(UInt64), + status Nullable(String), + error_message Nullable(String), + width Nullable(UInt32), + height Nullable(UInt32), + exif Nullable(String), + original_width Nullable(UInt32), + original_height Nullable(UInt32)) engine=Memory" + +cat $CUR_DIR/data_parquet_bad_column/metadata_0.parquet | $CLICKHOUSE_CLIENT --stacktrace -q "insert into test_02243 format Parquet" + +$CLICKHOUSE_CLIENT -q "select count() from test_02243" +$CLICKHOUSE_CLIENT -q "drop table test_02243" diff --git a/tests/queries/0_stateless/02244_column_names_in_shcmea_inference.reference b/tests/queries/0_stateless/02244_column_names_in_shcmea_inference.reference new file mode 100644 index 00000000000..d237caf630f --- /dev/null +++ b/tests/queries/0_stateless/02244_column_names_in_shcmea_inference.reference @@ -0,0 +1,8 @@ +x Nullable(String) +y Nullable(Float64) +x Nullable(String) +y Nullable(Float64) +x Nullable(String) +y Nullable(Float64) +x Nullable(String) +y Nullable(Float64) diff --git a/tests/queries/0_stateless/02244_column_names_in_shcmea_inference.sql b/tests/queries/0_stateless/02244_column_names_in_shcmea_inference.sql new file mode 100644 index 00000000000..af56856f0be --- /dev/null +++ b/tests/queries/0_stateless/02244_column_names_in_shcmea_inference.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest, no-parallel + +insert into function file('test_02244', 'TSV', 'x String, y UInt32') select 'Hello, world!', 42 settings engine_file_truncate_on_insert=1; +desc file('test_02244', 'TSV') settings column_names_for_schema_inference='x,y'; + +insert into function file('test_02244', 'CSV', 'x String, y UInt32') select 'Hello, world!', 42 settings engine_file_truncate_on_insert=1; +desc file('test_02244', 'CSV') settings column_names_for_schema_inference='x,y'; + +insert into function file('test_02244', 'JSONCompactEachRow', 'x String, y UInt32') select 'Hello, world!', 42 settings engine_file_truncate_on_insert=1; +desc file('test_02244', 'JSONCompactEachRow') settings column_names_for_schema_inference='x,y'; + +insert into function file('test_02244', 'Values', 'x String, y UInt32') select 'Hello, world!', 42 settings engine_file_truncate_on_insert=1; +desc file('test_02244', 'Values') settings column_names_for_schema_inference='x,y'; + diff --git a/tests/queries/0_stateless/02244_ip_address_invalid_insert.reference b/tests/queries/0_stateless/02244_ip_address_invalid_insert.reference new file mode 100644 index 00000000000..60e6a5da083 --- /dev/null +++ b/tests/queries/0_stateless/02244_ip_address_invalid_insert.reference @@ -0,0 +1,10 @@ +1.1.1.1 1.1.1.1 + 0.0.0.0 +1.1.1.1 1.1.1.1 + 0.0.0.0 +fe80::9801:43ff:fe1f:7690 fe80::9801:43ff:fe1f:7690 +1.1.1.1 :: + :: +fe80::9801:43ff:fe1f:7690 fe80::9801:43ff:fe1f:7690 +1.1.1.1 ::ffff:1.1.1.1 + :: diff --git a/tests/queries/0_stateless/02244_ip_address_invalid_insert.sql b/tests/queries/0_stateless/02244_ip_address_invalid_insert.sql new file mode 100644 index 00000000000..4057b9b2d98 --- /dev/null +++ b/tests/queries/0_stateless/02244_ip_address_invalid_insert.sql @@ -0,0 +1,81 @@ +DROP TABLE IF EXISTS test_table_ipv4; +CREATE TABLE test_table_ipv4 +( + ip String, + ipv4 IPv4 +) ENGINE = TinyLog; + +INSERT INTO test_table_ipv4 VALUES 
('1.1.1.1', '1.1.1.1'), ('', ''); --{clientError 441} + +SET input_format_ipv4_default_on_conversion_error = 1; + +INSERT INTO test_table_ipv4 VALUES ('1.1.1.1', '1.1.1.1'), ('', ''); +SELECT ip, ipv4 FROM test_table_ipv4; + +SET input_format_ipv4_default_on_conversion_error = 0; + +DROP TABLE test_table_ipv4; + +DROP TABLE IF EXISTS test_table_ipv4_materialized; +CREATE TABLE test_table_ipv4_materialized +( + ip String, + ipv6 IPv4 MATERIALIZED toIPv4(ip) +) ENGINE = TinyLog; + +INSERT INTO test_table_ipv4_materialized(ip) VALUES ('1.1.1.1'), (''); --{serverError 441} + +SET input_format_ipv4_default_on_conversion_error = 1; + +INSERT INTO test_table_ipv4_materialized(ip) VALUES ('1.1.1.1'), (''); --{serverError 441} + +SET cast_ipv4_ipv6_default_on_conversion_error = 1; + +INSERT INTO test_table_ipv4_materialized(ip) VALUES ('1.1.1.1'), (''); +SELECT ip, ipv6 FROM test_table_ipv4_materialized; + +SET input_format_ipv4_default_on_conversion_error = 0; +SET cast_ipv4_ipv6_default_on_conversion_error = 0; + +DROP TABLE test_table_ipv4_materialized; + +DROP TABLE IF EXISTS test_table_ipv6; +CREATE TABLE test_table_ipv6 +( + ip String, + ipv6 IPv6 +) ENGINE = TinyLog; + +INSERT INTO test_table_ipv6 VALUES ('fe80::9801:43ff:fe1f:7690', 'fe80::9801:43ff:fe1f:7690'), ('1.1.1.1', '1.1.1.1'), ('', ''); --{clientError 441} + +SET input_format_ipv6_default_on_conversion_error = 1; + +INSERT INTO test_table_ipv6 VALUES ('fe80::9801:43ff:fe1f:7690', 'fe80::9801:43ff:fe1f:7690'), ('1.1.1.1', '1.1.1.1'), ('', ''); +SELECT ip, ipv6 FROM test_table_ipv6; + +SET input_format_ipv6_default_on_conversion_error = 0; + +DROP TABLE test_table_ipv6; + +DROP TABLE IF EXISTS test_table_ipv6_materialized; +CREATE TABLE test_table_ipv6_materialized +( + ip String, + ipv6 IPv6 MATERIALIZED toIPv6(ip) +) ENGINE = TinyLog; + +INSERT INTO test_table_ipv6_materialized(ip) VALUES ('fe80::9801:43ff:fe1f:7690'), ('1.1.1.1'), (''); --{serverError 441} + +SET input_format_ipv6_default_on_conversion_error = 1; + +INSERT INTO test_table_ipv6_materialized(ip) VALUES ('fe80::9801:43ff:fe1f:7690'), ('1.1.1.1'), (''); --{serverError 441} + +SET cast_ipv4_ipv6_default_on_conversion_error = 1; + +INSERT INTO test_table_ipv6_materialized(ip) VALUES ('fe80::9801:43ff:fe1f:7690'), ('1.1.1.1'), (''); +SELECT ip, ipv6 FROM test_table_ipv6_materialized; + +SET input_format_ipv6_default_on_conversion_error = 0; +SET cast_ipv4_ipv6_default_on_conversion_error = 0; + +DROP TABLE test_table_ipv6_materialized; diff --git a/tests/queries/0_stateless/02244_make_datetime.reference b/tests/queries/0_stateless/02244_make_datetime.reference new file mode 100644 index 00000000000..57524c26254 --- /dev/null +++ b/tests/queries/0_stateless/02244_make_datetime.reference @@ -0,0 +1,33 @@ +1991-08-24 21:04:00 +1991-08-24 21:04:00 +1991-08-24 19:04:00 +DateTime +DateTime(\'CET\') +1970-01-01 00:00:00 +1970-01-01 00:00:00 +2106-02-07 06:28:15 +2106-02-07 06:28:15 +2106-02-07 06:28:15 +2106-02-07 06:28:15 +2106-02-07 06:28:15 +1970-01-01 00:00:00 +1970-01-01 00:00:00 +1970-01-01 00:00:00 +1970-01-01 00:00:00 +1984-01-02 01:00:00 +1984-01-01 01:10:00 +1984-01-01 00:01:10 +1984-01-01 00:00:00 +1983-03-01 00:00:00 +1970-01-01 00:00:00 +1970-01-01 00:00:00 +1970-01-01 00:00:00 +1970-01-01 00:00:00 +1970-01-01 00:00:00 +1970-01-01 00:00:00 +2106-02-07 06:28:15 +1970-01-01 00:00:00 +1970-01-01 00:00:00 +1970-01-01 00:00:00 +1970-01-01 00:00:00 +1970-01-01 00:00:00 diff --git a/tests/queries/0_stateless/02244_make_datetime.sql 
b/tests/queries/0_stateless/02244_make_datetime.sql new file mode 100644 index 00000000000..9b8f561994b --- /dev/null +++ b/tests/queries/0_stateless/02244_make_datetime.sql @@ -0,0 +1,39 @@ +select makeDateTime(1991, 8, 24, 21, 4, 0); +select makeDateTime(1991, 8, 24, 21, 4, 0, 'CET'); +select cast(makeDateTime(1991, 8, 24, 21, 4, 0, 'CET') as DateTime('UTC')); + +select toTypeName(makeDateTime(1991, 8, 24, 21, 4, 0)); +select toTypeName(makeDateTime(1991, 8, 24, 21, 4, 0, 'CET')); + +select makeDateTime(1925, 1, 1, 0, 0, 0, 'UTC'); +select makeDateTime(1924, 12, 31, 23, 59, 59, 'UTC'); +select makeDateTime(2283, 11, 11, 23, 59, 59, 'UTC'); +select makeDateTime(2283, 11, 12, 0, 0, 0, 'UTC'); +select makeDateTime(2262, 4, 11, 23, 47, 16, 'UTC'); +select makeDateTime(2262, 4, 11, 23, 47, 17, 'UTC'); +select makeDateTime(2262, 4, 11, 23, 47, 16, 'UTC'); + +select makeDateTime(1984, 0, 1, 0, 0, 0, 'UTC'); +select makeDateTime(1984, 1, 0, 0, 0, 0, 'UTC'); +select makeDateTime(1984, 13, 1, 0, 0, 0, 'UTC'); +select makeDateTime(1984, 1, 41, 0, 0, 0, 'UTC'); +select makeDateTime(1984, 1, 1, 25, 0, 0, 'UTC'); +select makeDateTime(1984, 1, 1, 0, 70, 0, 'UTC'); +select makeDateTime(1984, 1, 1, 0, 0, 70, 'UTC'); +select makeDateTime(1984, 1, 1, 0, 0, 0, 'not a timezone'); -- { serverError 1000 } + +select makeDateTime(1984, 1, 1, 0, 0, 0, 'UTC'); +select makeDateTime(1983, 2, 29, 0, 0, 0, 'UTC'); +select makeDateTime(-1984, 1, 1, 0, 0, 0, 'UTC'); +select makeDateTime(1984, -1, 1, 0, 0, 0, 'UTC'); +select makeDateTime(1984, 1, -1, 0, 0, 0, 'UTC'); +select makeDateTime(1984, 1, 1, -1, 0, 0, 'UTC'); +select makeDateTime(1984, 1, 1, 0, -1, 0, 'UTC'); +select makeDateTime(1984, 1, 1, 0, 0, -1, 'UTC'); + +select makeDateTime(65537, 8, 24, 21, 4, 0, 'UTC'); +select makeDateTime(1991, 65537, 24, 21, 4, 0, 'UTC'); +select makeDateTime(1991, 8, 65537, 21, 4, 0, 'UTC'); +select makeDateTime(1991, 8, 24, 65537, 4, 0, 'UTC'); +select makeDateTime(1991, 8, 24, 21, 65537, 0, 'UTC'); +select makeDateTime(1991, 8, 24, 21, 4, 65537, 'UTC'); \ No newline at end of file diff --git a/tests/queries/0_stateless/02245_format_string_stack_overflow.sql b/tests/queries/0_stateless/02245_format_string_stack_overflow.sql index 1ee3606d3a6..40053fd0d9b 100644 --- a/tests/queries/0_stateless/02245_format_string_stack_overflow.sql +++ b/tests/queries/0_stateless/02245_format_string_stack_overflow.sql @@ -1 +1,2 @@ +-- Tags: no-backward-compatibility-check:22.3 select 
format('{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{
0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}', toString(number)) str from numbers(1); diff --git a/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.reference b/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.reference new file mode 100644 index 00000000000..12c61d9c54e --- /dev/null +++ b/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.reference @@ -0,0 +1,2 @@ +usa + diff --git a/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.sql b/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.sql new file mode 100644 index 00000000000..abc2ee41402 --- /dev/null +++ b/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.sql @@ -0,0 +1,20 @@ +drop table if exists with_nullable; +drop table if exists without_nullable; + +CREATE TABLE with_nullable +( timestamp UInt32, + country LowCardinality(Nullable(String)) ) ENGINE = Memory; + +CREATE TABLE without_nullable +( timestamp UInt32, + country LowCardinality(String)) ENGINE = Memory; + +insert into with_nullable values(0,'f'),(0,'usa'); +insert into 
without_nullable values(0,'usa'),(0,'us2a'); + +select if(t0.country is null ,t2.country,t0.country) "country" +from without_nullable t0 right outer join with_nullable t2 on t0.country=t2.country; + +drop table with_nullable; +drop table without_nullable; + diff --git a/tests/queries/0_stateless/02245_make_datetime64.reference b/tests/queries/0_stateless/02245_make_datetime64.reference new file mode 100644 index 00000000000..eea768f1dc7 --- /dev/null +++ b/tests/queries/0_stateless/02245_make_datetime64.reference @@ -0,0 +1,69 @@ +1991-08-24 21:04:00.000 +1991-08-24 21:04:00.123 +1991-08-24 21:04:00.001234 +1991-08-24 21:04:00.0001234 +1991-08-24 19:04:00.0001234 +DateTime64(3) +DateTime64(3) +DateTime64(6) +DateTime64(7, \'CET\') +DateTime64(7, \'UTC\') +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +2283-11-11 23:59:59.99999999 +2262-04-11 23:47:16.854775807 +2262-04-11 23:47:16.85477581 +1991-08-24 21:04:00 +1991-08-24 21:04:00.9 +1991-08-24 21:04:00.99 +1991-08-24 21:04:00.999 +1991-08-24 21:04:00.1234 +1991-08-24 21:04:00.01234 +1991-08-24 21:04:00.001234 +1991-08-24 21:04:00.0001234 +1991-08-24 21:04:00.00001234 +1991-08-24 21:04:00.000001234 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1984-01-02 01:00:00.000000000 +1984-01-01 01:10:00.000000000 +1984-01-01 00:01:10.000000000 +1984-01-01 02:03:04.000000005 +1984-02-29 02:03:04.000000005 +1983-03-01 02:03:04.000000005 +1984-03-01 02:03:04.000000005 +1983-03-02 02:03:04.000000005 +1984-03-02 02:03:04.000000005 +1983-03-03 02:03:04.000000005 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1984-01-01 02:03:04.000000000 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1925-01-01 00:00:00.000000000 +1984-01-01 00:00:00.000000000 +1984-01-01 00:00:00.000000000 +1984-01-01 00:00:00.000000000 +1984-01-01 00:00:00.000000000 +1984-01-01 00:00:00.000000000 +1984-01-01 00:00:00.000000000 +1984-01-01 00:00:00.000000000 +1984-01-01 00:00:00.000000000 +2283-11-11 23:59:59.999 +1925-01-01 00:00:00.000 +1925-01-01 00:00:00.000 +1925-01-01 00:00:00.000 +1925-01-01 00:00:00.000 +1925-01-01 00:00:00.000 diff --git a/tests/queries/0_stateless/02245_make_datetime64.sql b/tests/queries/0_stateless/02245_make_datetime64.sql new file mode 100644 index 00000000000..2e2d81fa363 --- /dev/null +++ b/tests/queries/0_stateless/02245_make_datetime64.sql @@ -0,0 +1,89 @@ +select makeDateTime64(1991, 8, 24, 21, 4, 0); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 123); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 6); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 7, 'CET'); +select cast(makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 7, 'CET') as DateTime64(7, 'UTC')); + +select toTypeName(makeDateTime64(1991, 8, 24, 21, 4, 0)); +select toTypeName(makeDateTime64(1991, 8, 24, 21, 4, 0, 123)); +select toTypeName(makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 6)); +select toTypeName(makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 7, 'CET')); +select toTypeName(cast(makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 7, 'CET') as DateTime64(7, 'UTC'))); + +select makeDateTime64(1925, 1, 1, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1924, 12, 31, 23, 
59, 59, 999999999, 9, 'UTC'); +select makeDateTime64(2283, 11, 11, 23, 59, 59, 99999999, 8, 'UTC'); +select makeDateTime64(2283, 11, 11, 23, 59, 59, 999999999, 9, 'UTC'); -- { serverError 407 } +select makeDateTime64(2262, 4, 11, 23, 47, 16, 854775807, 9, 'UTC'); +select makeDateTime64(2262, 4, 11, 23, 47, 16, 854775808, 9, 'UTC'); -- { serverError 407 } +select makeDateTime64(2262, 4, 11, 23, 47, 16, 85477581, 8, 'UTC'); + +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 0, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 1, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 2, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 3, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 4, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 5, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 6, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 7, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 8, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 9, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 10, 'CET'); -- { serverError 69 } +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, -1, 'CET'); -- { serverError 69 } + +select makeDateTime64(1984, 0, 1, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 0, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 13, 1, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 41, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 25, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0, 70, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0, 0, 70, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0, 0, 0, 0, 9, 'not a timezone'); -- { serverError 1000 } + +select makeDateTime64(1984, 1, 1, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 2, 29, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1983, 2, 29, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 2, 30, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1983, 2, 30, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 2, 31, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1983, 2, 31, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 2, 32, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1983, 2, 32, 2, 3, 4, 5, 9, 'UTC'); + +select makeDateTime64(-1984, 1, 1, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, -1, 1, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, -1, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, -1, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 2, -1, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 2, 3, -1, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 2, 3, 4, -1, 9, 'UTC'); + +select makeDateTime64(NaN, 1, 1, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, NaN, 1, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, NaN, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, NaN, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 2, NaN, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 2, 3, NaN, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 2, 3, 4, NaN, 9, 'UTC'); + +select makeDateTime64(1984.5, 1, 1, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1.5, 1, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1.5, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0.5, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0, 0.5, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0, 0, 0.5, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0, 0, 0, 0.5, 9, 'UTC'); +select makeDateTime64(1984, 
1, 1, 0, 0, 0, 0, 9.5, 'UTC'); + +select makeDateTime64(65537, 8, 24, 21, 4, 0); +select makeDateTime64(1991, 65537, 24, 21, 4, 0); +select makeDateTime64(1991, 8, 65537, 21, 4, 0); +select makeDateTime64(1991, 8, 24, 65537, 4, 0); +select makeDateTime64(1991, 8, 24, 21, 65537, 0); +select makeDateTime64(1991, 8, 24, 21, 4, 65537); + +select makeDateTime64(year, 1, 1, 1, 0, 0, 0, precision, timezone) from ( + select 1984 as year, 5 as precision, 'UTC' as timezone + union all + select 1985 as year, 5 as precision, 'UTC' as timezone +); -- { serverError 43 } diff --git a/tests/queries/0_stateless/02245_parquet_skip_unknown_type.reference b/tests/queries/0_stateless/02245_parquet_skip_unknown_type.reference new file mode 100644 index 00000000000..4f9cde534f0 --- /dev/null +++ b/tests/queries/0_stateless/02245_parquet_skip_unknown_type.reference @@ -0,0 +1,16 @@ +OK +image_path Nullable(String) +caption Nullable(String) +NSFW Nullable(String) +similarity Nullable(Float64) +LICENSE Nullable(String) +url Nullable(String) +key Nullable(Int64) +shard_id Nullable(Int64) +status Nullable(String) +width Nullable(Int64) +height Nullable(Int64) +exif Nullable(String) +original_width Nullable(Int64) +original_height Nullable(Int64) +10 diff --git a/tests/queries/0_stateless/02245_parquet_skip_unknown_type.sh b/tests/queries/0_stateless/02245_parquet_skip_unknown_type.sh new file mode 100755 index 00000000000..005c089e434 --- /dev/null +++ b/tests/queries/0_stateless/02245_parquet_skip_unknown_type.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +FILE_NAME=test_02245.parquet +DATA_FILE=$USER_FILES_PATH/$FILE_NAME + +cp $CUR_DIR/data_parquet_bad_column/metadata_0.parquet $DATA_FILE + + +$CLICKHOUSE_CLIENT -q "desc file(test_02245.parquet)" 2>&1 | grep -qF "CANNOT_EXTRACT_TABLE_STRUCTURE" && echo "OK" || echo "FAIL" +$CLICKHOUSE_CLIENT -q "desc file(test_02245.parquet) settings input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference=1" +$CLICKHOUSE_CLIENT -q "select count(*) from file(test_02245.parquet) settings input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference=1" + diff --git a/tests/queries/0_stateless/02245_s3_schema_desc.reference b/tests/queries/0_stateless/02245_s3_schema_desc.reference index a5b0f81a2c7..e039680d933 100644 --- a/tests/queries/0_stateless/02245_s3_schema_desc.reference +++ b/tests/queries/0_stateless/02245_s3_schema_desc.reference @@ -1,21 +1,21 @@ -c1 Nullable(String) -c2 Nullable(String) -c3 Nullable(String) -c1 Nullable(String) -c2 Nullable(String) -c3 Nullable(String) +c1 Nullable(Float64) +c2 Nullable(Float64) +c3 Nullable(Float64) +c1 Nullable(Float64) +c2 Nullable(Float64) +c3 Nullable(Float64) c1 UInt64 c2 UInt64 c3 UInt64 -c1 Nullable(String) -c2 Nullable(String) -c3 Nullable(String) +c1 Nullable(Float64) +c2 Nullable(Float64) +c3 Nullable(Float64) c1 UInt64 c2 UInt64 c3 UInt64 -c1 Nullable(String) -c2 Nullable(String) -c3 Nullable(String) +c1 Nullable(Float64) +c2 Nullable(Float64) +c3 Nullable(Float64) c1 UInt64 c2 UInt64 c3 UInt64 diff --git a/tests/queries/0_stateless/02245_s3_schema_desc.sql b/tests/queries/0_stateless/02245_s3_schema_desc.sql index 4ab870e1379..2cd362ff233 100644 --- 
a/tests/queries/0_stateless/02245_s3_schema_desc.sql +++ b/tests/queries/0_stateless/02245_s3_schema_desc.sql @@ -10,4 +10,5 @@ desc s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test desc s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64'); desc s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64', 'auto'); + SELECT * FROM s3(decodeURLComponent(NULL), [NULL]); --{serverError 170} diff --git a/tests/queries/0_stateless/02246_async_insert_quota.reference b/tests/queries/0_stateless/02246_async_insert_quota.reference index 99b8e471635..9d384c1aaf1 100644 --- a/tests/queries/0_stateless/02246_async_insert_quota.reference +++ b/tests/queries/0_stateless/02246_async_insert_quota.reference @@ -1,2 +1,2 @@ -QUOTA_EXPIRED +QUOTA_EXCEEDED 2 diff --git a/tests/queries/0_stateless/02246_async_insert_quota.sh b/tests/queries/0_stateless/02246_async_insert_quota.sh index 4da93f94f19..d1080313ed9 100755 --- a/tests/queries/0_stateless/02246_async_insert_quota.sh +++ b/tests/queries/0_stateless/02246_async_insert_quota.sh @@ -20,7 +20,7 @@ ${CLICKHOUSE_CLIENT} -q "CREATE QUOTA q02246 FOR INTERVAL 100 YEAR MAX QUERY INS ${CLICKHOUSE_CLIENT} --user u02246 --async_insert 1 -q "INSERT INTO async_inserts_02246 VALUES (1, 'a')" ${CLICKHOUSE_CLIENT} --user u02246 --async_insert 1 -q "INSERT INTO async_inserts_02246 VALUES (2, 'b')" -${CLICKHOUSE_CLIENT} --user u02246 --async_insert 1 -q "INSERT INTO async_inserts_02246 VALUES (3, 'c')" 2>&1 | grep -m1 -o QUOTA_EXPIRED +${CLICKHOUSE_CLIENT} --user u02246 --async_insert 1 -q "INSERT INTO async_inserts_02246 VALUES (3, 'c')" 2>&1 | grep -m1 -o QUOTA_EXCEEDED sleep 1.0 diff --git a/tests/queries/0_stateless/02246_is_secure_query_log.reference b/tests/queries/0_stateless/02246_is_secure_query_log.reference new file mode 100644 index 00000000000..1e8c0bbc9cf --- /dev/null +++ b/tests/queries/0_stateless/02246_is_secure_query_log.reference @@ -0,0 +1,4 @@ +1 0 +1 1 +2 0 +2 1 diff --git a/tests/queries/0_stateless/02246_is_secure_query_log.sh b/tests/queries/0_stateless/02246_is_secure_query_log.sh new file mode 100755 index 00000000000..6b064cedfdf --- /dev/null +++ b/tests/queries/0_stateless/02246_is_secure_query_log.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --log_queries=1 --query_id "2246_${CLICKHOUSE_DATABASE}_client_nonsecure" -q "select 1 Format Null" +${CLICKHOUSE_CLIENT} -q "system flush logs" +${CLICKHOUSE_CLIENT} -q "select interface, is_secure from system.query_log where query_id = '2246_${CLICKHOUSE_DATABASE}_client_nonsecure' and type = 'QueryFinish' and current_database = currentDatabase()" + +${CLICKHOUSE_CLIENT_SECURE} --log_queries=1 --query_id "2246_${CLICKHOUSE_DATABASE}_client_secure" -q "select 1 Format Null" +${CLICKHOUSE_CLIENT} -q "system flush logs" +${CLICKHOUSE_CLIENT} -q "select interface, is_secure from system.query_log where query_id = '2246_${CLICKHOUSE_DATABASE}_client_secure' and type = 'QueryFinish' and current_database = currentDatabase()" + +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&log_queries=1&query_id=2246_${CLICKHOUSE_DATABASE}_http_nonsecure" -d "select 1 Format Null" +${CLICKHOUSE_CLIENT} -q "system flush logs" +${CLICKHOUSE_CLIENT} -q "select interface, is_secure from system.query_log where query_id = '2246_${CLICKHOUSE_DATABASE}_http_nonsecure' and type = 'QueryFinish' and current_database = currentDatabase()" + +${CLICKHOUSE_CURL} -sSk "${CLICKHOUSE_URL_HTTPS}&log_queries=1&query_id=2246_${CLICKHOUSE_DATABASE}_http_secure" -d "select 1 Format Null" +${CLICKHOUSE_CLIENT} -q "system flush logs" +${CLICKHOUSE_CLIENT} -q "select interface, is_secure from system.query_log where query_id = '2246_${CLICKHOUSE_DATABASE}_http_secure' and type = 'QueryFinish' and current_database = currentDatabase()" diff --git a/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.reference b/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.reference new file mode 100644 index 00000000000..c245f13fdbe --- /dev/null +++ b/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.reference @@ -0,0 +1,107 @@ +TSV +c1 Nullable(Float64) +c2 Nullable(String) +c3 Array(Nullable(Float64)) +c4 Tuple(Nullable(Float64), Nullable(Float64), Nullable(Float64)) +42 Some string [1,2,3,4] (1,2,3) +42 abcd [] (4,5,6) +c1 Nullable(String) +[({\'key\' : 42.42}, [\'String\', \'String2\'], 42.42), ({}, [], -42), ({\'key2\' : NULL}, [NULL], NULL)] +[] +[({}, [], 0)] +[({}, [NULL], NULL)] +[({}, [\'String3\'], NULL)] +[({\'key3\': NULL}, []), NULL] +c1 Array(Tuple(Map(String, Nullable(Float64)), Array(Nullable(String)), Nullable(Float64))) +[({'key':42.42},['String','String2'],42.42),({},[],-42),({'key2':NULL},[NULL],NULL)] +[] +[({},[],0)] +[({},[NULL],NULL)] +[({},['String3'],NULL)] +[({'key3':NULL},[],NULL)] +c1 Nullable(Bool) +true +false +\N +c1 Array(Nullable(Bool)) +[true,NULL] +[] +[NULL] +[false] +c1 Nullable(String) +[] +c1 Nullable(String) +{} +c1 Nullable(String) +() +c1 Nullable(String) +[1, 2, 3 +c1 Nullable(String) +[(1, 2, 3 4)] +c1 Nullable(String) +[1, 2, 3 + 4] +c1 Nullable(String) +(1, 2, +c1 Nullable(String) +[1, Some trash, 42.2] +c1 Nullable(String) +[1, \'String\', {\'key\' : 2}] +c1 Nullable(String) +{\'key\' : 1, [1] : 10} +c1 Nullable(String) +{}{} +c1 Nullable(String) +[1, 2, 3 +c1 Nullable(String) +[abc, def] +c1 Array(Nullable(String)) +['abc','def'] +c1 Nullable(String) +[\'string] +c1 Nullable(String) +\'string +c1 Nullable(Float64) +42.42 +c1 Nullable(String) +42.42sometrash +c1 Nullable(String) +[42.42sometrash, 42.42] + +CSV +c1 Nullable(String) +c2 Nullable(String) +c3 Array(Nullable(Float64)) +c4 Array(Tuple(Nullable(Float64), Nullable(Float64), Nullable(Float64))) +42 Some string [1,2,3,4] [(1,2,3)] +42\\ abcd [] 
[(4,5,6)] +c1 Nullable(String) +[({\'key\' : 42.42}, [\'String\', \'String2\'], 42.42), ({}, [], -42), ({\'key2\' : NULL}, [NULL], NULL)] +[] +[({}, [], 0)] +[({}, [NULL], NULL)] +[({}, [\'String3\'], NULL)] +[({\'key3\': NULL}, []), NULL] +c1 Array(Tuple(Map(String, Nullable(Float64)), Array(Nullable(String)), Nullable(Float64))) +[({'key':42.42},['String','String2'],42.42),({},[],-42),({'key2':NULL},[NULL],NULL)] +[] +[({},[],0)] +[({},[NULL],NULL)] +[({},['String3'],NULL)] +[({'key3':NULL},[],NULL)] +c1 Nullable(Bool) +true +false +\N +c1 Array(Nullable(Bool)) +[true,NULL] +[] +[NULL] +[false] +c1 Nullable(String) +(1, 2, 3) +c1 Nullable(String) +123.123 +c1 Array(Tuple(Nullable(Float64), Nullable(Float64), Nullable(Float64))) +[(1,2,3)] +c1 Array(Tuple(Nullable(Float64), Nullable(Float64), Nullable(Float64))) +[(1,2,3)] diff --git a/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.sh b/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.sh new file mode 100755 index 00000000000..6589765f739 --- /dev/null +++ b/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.sh @@ -0,0 +1,220 @@ +#!/usr/bin/env bash +# Tags: no-parallel, no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + + +USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +FILE_NAME=test_02149.data +DATA_FILE=${USER_FILES_PATH:?}/$FILE_NAME + +touch $DATA_FILE + +echo "TSV" + +echo -e "42\tSome string\t[1, 2, 3, 4]\t(1, 2, 3) +42\tabcd\t[]\t(4, 5, 6)" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "[({'key' : 42.42}, ['String', 'String2'], 42.42), ({}, [], -42), ({'key2' : NULL}, [NULL], NULL)] +[] +[({}, [], 0)] +[({}, [NULL], NULL)] +[({}, ['String3'], NULL)] +[({'key3': NULL}, []), NULL]"> $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV') settings input_format_tsv_use_best_effort_in_schema_inference=false" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV') settings input_format_tsv_use_best_effort_in_schema_inference=false" + + +echo -e "[({'key' : 42.42}, ['String', 'String2'], 42.42), ({}, [], -42), ({'key2' : NULL}, [NULL], NULL)] +[] +[({}, [], 0)] +[({}, [NULL], NULL)] +[({}, ['String3'], NULL)] +[({'key3': NULL}, [], NULL)]"> $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "true +false +\N" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "[true, NULL] +[] +[NULL] +[false]" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "[]" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "{}" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "()" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "[1, 2, 3" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc 
file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "[(1, 2, 3 4)]" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "[1, 2, 3 + 4]" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "(1, 2," > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "[1, Some trash, 42.2]" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "[1, 'String', {'key' : 2}]" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "{'key' : 1, [1] : 10}" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "{}{}" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "[1, 2, 3" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "[abc, def]" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "['abc', 'def']" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "['string]" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "'string" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "42.42" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "42.42sometrash" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + +echo -e "[42.42sometrash, 42.42]" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSV')" + + +echo +echo "CSV" + +echo -e "42,Some string,'[1, 2, 3, 4]','[(1, 2, 3)]' +42\,abcd,'[]','[(4, 5, 6)]'" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" + +echo -e "\"[({'key' : 42.42}, ['String', 'String2'], 42.42), ({}, [], -42), ({'key2' : NULL}, [NULL], NULL)]\" +'[]' +'[({}, [], 0)]' +'[({}, [NULL], NULL)]' +\"[({}, ['String3'], NULL)]\" +\"[({'key3': NULL}, []), NULL]\""> $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV') settings input_format_csv_use_best_effort_in_schema_inference=false" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV') settings input_format_csv_use_best_effort_in_schema_inference=false" + +echo -e "\"[({'key' : 42.42}, ['String', 'String2'], 42.42), ({}, [], -42), ({'key2' : NULL}, [NULL], NULL)]\" +'[]' +'[({}, [], 0)]' +'[({}, [NULL], NULL)]' +\"[({}, ['String3'], NULL)]\" +\"[({'key3': NULL}, [], NULL)]\""> $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc 
file('$FILE_NAME', 'CSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" + +echo -e "true +false +\N" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" + +echo -e "'[true, NULL]' +'[]' +'[NULL]' +'[false]'" > $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" + + +echo -e "'(1, 2, 3)'"> $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" + +echo -e "'123.123'"> $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" + +echo -e "'[(1, 2, 3)]'"> $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" + +echo -e "\"[(1, 2, 3)]\""> $DATA_FILE + +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" + + diff --git a/tests/queries/0_stateless/02247_fix_extract_parser.reference b/tests/queries/0_stateless/02247_fix_extract_parser.reference new file mode 100644 index 00000000000..01e79c32a8c --- /dev/null +++ b/tests/queries/0_stateless/02247_fix_extract_parser.reference @@ -0,0 +1,3 @@ +1 +2 +3 diff --git a/tests/queries/0_stateless/02247_fix_extract_parser.sql b/tests/queries/0_stateless/02247_fix_extract_parser.sql new file mode 100644 index 00000000000..9b721a6e830 --- /dev/null +++ b/tests/queries/0_stateless/02247_fix_extract_parser.sql @@ -0,0 +1,3 @@ +WITH 'number: 1' as year SELECT extract(year, '\\d+'); +WITH 'number: 2' as mm SELECT extract(mm, '\\d+'); +WITH 'number: 3' as s SELECT extract(s, '\\d+'); diff --git a/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.reference b/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.reference new file mode 100644 index 00000000000..300846c17a0 --- /dev/null +++ b/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.reference @@ -0,0 +1,34 @@ +a Nullable(Float64) +b Nullable(String) +c Array(Nullable(Float64)) +1 s1 [] +2 } [2] +\N \N [] +\N \N [] +\N \N [3] +b Nullable(Float64) +a Nullable(Float64) +c Nullable(Float64) +e Nullable(Float64) +1 \N \N \N +\N 2 3 \N +\N \N \N \N +\N \N \N 3 +3 3 1 \N +a Nullable(Float64) +b Nullable(String) +c Array(Nullable(Float64)) +1 s1 [] +2 \N [2] +\N \N [] +\N \N [] +\N \N [3] +b Nullable(Float64) +a Nullable(Float64) +c Nullable(Float64) +e Nullable(Float64) +1 \N \N \N +\N 2 3 \N +\N \N \N \N +\N \N \N 3 +3 3 1 \N diff --git a/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.sh b/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.sh new file mode 100755 index 00000000000..0be26371585 --- /dev/null +++ b/tests/queries/0_stateless/02247_names_order_in_json_and_tskv.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Tags: no-parallel, no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + + +USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +FILE_NAME=test_02247.data +DATA_FILE=${USER_FILES_PATH:?}/$FILE_NAME + +touch $DATA_FILE + +echo -e 'a=1\tb=s1\tc=\N +c=[2]\ta=2\tb=\N} +a=\N + +c=[3]\ta=\N' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSKV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSKV')" + +echo -e 'b=1 +a=2\tc=3 + +e=3 +c=1\tb=3\ta=3' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'TSKV')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'TSKV')" + + +echo -e '{"a" : 1, "b" : "s1", "c" : null} +{"c" : [2], "a" : 2, "b" : null} +{} +{"a" : null} +{"c" : [3], "a" : null}' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONEachRow')" + +echo -e '{"b" : 1} +{"a" : 2, "c" : 3} +{} +{"e" : 3} +{"c" : 1, "b" : 3, "a" : 3}' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONEachRow')" + + +rm $DATA_FILE diff --git a/tests/queries/0_stateless/02247_read_bools_as_numbers_json.reference b/tests/queries/0_stateless/02247_read_bools_as_numbers_json.reference new file mode 100644 index 00000000000..a7609bdd86b --- /dev/null +++ b/tests/queries/0_stateless/02247_read_bools_as_numbers_json.reference @@ -0,0 +1,18 @@ +x Nullable(Bool) +true +false +x Nullable(Float64) +42.42 +0 +x Nullable(Float64) +1 +0.42 +c1 Nullable(Bool) +true +false +c1 Nullable(Float64) +42.42 +0 +c1 Nullable(Float64) +1 +0.42 diff --git a/tests/queries/0_stateless/02247_read_bools_as_numbers_json.sh b/tests/queries/0_stateless/02247_read_bools_as_numbers_json.sh new file mode 100755 index 00000000000..10f050ea6d1 --- /dev/null +++ b/tests/queries/0_stateless/02247_read_bools_as_numbers_json.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash +# Tags: no-parallel, no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + + +USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +FILE_NAME=test_02247.data +DATA_FILE=${USER_FILES_PATH:?}/$FILE_NAME + +touch $DATA_FILE + +echo -e '{"x" : true} +{"x" : false}' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONEachRow')" + +echo -e '{"x" : 42.42} +{"x" : false}' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONEachRow')" + +echo -e '{"x" : true} +{"x" : 0.42}' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONEachRow')" + + +echo -e '[true] +[false]' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONCompactEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONCompactEachRow')" + +echo -e '[42.42] +[false]' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONCompactEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONCompactEachRow')" + +echo -e '[true] +[0.42]' > $DATA_FILE +$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'JSONCompactEachRow')" +$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'JSONCompactEachRow')" + + +rm $DATA_FILE diff --git a/tests/queries/0_stateless/02247_written_bytes_quota.reference b/tests/queries/0_stateless/02247_written_bytes_quota.reference new file mode 100644 index 00000000000..f6e9710e282 --- /dev/null +++ b/tests/queries/0_stateless/02247_written_bytes_quota.reference @@ -0,0 +1,7 @@ +QUOTA_EXCEEDED +QUOTA_EXCEEDED +1 +2 +QUOTA_EXCEEDED +1 +50 diff --git a/tests/queries/0_stateless/02247_written_bytes_quota.sh b/tests/queries/0_stateless/02247_written_bytes_quota.sh new file mode 100755 index 00000000000..072626f41ac --- /dev/null +++ b/tests/queries/0_stateless/02247_written_bytes_quota.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# Tags: no-parallel + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS written_bytes_02247" +${CLICKHOUSE_CLIENT} -q "DROP ROLE IF EXISTS r02247" +${CLICKHOUSE_CLIENT} -q "DROP USER IF EXISTS u02247" +${CLICKHOUSE_CLIENT} -q "DROP QUOTA IF EXISTS q02247" + +${CLICKHOUSE_CLIENT} -q "CREATE TABLE written_bytes_02247(s String) ENGINE = Memory" + +${CLICKHOUSE_CLIENT} -q "CREATE ROLE r02247" +${CLICKHOUSE_CLIENT} -q "CREATE USER u02247" +${CLICKHOUSE_CLIENT} -q "GRANT ALL ON *.* TO r02247" +${CLICKHOUSE_CLIENT} -q "GRANT r02247 to u02247" +${CLICKHOUSE_CLIENT} -q "CREATE QUOTA q02247 FOR INTERVAL 100 YEAR MAX WRITTEN BYTES = 25 TO r02247" + +${CLICKHOUSE_CLIENT} --user u02247 --async_insert 1 -q "INSERT INTO written_bytes_02247 VALUES ('qwqw')" +${CLICKHOUSE_CLIENT} --user u02247 --async_insert 0 -q "INSERT INTO written_bytes_02247 VALUES ('qwqw')" +${CLICKHOUSE_CLIENT} --user u02247 --async_insert 1 -q "INSERT INTO written_bytes_02247 VALUES ('qwqw')" 2>&1 | grep -m1 -o QUOTA_EXCEEDED +${CLICKHOUSE_CLIENT} --user u02247 --async_insert 0 -q "INSERT INTO written_bytes_02247 VALUES ('qwqw')" 2>&1 | grep -m1 -o QUOTA_EXCEEDED + +${CLICKHOUSE_CLIENT} -q "SELECT written_bytes > 10 FROM system.quotas_usage WHERE quota_name = 'q02247'" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM written_bytes_02247" + +${CLICKHOUSE_CLIENT} -q "DROP QUOTA q02247" +${CLICKHOUSE_CLIENT} -q "CREATE QUOTA q02247 FOR INTERVAL 100 YEAR MAX WRITTEN BYTES = 1000 TO r02247" +${CLICKHOUSE_CLIENT} -q "TRUNCATE TABLE written_bytes_02247" + +${CLICKHOUSE_CLIENT} --user u02247 -q "INSERT INTO written_bytes_02247 SELECT toString(number) FROM numbers(50)" +${CLICKHOUSE_CLIENT} --user u02247 -q "INSERT INTO written_bytes_02247 SELECT toString(number) FROM numbers(100)" 2>&1 | grep -m1 -o QUOTA_EXCEEDED + +${CLICKHOUSE_CLIENT} -q "SELECT written_bytes > 100 FROM system.quotas_usage WHERE quota_name = 'q02247'" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM written_bytes_02247" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS written_bytes_02247" +${CLICKHOUSE_CLIENT} -q "DROP ROLE IF EXISTS r02247" +${CLICKHOUSE_CLIENT} -q "DROP USER IF EXISTS u02247" +${CLICKHOUSE_CLIENT} -q "DROP QUOTA IF EXISTS q02247" diff --git a/tests/queries/0_stateless/02248_nullable_custom_types_to_string.sql b/tests/queries/0_stateless/02248_nullable_custom_types_to_string.sql index 313f703fd03..605500ee840 100644 --- a/tests/queries/0_stateless/02248_nullable_custom_types_to_string.sql +++ b/tests/queries/0_stateless/02248_nullable_custom_types_to_string.sql @@ -1,3 +1,4 @@ +-- Tags: no-backward-compatibility-check:22.3.4.44 select toString(toNullable(true)); select toString(CAST(NULL, 'Nullable(Bool)')); select toString(toNullable(toIPv4('0.0.0.0'))); diff --git a/tests/queries/0_stateless/02249_insert_select_from_input_schema_inference.reference b/tests/queries/0_stateless/02249_insert_select_from_input_schema_inference.reference new file mode 100644 index 00000000000..7fd75453a3c --- /dev/null +++ b/tests/queries/0_stateless/02249_insert_select_from_input_schema_inference.reference @@ -0,0 +1,2 @@ +1 string1 +2 string2 diff --git a/tests/queries/0_stateless/02249_insert_select_from_input_schema_inference.sql b/tests/queries/0_stateless/02249_insert_select_from_input_schema_inference.sql new file mode 100644 index 00000000000..da3bccfd745 --- /dev/null +++ b/tests/queries/0_stateless/02249_insert_select_from_input_schema_inference.sql @@ -0,0 +1,5 @@ +drop table if exists test_02249; +create table test_02249 (x UInt32, y String) engine=Memory(); 
+insert into test_02249 select * from input() format JSONEachRow {"x" : 1, "y" : "string1"}, {"y" : "string2", "x" : 2}; +select * from test_02249; +drop table test_02249; diff --git a/tests/queries/0_stateless/02249_parse_date_time_basic.reference b/tests/queries/0_stateless/02249_parse_date_time_basic.reference new file mode 100644 index 00000000000..eb030a8fd3d --- /dev/null +++ b/tests/queries/0_stateless/02249_parse_date_time_basic.reference @@ -0,0 +1,5 @@ +2022-03-31T00:00:00Z 1 +2022-04-01T09:10:24Z 2 +2022-03-31T10:18:56Z 3 +2022-03-31T10:18:56Z 4 +2022-04-01T09:10:24Z 5 diff --git a/tests/queries/0_stateless/02249_parse_date_time_basic.sql b/tests/queries/0_stateless/02249_parse_date_time_basic.sql new file mode 100644 index 00000000000..7146462fb74 --- /dev/null +++ b/tests/queries/0_stateless/02249_parse_date_time_basic.sql @@ -0,0 +1,10 @@ +SET date_time_output_format='iso'; +drop table if exists t; +CREATE TABLE t (a DateTime('UTC'), b String, c String, d String, e Int32) ENGINE = Memory; +INSERT INTO t(a, b, c, d ,e) VALUES ('2022-03-31','','','',1); +INSERT INTO t(a, b, c, d ,e) VALUES (1648804224,'','','',2); +INSERT INTO t(a, b, c, d ,e) VALUES ('2022-03-31 10:18:56','','','',3); +INSERT INTO t(a, b, c, d ,e) VALUES ('2022-03-31T10:18:56','','','',4); +INSERT INTO t(a, b, c, d ,e) VALUES ('1648804224','','','',5); +select a, e from t order by e; +drop table if exists t; diff --git a/tests/queries/0_stateless/02250_hints_for_columns.reference b/tests/queries/0_stateless/02250_hints_for_columns.reference new file mode 100644 index 00000000000..0eabe367130 --- /dev/null +++ b/tests/queries/0_stateless/02250_hints_for_columns.reference @@ -0,0 +1,3 @@ +OK +OK +OK diff --git a/tests/queries/0_stateless/02250_hints_for_columns.sh b/tests/queries/0_stateless/02250_hints_for_columns.sh new file mode 100755 index 00000000000..45fd2f238b1 --- /dev/null +++ b/tests/queries/0_stateless/02250_hints_for_columns.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS t" + +$CLICKHOUSE_CLIENT --query="CREATE TABLE t (CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) ENGINE = MergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192)" + +$CLICKHOUSE_CLIENT --query="ALTER TABLE t DROP COLUMN ToDro" 2>&1 | grep -q "Maybe you meant: \['ToDrop'\]" && echo 'OK' || echo 'FAIL' + +$CLICKHOUSE_CLIENT --query="ALTER TABLE t MODIFY COLUMN ToDro UInt64" 2>&1 | grep -q "Maybe you meant: \['ToDrop'\]" && echo 'OK' || echo 'FAIL' + +$CLICKHOUSE_CLIENT --query="ALTER TABLE t RENAME COLUMN ToDro to ToDropp" 2>&1 | grep -q "Maybe you meant: \['ToDrop'\]" && echo 'OK' || echo 'FAIL' + +$CLICKHOUSE_CLIENT --query="DROP TABLE t" diff --git a/tests/queries/0_stateless/02250_hints_for_projections.reference b/tests/queries/0_stateless/02250_hints_for_projections.reference new file mode 100644 index 00000000000..d86bac9de59 --- /dev/null +++ b/tests/queries/0_stateless/02250_hints_for_projections.reference @@ -0,0 +1 @@ +OK diff --git a/tests/queries/0_stateless/02250_hints_for_projections.sh b/tests/queries/0_stateless/02250_hints_for_projections.sh new file mode 100755 index 00000000000..7db8b243ae4 --- /dev/null +++ b/tests/queries/0_stateless/02250_hints_for_projections.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS t" + +$CLICKHOUSE_CLIENT --query="create table t (x Int32, y Int32, projection pToDrop (select x, y order by x)) engine = MergeTree order by y;" + +$CLICKHOUSE_CLIENT --query="ALTER TABLE t DROP PROJECTION pToDro" 2>&1 | grep -q "Maybe you meant: \['pToDrop'\]" && echo 'OK' || echo 'FAIL' + +$CLICKHOUSE_CLIENT --query="DROP TABLE t" diff --git a/tests/queries/0_stateless/02250_insert_select_from_file_schema_inference.reference b/tests/queries/0_stateless/02250_insert_select_from_file_schema_inference.reference new file mode 100644 index 00000000000..dec7d2fabd2 --- /dev/null +++ b/tests/queries/0_stateless/02250_insert_select_from_file_schema_inference.reference @@ -0,0 +1 @@ +\N diff --git a/tests/queries/0_stateless/02250_insert_select_from_file_schema_inference.sql b/tests/queries/0_stateless/02250_insert_select_from_file_schema_inference.sql new file mode 100644 index 00000000000..2c04045463e --- /dev/null +++ b/tests/queries/0_stateless/02250_insert_select_from_file_schema_inference.sql @@ -0,0 +1,6 @@ +insert into table function file('data_02250.jsonl') select NULL as x settings engine_file_truncate_on_insert=1; +drop table if exists test_02250; +create table test_02250 (x Nullable(UInt32)) engine=Memory(); +insert into test_02250 select * from file('data_02250.jsonl'); +select * from test_02250; +drop table test_02250; diff --git a/tests/queries/0_stateless/02250_lots_of_columns_in_csv_with_names.reference b/tests/queries/0_stateless/02250_lots_of_columns_in_csv_with_names.reference new file mode 100644 index 00000000000..9cd40e0d748 --- /dev/null +++ b/tests/queries/0_stateless/02250_lots_of_columns_in_csv_with_names.reference @@ -0,0 +1 @@ +42 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 diff --git a/tests/queries/0_stateless/02250_lots_of_columns_in_csv_with_names.sh b/tests/queries/0_stateless/02250_lots_of_columns_in_csv_with_names.sh new file mode 100755 index 00000000000..9366d41af9a --- /dev/null +++ b/tests/queries/0_stateless/02250_lots_of_columns_in_csv_with_names.sh @@ -0,0 +1,274 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_02250" + +$CLICKHOUSE_CLIENT -q "CREATE TABLE test_02250 +( + field_1 Int32, + field_2 Int32, + field_3 Int32, + field_4 Int32, + field_5 Int32, + field_6 Int32, + field_7 Int32, + field_8 Int32, + field_9 Int32, + field_10 Int32, + field_11 Int32, + field_12 Int32, + field_13 Int32, + field_14 Int32, + field_15 Int32, + field_16 Int32, + field_17 Int32, + field_18 Int32, + field_19 Int32, + field_20 Int32, + field_21 Int32, + field_22 Int32, + field_23 Int32, + field_24 Int32, + field_25 Int32, + field_26 Int32, + field_27 Int32, + field_28 Int32, + field_29 Int32, + field_30 Int32, + field_31 Int32, + field_32 Int32, + field_33 Int32, + field_34 Int32, + field_35 Int32, + field_36 Int32, + field_37 Int32, + field_38 Int32, + field_39 Int32, + field_40 Int32, + field_41 Int32, + field_42 Int32, + field_43 Int32, + field_44 Int32, + field_45 Int32, + field_46 Int32, + field_47 Int32, + field_48 Int32, + field_49 Int32, + field_50 Int32, + field_51 Int32, + field_52 Int32, + field_53 Int32, + field_54 Int32, + field_55 Int32, + field_56 Int32, + field_57 Int32, + field_58 Int32, + field_59 Int32, + field_60 Int32, + field_61 Int32, + field_62 Int32, + field_63 Int32, + field_64 Int32, + field_65 Int32, + field_66 Int32, + field_67 Int32, + field_68 Int32, + field_69 Int32, + field_70 Int32, + field_71 Int32, + field_72 Int32, + field_73 Int32, + field_74 Int32, + field_75 Int32, + field_76 Int32, + field_77 Int32, + field_78 Int32, + field_79 Int32, + field_80 Int32, + field_81 Int32, + field_82 Int32, + field_83 Int32, + field_84 Int32, + field_85 Int32, + field_86 Int32, + field_87 Int32, + field_88 Int32, + field_89 Int32, + field_90 Int32, + field_91 Int32, + field_92 Int32, + field_93 Int32, + field_94 Int32, + field_95 Int32, + field_96 Int32, + field_97 Int32, + field_98 Int32, + field_99 Int32, + field_100 Int32, + field_101 Int32, + field_102 Int32, + field_103 Int32, + field_104 Int32, + field_105 Int32, + field_106 Int32, + field_107 Int32, + field_108 Int32, + field_109 Int32, + field_110 Int32, + field_111 Int32, + field_112 Int32, + field_113 Int32, + field_114 Int32, + field_115 Int32, + field_116 Int32, + field_117 Int32, + field_118 Int32, + field_119 Int32, + field_120 Int32, + field_121 Int32, + field_122 Int32, + field_123 Int32, + field_124 Int32, + field_125 Int32, + field_126 Int32, + field_127 Int32, + field_128 Int32, + field_129 Int32, + field_130 Int32, + field_131 Int32, + field_132 Int32, + field_133 Int32, + field_134 Int32, + field_135 Int32, + field_136 Int32, + field_137 Int32, + field_138 Int32, + field_139 Int32, + field_140 Int32, + field_141 Int32, + field_142 Int32, + field_143 Int32, + field_144 Int32, + field_145 Int32, + field_146 Int32, + field_147 Int32, + field_148 Int32, + field_149 Int32, + 
field_150 Int32, + field_151 Int32, + field_152 Int32, + field_153 Int32, + field_154 Int32, + field_155 Int32, + field_156 Int32, + field_157 Int32, + field_158 Int32, + field_159 Int32, + field_160 Int32, + field_161 Int32, + field_162 Int32, + field_163 Int32, + field_164 Int32, + field_165 Int32, + field_166 Int32, + field_167 Int32, + field_168 Int32, + field_169 Int32, + field_170 Int32, + field_171 Int32, + field_172 Int32, + field_173 Int32, + field_174 Int32, + field_175 Int32, + field_176 Int32, + field_177 Int32, + field_178 Int32, + field_179 Int32, + field_180 Int32, + field_181 Int32, + field_182 Int32, + field_183 Int32, + field_184 Int32, + field_185 Int32, + field_186 Int32, + field_187 Int32, + field_188 Int32, + field_189 Int32, + field_190 Int32, + field_191 Int32, + field_192 Int32, + field_193 Int32, + field_194 Int32, + field_195 Int32, + field_196 Int32, + field_197 Int32, + field_198 Int32, + field_199 Int32, + field_200 Int32, + field_201 Int32, + field_202 Int32, + field_203 Int32, + field_204 Int32, + field_205 Int32, + field_206 Int32, + field_207 Int32, + field_208 Int32, + field_209 Int32, + field_210 Int32, + field_211 Int32, + field_212 Int32, + field_213 Int32, + field_214 Int32, + field_215 Int32, + field_216 Int32, + field_217 Int32, + field_218 Int32, + field_219 Int32, + field_220 Int32, + field_221 Int32, + field_222 Int32, + field_223 Int32, + field_224 Int32, + field_225 Int32, + field_226 Int32, + field_227 Int32, + field_228 Int32, + field_229 Int32, + field_230 Int32, + field_231 Int32, + field_232 Int32, + field_233 Int32, + field_234 Int32, + field_235 Int32, + field_236 Int32, + field_237 Int32, + field_238 Int32, + field_239 Int32, + field_240 Int32, + field_241 Int32, + field_242 Int32, + field_243 Int32, + field_244 Int32, + field_245 Int32, + field_246 Int32, + field_247 Int32, + field_248 Int32, + field_249 Int32, + field_250 Int32, + field_251 Int32, + field_252 Int32, + field_253 Int32, + field_254 Int32, + field_255 Int32, + field_256 Int32, + field_257 Int32 +) +ENGINE = MergeTree +ORDER BY tuple()" + +echo -e "field_1\n42" | $CLICKHOUSE_CLIENT -q "INSERT INTO test_02250 FORMAT CSVWithNames" --input_format_with_names_use_header 1 +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_02250" +$CLICKHOUSE_CLIENT -q "DROP TABLE test_02250" diff --git a/tests/queries/0_stateless/02251_alter_enum_nested_struct.reference b/tests/queries/0_stateless/02251_alter_enum_nested_struct.reference new file mode 100644 index 00000000000..ada5f47c230 --- /dev/null +++ b/tests/queries/0_stateless/02251_alter_enum_nested_struct.reference @@ -0,0 +1,7 @@ +1 ['Option2','Option1'] +2 ['Option1'] +3 ['Option1','Option3'] +1 ['Option2','Option1'] +2 ['Option1'] +3 ['Option1','Option3'] +0 diff --git a/tests/queries/0_stateless/02251_alter_enum_nested_struct.sql b/tests/queries/0_stateless/02251_alter_enum_nested_struct.sql new file mode 100644 index 00000000000..ad2dab3631f --- /dev/null +++ b/tests/queries/0_stateless/02251_alter_enum_nested_struct.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS alter_enum_array; + +CREATE TABLE alter_enum_array( + Key UInt64, + Value Array(Enum8('Option1'=1, 'Option2'=2)) +) +ENGINE=MergeTree() +ORDER BY tuple(); + +INSERT INTO alter_enum_array VALUES (1, ['Option2', 'Option1']), (2, ['Option1']); + +ALTER TABLE alter_enum_array MODIFY COLUMN Value Array(Enum8('Option1'=1, 'Option2'=2, 'Option3'=3)) SETTINGS mutations_sync=2; + +INSERT INTO alter_enum_array VALUES (3, ['Option1','Option3']); + +SELECT * FROM alter_enum_array ORDER BY Key; 
+ +DETACH TABLE alter_enum_array; +ATTACH TABLE alter_enum_array; + +SELECT * FROM alter_enum_array ORDER BY Key; + +OPTIMIZE TABLE alter_enum_array FINAL; + +SELECT COUNT() FROM system.mutations where table='alter_enum_array' and database=currentDatabase(); + +DROP TABLE IF EXISTS alter_enum_array; diff --git a/tests/queries/0_stateless/02251_last_day_of_month.reference b/tests/queries/0_stateless/02251_last_day_of_month.reference new file mode 100644 index 00000000000..0b83aff1e42 --- /dev/null +++ b/tests/queries/0_stateless/02251_last_day_of_month.reference @@ -0,0 +1,7 @@ +2021-09-30 2021-09-30 2021-09-30 +2021-03-31 2021-03-31 2021-03-31 +2021-02-28 2021-02-28 2021-02-28 +2020-02-29 2020-02-29 2020-02-29 +2021-12-31 2021-12-31 2021-12-31 +2020-12-31 2020-12-31 2020-12-31 +2020-12-31 2020-12-31 diff --git a/tests/queries/0_stateless/02251_last_day_of_month.sql b/tests/queries/0_stateless/02251_last_day_of_month.sql new file mode 100644 index 00000000000..1261f051e17 --- /dev/null +++ b/tests/queries/0_stateless/02251_last_day_of_month.sql @@ -0,0 +1,46 @@ +-- month with 30 days +WITH + toDate('2021-09-12') AS date_value, + toDateTime('2021-09-12 11:22:33') AS date_time_value, + toDateTime64('2021-09-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- month with 31 days +WITH + toDate('2021-03-12') AS date_value, + toDateTime('2021-03-12 11:22:33') AS date_time_value, + toDateTime64('2021-03-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- non leap year February +WITH + toDate('2021-02-12') AS date_value, + toDateTime('2021-02-12 11:22:33') AS date_time_value, + toDateTime64('2021-02-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- leap year February +WITH + toDate('2020-02-12') AS date_value, + toDateTime('2020-02-12 11:22:33') AS date_time_value, + toDateTime64('2020-02-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- December 31 for non-leap year +WITH + toDate('2021-12-12') AS date_value, + toDateTime('2021-12-12 11:22:33') AS date_time_value, + toDateTime64('2021-12-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- December 31 for leap year +WITH + toDate('2020-12-12') AS date_value, + toDateTime('2020-12-12 11:22:33') AS date_time_value, + toDateTime64('2020-12-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- aliases +WITH + toDate('2020-12-12') AS date_value +SELECT last_day(date_value), LAST_DAY(date_value); diff --git a/tests/queries/0_stateless/02252_executable_user_defined_function_short_circuit.reference b/tests/queries/0_stateless/02252_executable_user_defined_function_short_circuit.reference new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/tests/queries/0_stateless/02252_executable_user_defined_function_short_circuit.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/02252_executable_user_defined_function_short_circuit.sql b/tests/queries/0_stateless/02252_executable_user_defined_function_short_circuit.sql 
new file mode 100644 index 00000000000..a475ba33740 --- /dev/null +++ b/tests/queries/0_stateless/02252_executable_user_defined_function_short_circuit.sql @@ -0,0 +1,10 @@ +SELECT number FROM numbers(10) WHERE number > 15 and test_function(number, number) == 4; + +SYSTEM FLUSH LOGS; + +SELECT ProfileEvents['ExecuteShellCommand'] FROM system.query_log WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query == 'SELECT number FROM numbers(10) WHERE number > 15 and test_function(number, number) == 4;' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute + LIMIT 1; diff --git a/tests/queries/0_stateless/02252_jit_profile_events.reference b/tests/queries/0_stateless/02252_jit_profile_events.reference new file mode 100644 index 00000000000..12d82114f75 --- /dev/null +++ b/tests/queries/0_stateless/02252_jit_profile_events.reference @@ -0,0 +1,4 @@ +0 +1 +0 1 2 +1 diff --git a/tests/queries/0_stateless/02252_jit_profile_events.sql b/tests/queries/0_stateless/02252_jit_profile_events.sql new file mode 100644 index 00000000000..ddb95d4fa37 --- /dev/null +++ b/tests/queries/0_stateless/02252_jit_profile_events.sql @@ -0,0 +1,31 @@ +-- Tags: no-fasttest, no-ubsan, no-cpu-aarch64 + +SET compile_expressions = 1; +SET min_count_to_compile_expression = 0; + +SYSTEM DROP COMPILED EXPRESSION CACHE; + +SELECT number + number + number FROM numbers(1); + +SYSTEM FLUSH LOGS; + +SELECT ProfileEvents['CompileFunction'] FROM system.query_log WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query == 'SELECT number + number + number FROM numbers(1);' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute + LIMIT 1; + +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT sum(number), sum(number + 1), sum(number + 2) FROM numbers(1) GROUP BY number; + +SYSTEM FLUSH LOGS; + +SELECT ProfileEvents['CompileFunction'] FROM system.query_log WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query == 'SELECT sum(number), sum(number + 1), sum(number + 2) FROM numbers(1) GROUP BY number;' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute + LIMIT 1; diff --git a/tests/queries/0_stateless/02252_reset_non_existing_setting.reference b/tests/queries/0_stateless/02252_reset_non_existing_setting.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02252_reset_non_existing_setting.sql b/tests/queries/0_stateless/02252_reset_non_existing_setting.sql new file mode 100644 index 00000000000..362388c4a10 --- /dev/null +++ b/tests/queries/0_stateless/02252_reset_non_existing_setting.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS most_ordinary_mt; + +CREATE TABLE most_ordinary_mt +( + Key UInt64 +) +ENGINE = MergeTree() +ORDER BY tuple(); + +ALTER TABLE most_ordinary_mt RESET SETTING ttl; --{serverError 36} +ALTER TABLE most_ordinary_mt RESET SETTING allow_remote_fs_zero_copy_replication, xxx; --{serverError 36} + +DROP TABLE IF EXISTS most_ordinary_mt; diff --git a/tests/queries/0_stateless/02262_column_ttl.reference b/tests/queries/0_stateless/02262_column_ttl.reference new file mode 100644 index 00000000000..f59cb48c5f5 --- /dev/null +++ b/tests/queries/0_stateless/02262_column_ttl.reference @@ -0,0 +1 @@ +1 0 diff --git a/tests/queries/0_stateless/02262_column_ttl.sh b/tests/queries/0_stateless/02262_column_ttl.sh new file mode 100755 index 00000000000..b5e29c9b2a1 --- /dev/null +++ 
b/tests/queries/0_stateless/02262_column_ttl.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +# Tags: no-parallel, no-ordinary-database +# ^^^^^^^^^^^ +# Since the underlying view may disappear while flushing logs, which leads to: +# +# DB::Exception: Table test_x449vo..inner_id.9c14fb82-e6b1-4d1a-85a6-935c3a2a2029 is dropped. (TABLE_IS_DROPPED) +# + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# regression test for column TTLs +# note that this should be written in .sh since we need $CLICKHOUSE_DATABASE +# not 'default' to catch text_log + +$CLICKHOUSE_CLIENT -nm -q " + drop table if exists ttl_02262; + drop table if exists this_text_log; + + create table ttl_02262 (date Date, key Int, value String TTL date + interval 1 month) engine=MergeTree order by key; + insert into ttl_02262 values ('2010-01-01', 2010, 'foo'); + optimize table ttl_02262 final; + + detach table ttl_02262; + attach table ttl_02262; + + -- create system.text_log + system flush logs; +" + +ttl_02262_uuid=$($CLICKHOUSE_CLIENT -q "select uuid from system.tables where database = '$CLICKHOUSE_DATABASE' and name = 'ttl_02262'") + +$CLICKHOUSE_CLIENT -nm -q " + -- OPTIMIZE TABLE x FINAL will be done in the background + -- attach to its log via the table UUID in query_id (see merger/mutator code). + create materialized view this_text_log engine=Memory() as + select * from system.text_log where query_id like '%${ttl_02262_uuid}%'; + + optimize table ttl_02262 final; + system flush logs; + -- If the TTL is applied again (during OPTIMIZE TABLE FINAL), it will produce the following message: + -- + -- Some TTL values were not calculated for part 201701_487_641_3. Will calculate them forcefully during merge. + -- + -- Let's ensure that this no longer happens: + select count()>0, countIf(message LIKE '%TTL%') from this_text_log; + + drop table ttl_02262; + drop table this_text_log; +" diff --git a/tests/queries/0_stateless/02263_format_insert_settings.reference b/tests/queries/0_stateless/02263_format_insert_settings.reference new file mode 100644 index 00000000000..721e7960875 --- /dev/null +++ b/tests/queries/0_stateless/02263_format_insert_settings.reference @@ -0,0 +1,69 @@ +insert into foo settings max_threads=1 +Syntax error (query): failed at position 40 (end of query): +insert into foo format tsv settings max_threads=1 +Can't format ASTInsertQuery with data, since data will be lost.
+[multi] insert into foo format tsv settings max_threads=1 +INSERT INTO foo +SETTINGS max_threads = 1 +FORMAT tsv +[oneline] insert into foo format tsv settings max_threads=1 +INSERT INTO foo SETTINGS max_threads = 1 FORMAT tsv +insert into foo settings max_threads=1 format tsv settings max_threads=1 +You have SETTINGS before and after FORMAT +Cannot parse input: expected '\n' before: 'settings max_threads=1 1' +1 +You have SETTINGS before and after FORMAT +[multi] insert into foo values +INSERT INTO foo FORMAT Values +[oneline] insert into foo values +INSERT INTO foo FORMAT Values +[multi] insert into foo select 1 +INSERT INTO foo SELECT 1 +[oneline] insert into foo select 1 +INSERT INTO foo SELECT 1 +[multi] insert into foo watch bar +INSERT INTO foo WATCH bar +[oneline] insert into foo watch bar +INSERT INTO foo WATCH bar +[multi] insert into foo format tsv +INSERT INTO foo FORMAT tsv +[oneline] insert into foo format tsv +INSERT INTO foo FORMAT tsv +[multi] insert into foo settings max_threads=1 values +INSERT INTO foo +SETTINGS max_threads = 1 +FORMAT Values +[oneline] insert into foo settings max_threads=1 values +INSERT INTO foo SETTINGS max_threads = 1 FORMAT Values +[multi] insert into foo settings max_threads=1 select 1 +INSERT INTO foo +SETTINGS max_threads = 1 +SELECT 1 +[oneline] insert into foo settings max_threads=1 select 1 +INSERT INTO foo SETTINGS max_threads = 1 SELECT 1 +[multi] insert into foo settings max_threads=1 watch bar +INSERT INTO foo +SETTINGS max_threads = 1 +WATCH bar +[oneline] insert into foo settings max_threads=1 watch bar +INSERT INTO foo SETTINGS max_threads = 1 WATCH bar +[multi] insert into foo settings max_threads=1 format tsv +INSERT INTO foo +SETTINGS max_threads = 1 +FORMAT tsv +[oneline] insert into foo settings max_threads=1 format tsv +INSERT INTO foo SETTINGS max_threads = 1 FORMAT tsv +[multi] insert into foo select 1 settings max_threads=1 +INSERT INTO foo +SETTINGS max_threads = 1 +SELECT 1 +SETTINGS max_threads = 1 +[oneline] insert into foo select 1 settings max_threads=1 +INSERT INTO foo SETTINGS max_threads = 1 SELECT 1 SETTINGS max_threads = 1 +[multi] insert into foo settings max_threads=1 select 1 settings max_threads=1 +INSERT INTO foo +SETTINGS max_threads = 1 +SELECT 1 +SETTINGS max_threads = 1 +[oneline] insert into foo settings max_threads=1 select 1 settings max_threads=1 +INSERT INTO foo SETTINGS max_threads = 1 SELECT 1 SETTINGS max_threads = 1 diff --git a/tests/queries/0_stateless/02263_format_insert_settings.sh b/tests/queries/0_stateless/02263_format_insert_settings.sh new file mode 100755 index 00000000000..3d5f780a38c --- /dev/null +++ b/tests/queries/0_stateless/02263_format_insert_settings.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +function run_format() +{ + local q="$1" && shift + + echo "$q" + $CLICKHOUSE_FORMAT "$@" <<<"$q" +} +function run_format_both() +{ + local q="$1" && shift + + echo "[multi] $q" + $CLICKHOUSE_FORMAT "$@" <<<"$q" + echo "[oneline] $q" + $CLICKHOUSE_FORMAT --oneline "$@" <<<"$q" +} + +# NOTE: that those queries may work slow, due to stack trace obtaining +run_format 'insert into foo settings max_threads=1' 2> >(grep -m1 -o "Syntax error (query): failed at position .* (end of query):") +# compatibility +run_format 'insert into foo format tsv settings max_threads=1' 2> >(grep -m1 -F -o "Can't format ASTInsertQuery with data, since data will be lost.") +run_format_both 'insert into foo format tsv settings max_threads=1' --allow_settings_after_format_in_insert +run_format 'insert into foo settings max_threads=1 format tsv settings max_threads=1' --allow_settings_after_format_in_insert 2> >(grep -m1 -F -o "You have SETTINGS before and after FORMAT") +# and via server (since this is a separate code path) +$CLICKHOUSE_CLIENT -q 'drop table if exists data_02263' +$CLICKHOUSE_CLIENT -q 'create table data_02263 (key Int) engine=Memory()' +$CLICKHOUSE_CLIENT -q 'insert into data_02263 format TSV settings max_threads=1 1' 2> >(grep -m1 -F -o "Cannot parse input: expected '\n' before: 'settings max_threads=1 1'") +$CLICKHOUSE_CLIENT --allow_settings_after_format_in_insert=1 -q 'insert into data_02263 format TSV settings max_threads=1 1' +$CLICKHOUSE_CLIENT -q 'select * from data_02263' +$CLICKHOUSE_CLIENT --allow_settings_after_format_in_insert=1 -q 'insert into data_02263 settings max_threads=1 format tsv settings max_threads=1' 2> >(grep -m1 -F -o "You have SETTINGS before and after FORMAT") +$CLICKHOUSE_CLIENT -q 'drop table data_02263' + +run_format_both 'insert into foo values' +run_format_both 'insert into foo select 1' +run_format_both 'insert into foo watch bar' +run_format_both 'insert into foo format tsv' + +run_format_both 'insert into foo settings max_threads=1 values' +run_format_both 'insert into foo settings max_threads=1 select 1' +run_format_both 'insert into foo settings max_threads=1 watch bar' +run_format_both 'insert into foo settings max_threads=1 format tsv' +run_format_both 'insert into foo select 1 settings max_threads=1' +run_format_both 'insert into foo settings max_threads=1 select 1 settings max_threads=1' diff --git a/tests/queries/0_stateless/02264_format_insert_compression.reference b/tests/queries/0_stateless/02264_format_insert_compression.reference new file mode 100644 index 00000000000..107b7fcb3e9 --- /dev/null +++ b/tests/queries/0_stateless/02264_format_insert_compression.reference @@ -0,0 +1,3 @@ +-- { echo } +EXPLAIN SYNTAX INSERT INTO foo FROM INFILE '/dev/null' COMPRESSION 'gz'; +INSERT INTO foo FROM INFILE \'/dev/null\' COMPRESSION \'gz\' diff --git a/tests/queries/0_stateless/02264_format_insert_compression.sql b/tests/queries/0_stateless/02264_format_insert_compression.sql new file mode 100644 index 00000000000..c095a8fbbb7 --- /dev/null +++ b/tests/queries/0_stateless/02264_format_insert_compression.sql @@ -0,0 +1,2 @@ +-- { echo } +EXPLAIN SYNTAX INSERT INTO foo FROM INFILE '/dev/null' COMPRESSION 'gz'; diff --git a/tests/queries/0_stateless/02264_format_insert_infile.reference b/tests/queries/0_stateless/02264_format_insert_infile.reference new file mode 100644 index 00000000000..338ea6fbfc6 --- /dev/null +++ b/tests/queries/0_stateless/02264_format_insert_infile.reference @@ -0,0 +1,3 @@ +-- { echo } +EXPLAIN SYNTAX INSERT 
INTO foo FROM INFILE '/dev/null'; +INSERT INTO foo FROM INFILE \'/dev/null\' diff --git a/tests/queries/0_stateless/02264_format_insert_infile.sql b/tests/queries/0_stateless/02264_format_insert_infile.sql new file mode 100644 index 00000000000..38ee39d932d --- /dev/null +++ b/tests/queries/0_stateless/02264_format_insert_infile.sql @@ -0,0 +1,2 @@ +-- { echo } +EXPLAIN SYNTAX INSERT INTO foo FROM INFILE '/dev/null'; diff --git a/tests/queries/0_stateless/02265_cross_join_empty_list.reference b/tests/queries/0_stateless/02265_cross_join_empty_list.reference new file mode 100644 index 00000000000..fef5e889a1e --- /dev/null +++ b/tests/queries/0_stateless/02265_cross_join_empty_list.reference @@ -0,0 +1,52 @@ +24 +24 +24 +24 24 24 +0 0 0 +0 0 1 +0 0 2 +0 0 3 +0 1 0 +0 1 1 +0 1 2 +0 1 3 +0 2 0 +0 2 1 +0 2 2 +0 2 3 +1 0 0 +1 0 1 +1 0 2 +1 0 3 +1 1 0 +1 1 1 +1 1 2 +1 1 3 +1 2 0 +1 2 1 +1 2 2 +1 2 3 +0 0 0 +0 0 1 +0 0 2 +0 0 3 +0 1 0 +0 1 1 +0 1 2 +0 1 3 +0 2 0 +0 2 1 +0 2 2 +0 2 3 +1 0 0 +1 0 1 +1 0 2 +1 0 3 +1 1 0 +1 1 1 +1 1 2 +1 1 3 +1 2 0 +1 2 1 +1 2 2 +1 2 3 diff --git a/tests/queries/0_stateless/02265_cross_join_empty_list.sql b/tests/queries/0_stateless/02265_cross_join_empty_list.sql new file mode 100644 index 00000000000..346a047351d --- /dev/null +++ b/tests/queries/0_stateless/02265_cross_join_empty_list.sql @@ -0,0 +1,6 @@ +SELECT count(1) FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3; +SELECT count(*) FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3; +SELECT count() FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3; +SELECT count(n1.number), count(n2.number), count(n3.number) FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3; +SELECT * FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3 ORDER BY n1.number, n2.number, n3.number; +SELECT n1.number, n2.number, n3.number FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3 ORDER BY n1.number, n2.number, n3.number; diff --git a/tests/queries/0_stateless/02265_limit_push_down_over_window_functions_bug.reference b/tests/queries/0_stateless/02265_limit_push_down_over_window_functions_bug.reference new file mode 100644 index 00000000000..592637a91fc --- /dev/null +++ b/tests/queries/0_stateless/02265_limit_push_down_over_window_functions_bug.reference @@ -0,0 +1,10 @@ +0 1 +1 2 +2 3 +a 2 +0 10000000 +1 10000000 +2 10000000 +0 10000000 +1 10000000 +2 10000000 diff --git a/tests/queries/0_stateless/02265_limit_push_down_over_window_functions_bug.sql b/tests/queries/0_stateless/02265_limit_push_down_over_window_functions_bug.sql new file mode 100644 index 00000000000..208ec8ef11f --- /dev/null +++ b/tests/queries/0_stateless/02265_limit_push_down_over_window_functions_bug.sql @@ -0,0 +1,16 @@ +SELECT + number, + leadInFrame(number) OVER w AS W +FROM numbers(10) +WINDOW w AS (ORDER BY number ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +LIMIT 3; + +WITH arrayJoin(['a', 'a', 'b', 'b']) AS field +SELECT + field, + count() OVER (PARTITION BY field) +ORDER BY field ASC +LIMIT 1; + +select * from ( ( select *, count() over () cnt from ( select * from numbers(10000000) ) ) ) limit 3 ; +select * from ( ( select *, count() over () cnt from ( select * from numbers(10000000) ) ) ) order by number limit 3 ; diff --git a/tests/queries/0_stateless/02265_per_table_ttl_mutation_on_change.reference b/tests/queries/0_stateless/02265_per_table_ttl_mutation_on_change.reference new file mode 100644 index 00000000000..740b4edf189 --- /dev/null +++ 
b/tests/queries/0_stateless/02265_per_table_ttl_mutation_on_change.reference @@ -0,0 +1,22 @@ +-- { echoOn } +alter table per_table_ttl_02265 modify TTL date + interval 1 month; +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +1 +alter table per_table_ttl_02265 modify TTL date + interval 1 month; +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +1 +alter table per_table_ttl_02265 modify TTL date + interval 2 month; +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +2 +alter table per_table_ttl_02265 modify TTL date + interval 2 month group by key set value = argMax(value, date); +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +3 +alter table per_table_ttl_02265 modify TTL date + interval 2 month group by key set value = argMax(value, date); +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +3 +alter table per_table_ttl_02265 modify TTL date + interval 2 month recompress codec(ZSTD(17)); +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +4 +alter table per_table_ttl_02265 modify TTL date + interval 2 month recompress codec(ZSTD(17)); +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +4 diff --git a/tests/queries/0_stateless/02265_per_table_ttl_mutation_on_change.sql b/tests/queries/0_stateless/02265_per_table_ttl_mutation_on_change.sql new file mode 100644 index 00000000000..53e2e72228a --- /dev/null +++ b/tests/queries/0_stateless/02265_per_table_ttl_mutation_on_change.sql @@ -0,0 +1,22 @@ +drop table if exists per_table_ttl_02265; +create table per_table_ttl_02265 (key Int, date Date, value String) engine=MergeTree() order by key; +insert into per_table_ttl_02265 values (1, today(), '1'); + +-- { echoOn } +alter table per_table_ttl_02265 modify TTL date + interval 1 month; +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +alter table per_table_ttl_02265 modify TTL date + interval 1 month; +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +alter table per_table_ttl_02265 modify TTL date + interval 2 month; +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +alter table per_table_ttl_02265 modify TTL date + interval 2 month group by key set value = argMax(value, date); +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +alter table per_table_ttl_02265 modify TTL date + interval 2 month group by key set value = argMax(value, date); +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +alter table per_table_ttl_02265 modify TTL date + interval 2 month recompress codec(ZSTD(17)); +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +alter table per_table_ttl_02265 modify TTL date + interval 2 month recompress codec(ZSTD(17)); +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; + +-- { echoOff } +drop table per_table_ttl_02265; diff --git 
a/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.reference b/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.reference new file mode 100644 index 00000000000..58c9bdf9d01 --- /dev/null +++ b/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.reference @@ -0,0 +1 @@ +111 diff --git a/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.sql b/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.sql new file mode 100644 index 00000000000..3ec995a6a24 --- /dev/null +++ b/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.sql @@ -0,0 +1,17 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 02265_atomic_db; +DROP DATABASE IF EXISTS 02265_ordinary_db; + +CREATE DATABASE 02265_atomic_db ENGINE = Atomic; +CREATE DATABASE 02265_ordinary_db ENGINE = Ordinary; + +CREATE TABLE 02265_ordinary_db.join_table ( `a` Int64 ) ENGINE = Join(`ALL`, LEFT, a); +INSERT INTO 02265_ordinary_db.join_table VALUES (111); + +RENAME TABLE 02265_ordinary_db.join_table TO 02265_atomic_db.join_table; + +SELECT * FROM 02265_atomic_db.join_table; + +DROP DATABASE IF EXISTS 02265_atomic_db; +DROP DATABASE IF EXISTS 02265_ordinary_db; diff --git a/tests/queries/0_stateless/02265_test_dns_profile_events.reference b/tests/queries/0_stateless/02265_test_dns_profile_events.reference new file mode 100644 index 00000000000..97ca33b311f --- /dev/null +++ b/tests/queries/0_stateless/02265_test_dns_profile_events.reference @@ -0,0 +1,2 @@ +first_check 1 +second_check 1 diff --git a/tests/queries/0_stateless/02265_test_dns_profile_events.sh b/tests/queries/0_stateless/02265_test_dns_profile_events.sh new file mode 100755 index 00000000000..756a761a0ae --- /dev/null +++ b/tests/queries/0_stateless/02265_test_dns_profile_events.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Tags: no-parallel + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + + +current_dns_errors=$($CLICKHOUSE_CLIENT --query "SELECT sum(value) FROM system.events where event = 'DNSError';") +${CLICKHOUSE_CLIENT} --query "SELECT * FROM remote('ThisHostNameDoesNotExistSoItShouldFail', system, one)" 2>/dev/null +${CLICKHOUSE_CLIENT} --query "SELECT 'first_check', sum(value) > ${current_dns_errors} FROM system.events where event = 'DNSError';" + +current_dns_errors=$($CLICKHOUSE_CLIENT --query "SELECT sum(value) FROM system.events where event = 'DNSError';") +${CLICKHOUSE_CLIENT} --query "SELECT * FROM remote('ThisHostNameDoesNotExistSoItShouldFail2', system, one)" 2>/dev/null +${CLICKHOUSE_CLIENT} --query "SELECT 'second_check', sum(value) > ${current_dns_errors} FROM system.events where event = 'DNSError';" + +${CLICKHOUSE_CLIENT} --query "SYSTEM DROP DNS CACHE" diff --git a/tests/queries/0_stateless/02266_auto_add_nullable.reference b/tests/queries/0_stateless/02266_auto_add_nullable.reference new file mode 100644 index 00000000000..582bd1a1d6d --- /dev/null +++ b/tests/queries/0_stateless/02266_auto_add_nullable.reference @@ -0,0 +1,6 @@ +val0 Nullable(Int8) DEFAULT NULL +val1 Nullable(Int8) DEFAULT NULL +val2 Nullable(UInt8) DEFAULT NULL +val3 Nullable(String) DEFAULT NULL +val4 LowCardinality(Nullable(Int8)) DEFAULT NULL +val5 LowCardinality(Nullable(Int8)) DEFAULT NULL diff --git a/tests/queries/0_stateless/02266_auto_add_nullable.sql b/tests/queries/0_stateless/02266_auto_add_nullable.sql new file mode 100644 index 00000000000..7a9c6fbb19f --- /dev/null +++ b/tests/queries/0_stateless/02266_auto_add_nullable.sql @@ -0,0 +1,17 @@ +SET allow_suspicious_low_cardinality_types = 1; +DROP TABLE IF EXISTS 02266_auto_add_nullable; + +CREATE TABLE 02266_auto_add_nullable +( + val0 Int8 DEFAULT NULL, + val1 Nullable(Int8) DEFAULT NULL, + val2 UInt8 DEFAULT NUll, + val3 String DEFAULT null, + val4 LowCardinality(Int8) DEFAULT NULL, + val5 LowCardinality(Nullable(Int8)) DEFAULT NULL +) +ENGINE = MergeTree order by tuple(); + +DESCRIBE TABLE 02266_auto_add_nullable; + +DROP TABLE IF EXISTS 02266_auto_add_nullable; \ No newline at end of file diff --git a/tests/queries/0_stateless/02267_empty_arrays_read_reverse.reference b/tests/queries/0_stateless/02267_empty_arrays_read_reverse.reference new file mode 100644 index 00000000000..cc0cc5714d9 --- /dev/null +++ b/tests/queries/0_stateless/02267_empty_arrays_read_reverse.reference @@ -0,0 +1 @@ +['x'] 0 ['1','2','3','4','5','6'] diff --git a/tests/queries/0_stateless/02267_empty_arrays_read_reverse.sql b/tests/queries/0_stateless/02267_empty_arrays_read_reverse.sql new file mode 100644 index 00000000000..7c1cf47c540 --- /dev/null +++ b/tests/queries/0_stateless/02267_empty_arrays_read_reverse.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS t_02267; + +CREATE TABLE t_02267 +( + a Array(String), + b UInt32, + c Array(String) +) +ENGINE = MergeTree +ORDER BY b +SETTINGS index_granularity = 500; + +INSERT INTO t_02267 (b, a, c) SELECT 0, ['x'], ['1','2','3','4','5','6'] FROM numbers(1) ; +INSERT INTO t_02267 (b, a, c) SELECT 1, [], ['1','2','3','4','5','6'] FROM numbers(300000); + +OPTIMIZE TABLE t_02267 FINAL; + +SELECT * FROM t_02267 WHERE hasAll(a, ['x']) +ORDER BY b DESC +SETTINGS max_threads=1, max_block_size=1000; + +DROP TABLE IF EXISTS t_02267; diff --git a/tests/queries/0_stateless/02268_json_wrong_root_type_in_schema_inference.reference b/tests/queries/0_stateless/02268_json_wrong_root_type_in_schema_inference.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/tests/queries/0_stateless/02268_json_wrong_root_type_in_schema_inference.sql b/tests/queries/0_stateless/02268_json_wrong_root_type_in_schema_inference.sql new file mode 100644 index 00000000000..2e66635a752 --- /dev/null +++ b/tests/queries/0_stateless/02268_json_wrong_root_type_in_schema_inference.sql @@ -0,0 +1,8 @@ +-- Tags: no-backward-compatibility-check:22.4.1.1 + +insert into function file('02268_data.jsonl', 'TSV') select 1; +select * from file('02268_data.jsonl'); --{serverError CANNOT_EXTRACT_TABLE_STRUCTURE} + +insert into function file('02268_data.jsonCompactEachRow', 'TSV') select 1; +select * from file('02268_data.jsonCompactEachRow'); --{serverError CANNOT_EXTRACT_TABLE_STRUCTURE} + diff --git a/tests/queries/0_stateless/transactions.lib b/tests/queries/0_stateless/transactions.lib new file mode 100755 index 00000000000..521c56754bc --- /dev/null +++ b/tests/queries/0_stateless/transactions.lib @@ -0,0 +1,78 @@ +#!/usr/bin/env bash + +# shellcheck disable=SC2015 + +# Useful for running queries in parallel sessions +function tx() +{ + tx_num=$1 + query=$2 + + session="${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}_tx$tx_num" + query_id="${session}_${RANDOM}" + url_without_session="https://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTPS}/?" + url="${url_without_session}session_id=$session&query_id=$query_id&database=$CLICKHOUSE_DATABASE" + + ${CLICKHOUSE_CURL} -m 60 -sSk "$url" --data "$query" | sed "s/^/tx$tx_num\t/" +} + +# Waits for the last query in the session to finish +function tx_wait() { + tx_num=$1 + + session="${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}_tx$tx_num" + + # try to get the pid of the previous query + query_pid="" + tmp_file_name="${CLICKHOUSE_TMP}/tmp_tx_${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}" + query_id_and_pid=$(grep -F "$session" "$tmp_file_name" 2>/dev/null | tail -1) ||: + read -r query_id query_pid <<< "$query_id_and_pid" ||: + + # wait for the previous query in the transaction + if [ -n "$query_pid" ]; then + timeout 5 tail --pid=$query_pid -f /dev/null && return ||: + fi + + # there is no pid (or maybe we got the wrong one), so wait using system.processes (it's less reliable) + count=0 + while [[ $($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE query_id LIKE '$session%'") -gt 0 ]]; do + sleep 0.5 + count=$((count+1)) + if [ "$count" -gt 120 ]; then + echo "timeout while waiting for $tx_num" + break + fi + done; +} + +# Waits for the previous query in the session to finish, then starts a new one asynchronously +function tx_async() +{ + tx_num=$1 + query=$2 + + tx_wait "$tx_num" + + session="${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}_tx$tx_num" + query_id="${session}_${RANDOM}" + url_without_session="https://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTPS}/?" + url="${url_without_session}session_id=$session&query_id=$query_id&database=$CLICKHOUSE_DATABASE" + + # We cannot be sure that the query will actually start executing and appear in system.processes before the next call to tx_wait + # Also, we cannot use a global map in bash to store the last query_id for each tx_num, so we use a tmp file... + tmp_file_name="${CLICKHOUSE_TMP}/tmp_tx_${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}" + + # run query asynchronously + ${CLICKHOUSE_CURL} -m 60 -sSk "$url" --data "$query" | sed "s/^/tx$tx_num\t/" & + query_pid=$!
+ echo -e "$query_id\t$query_pid" >> "$tmp_file_name" +} + +# Wait for previous query in session to finish, execute the next one synchronously +function tx_sync() +{ + tx_num=$1 + query=$2 + tx_wait "$tx_num" + tx "$tx_num" "$query" +} diff --git a/tests/queries/1_stateful/00159_parallel_formatting_csv_and_friends.sh b/tests/queries/1_stateful/00159_parallel_formatting_csv_and_friends.sh index 1476d2892bf..6f251e7f95a 100755 --- a/tests/queries/1_stateful/00159_parallel_formatting_csv_and_friends.sh +++ b/tests/queries/1_stateful/00159_parallel_formatting_csv_and_friends.sh @@ -9,11 +9,11 @@ FORMATS=('CSV' 'CSVWithNames') for format in "${FORMATS[@]}" do echo "$format, false"; - $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \ + $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=false -q \ "SELECT ClientEventTime::DateTime('Asia/Dubai') as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits ORDER BY a, b, c Format $format" | md5sum echo "$format, true"; - $CLICKHOUSE_CLIENT --output_format_parallel_formatting=true -q \ + $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=true -q \ "SELECT ClientEventTime::DateTime('Asia/Dubai') as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits ORDER BY a, b, c Format $format" | md5sum done diff --git a/tests/queries/1_stateful/00159_parallel_formatting_json_and_friends.sh b/tests/queries/1_stateful/00159_parallel_formatting_json_and_friends.sh index a96ed0c9b96..77dd330099a 100755 --- a/tests/queries/1_stateful/00159_parallel_formatting_json_and_friends.sh +++ b/tests/queries/1_stateful/00159_parallel_formatting_json_and_friends.sh @@ -11,10 +11,10 @@ FORMATS=('JSONEachRow' 'JSONCompactEachRow' 'JSONCompactStringsEachRow' 'JSONCom for format in "${FORMATS[@]}" do echo "$format, false"; - $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \ + $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=false -q \ "SELECT ClientEventTime::DateTime('Asia/Dubai') as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits ORDER BY a, b, c LIMIT 3000000 Format $format" | md5sum echo "$format, true"; - $CLICKHOUSE_CLIENT --output_format_parallel_formatting=true -q \ + $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=true -q \ "SELECT ClientEventTime::DateTime('Asia/Dubai') as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits ORDER BY a, b, c LIMIT 3000000 Format $format" | md5sum done diff --git a/tests/queries/1_stateful/00159_parallel_formatting_tsv_and_friends.sh b/tests/queries/1_stateful/00159_parallel_formatting_tsv_and_friends.sh index 9d48774dd2d..02441190b91 100755 --- a/tests/queries/1_stateful/00159_parallel_formatting_tsv_and_friends.sh +++ b/tests/queries/1_stateful/00159_parallel_formatting_tsv_and_friends.sh @@ -10,10 +10,10 @@ FORMATS=('TSV' 'TSVWithNames' 'TSKV') for format in "${FORMATS[@]}" do echo "$format, false"; - $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \ + $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=false -q \ "SELECT ClientEventTime::DateTime('Asia/Dubai') as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits ORDER BY a, b, c Format $format" | md5sum echo "$format, true"; - $CLICKHOUSE_CLIENT --output_format_parallel_formatting=true -q \ + $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=true -q \ "SELECT ClientEventTime::DateTime('Asia/Dubai') as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits ORDER BY a, b, c Format $format" | md5sum 
done diff --git a/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh b/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh index d14a174d3a0..0c930a9fb6e 100755 --- a/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh +++ b/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh @@ -13,9 +13,9 @@ do $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(c FixedString(16), a DateTime('Asia/Dubai'), b String) ENGINE=Memory()" echo "$format, false"; - $CLICKHOUSE_CLIENT --max_block_size=65505 --output_format_parallel_formatting=false -q \ + $CLICKHOUSE_CLIENT --max_threads=0 --max_block_size=65505 --output_format_parallel_formatting=false -q \ "SELECT URLRegions as d, toTimeZone(ClientEventTime, 'Asia/Dubai') as a, MobilePhoneModel as b, ParamPrice as e, ClientIP6 as c FROM test.hits LIMIT 50000 Format $format" | \ - $CLICKHOUSE_CLIENT --max_block_size=65505 --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=false -q "INSERT INTO parsing_with_names FORMAT $format" + $CLICKHOUSE_CLIENT --max_threads=0 --max_block_size=65505 --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=false -q "INSERT INTO parsing_with_names FORMAT $format" $CLICKHOUSE_CLIENT -q "SELECT * FROM parsing_with_names;" | md5sum $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names" @@ -23,9 +23,9 @@ do $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(c FixedString(16), a DateTime('Asia/Dubai'), b String) ENGINE=Memory()" echo "$format, true"; - $CLICKHOUSE_CLIENT --max_block_size=65505 --output_format_parallel_formatting=false -q \ + $CLICKHOUSE_CLIENT --max_threads=0 --max_block_size=65505 --output_format_parallel_formatting=false -q \ "SELECT URLRegions as d, toTimeZone(ClientEventTime, 'Asia/Dubai') as a, MobilePhoneModel as b, ParamPrice as e, ClientIP6 as c FROM test.hits LIMIT 50000 Format $format" | \ - $CLICKHOUSE_CLIENT --max_block_size=65505 --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=true -q "INSERT INTO parsing_with_names FORMAT $format" + $CLICKHOUSE_CLIENT --max_threads=0 --max_block_size=65505 --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=true -q "INSERT INTO parsing_with_names FORMAT $format" $CLICKHOUSE_CLIENT -q "SELECT * FROM parsing_with_names;" | md5sum $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names" diff --git a/tests/queries/1_stateful/00167_parallel_parsing_with_names_and_types.sh b/tests/queries/1_stateful/00167_parallel_parsing_with_names_and_types.sh index 33562918f67..29546ca1814 100755 --- a/tests/queries/1_stateful/00167_parallel_parsing_with_names_and_types.sh +++ b/tests/queries/1_stateful/00167_parallel_parsing_with_names_and_types.sh @@ -13,9 +13,9 @@ do $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(c FixedString(16), a DateTime('Asia/Dubai'), b String) ENGINE=Memory()" echo "$format, false"; - $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \ + $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=false -q \ "SELECT URLRegions as d, toTimeZone(ClientEventTime, 'Asia/Dubai') as a, MobilePhoneModel as b, ParamPrice as e, ClientIP6 as c FROM test.hits LIMIT 5000 Format $format" | \ - $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=false -q "INSERT INTO parsing_with_names FORMAT $format SETTINGS input_format_null_as_default=0" + $CLICKHOUSE_CLIENT --max_threads=0 --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=false -q "INSERT INTO 
parsing_with_names SETTINGS input_format_null_as_default=0 FORMAT $format" $CLICKHOUSE_CLIENT -q "SELECT * FROM parsing_with_names;" | md5sum $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names" @@ -23,9 +23,9 @@ do $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(c FixedString(16), a DateTime('Asia/Dubai'), b String) ENGINE=Memory()" echo "$format, true"; - $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \ + $CLICKHOUSE_CLIENT --max_threads=0 --output_format_parallel_formatting=false -q \ "SELECT URLRegions as d, toTimeZone(ClientEventTime, 'Asia/Dubai') as a, MobilePhoneModel as b, ParamPrice as e, ClientIP6 as c FROM test.hits LIMIT 5000 Format $format" | \ - $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=true -q "INSERT INTO parsing_with_names FORMAT $format SETTINGS input_format_null_as_default=0" + $CLICKHOUSE_CLIENT --max_threads=0 --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=true -q "INSERT INTO parsing_with_names SETTINGS input_format_null_as_default=0 FORMAT $format" $CLICKHOUSE_CLIENT -q "SELECT * FROM parsing_with_names;" | md5sum $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names" diff --git a/tests/queries/1_stateful/00168_parallel_processing_on_replicas_part_1.sh b/tests/queries/1_stateful/00168_parallel_processing_on_replicas_part_1.sh index 276fc0274c2..58ce66056af 100755 --- a/tests/queries/1_stateful/00168_parallel_processing_on_replicas_part_1.sh +++ b/tests/queries/1_stateful/00168_parallel_processing_on_replicas_part_1.sh @@ -68,8 +68,8 @@ do TESTNAME_RESULT="/tmp/result_$TESTNAME" NEW_TESTNAME_RESULT="/tmp/result_dist_$TESTNAME" - $CLICKHOUSE_CLIENT $SETTINGS -nm --testmode < $TESTPATH > $TESTNAME_RESULT - $CLICKHOUSE_CLIENT $SETTINGS -nm --testmode < $NEW_TESTNAME > $NEW_TESTNAME_RESULT + $CLICKHOUSE_CLIENT $SETTINGS -nm < $TESTPATH > $TESTNAME_RESULT + $CLICKHOUSE_CLIENT $SETTINGS -nm < $NEW_TESTNAME > $NEW_TESTNAME_RESULT expected=$(cat $TESTNAME_RESULT | md5sum) actual=$(cat $NEW_TESTNAME_RESULT | md5sum) diff --git a/tests/queries/0_stateless/01747_system_session_log_long.reference b/tests/queries/bugs/01747_system_session_log_long.reference similarity index 100% rename from tests/queries/0_stateless/01747_system_session_log_long.reference rename to tests/queries/bugs/01747_system_session_log_long.reference diff --git a/tests/queries/0_stateless/01747_system_session_log_long.sh b/tests/queries/bugs/01747_system_session_log_long.sh similarity index 100% rename from tests/queries/0_stateless/01747_system_session_log_long.sh rename to tests/queries/bugs/01747_system_session_log_long.sh diff --git a/tests/testflows/aes_encryption/requirements/requirements.md b/tests/testflows/aes_encryption/requirements/requirements.md index 80cb614268c..23906f797d0 100644 --- a/tests/testflows/aes_encryption/requirements/requirements.md +++ b/tests/testflows/aes_encryption/requirements/requirements.md @@ -311,7 +311,7 @@ version: 1.0 of the `encrypt` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as +mode and SHALL accept [CBC], [CFB128], or [OFB] as well as [CTR] and [GCM] as the values. For example, `aes-256-ofb`. 
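For illustration, a minimal sketch of the `aes-[key length]-[mode]` value format described above (not part of the requirements wording; the key and IV literals are hypothetical placeholders, and the `mode, plaintext, key, iv` argument order is assumed):

```sql
-- Hypothetical values: a 32-byte key and a 16-byte IV for the aes-256-ofb mode.
SELECT hex(encrypt('aes-256-ofb', 'plaintext', '12345678901234567890123456789012', '1234567890123456'));
```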
#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.Invalid @@ -327,9 +327,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `encrypt` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -403,9 +400,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `encrypt` function is called with the following parameter values when using non-GCM modes -* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified -* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified -* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified * `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified @@ -476,7 +470,7 @@ version: 1.0 of the `decrypt` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as +mode and SHALL accept [CBC], [CFB128], or [OFB] as well as [CTR] and [GCM] as the values. For example, `aes-256-ofb`. #### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.Invalid @@ -492,9 +486,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `decrypt` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -570,9 +561,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `decrypt` function is called with the following parameter values when using non-GCM modes -* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified -* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified -* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified * `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified @@ -644,7 +632,7 @@ version: 1.0 of the `aes_encrypt_mysql` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`. 
+mode and SHALL accept [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`. #### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.Invalid version: 1.0 @@ -659,9 +647,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `aes_encrypt_mysql` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -750,9 +735,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `aes_encrypt_mysql` function is called with the following parameter values -* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified -* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is specified -* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified * `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes * `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes * `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes @@ -810,7 +792,7 @@ version: 1.0 of the `aes_decrypt_mysql` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`. +mode and SHALL accept [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`. 
#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.Invalid version: 1.0 @@ -825,9 +807,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `aes_decrypt_mysql` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -916,9 +895,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `aes_decrypt_mysql` function is called with the following parameter values -* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified -* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is specified -* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified * `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes * `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes * `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes @@ -954,7 +930,6 @@ version: 1.0 [GCM]: https://en.wikipedia.org/wiki/Galois/Counter_Mode [CTR]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_(CTR) [CBC]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_block_chaining_(CBC) -[ECB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Electronic_codebook_(ECB) [CFB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_feedback_(CFB) [CFB128]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_feedback_(CFB) [OFB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Output_feedback_(OFB) diff --git a/tests/testflows/aes_encryption/requirements/requirements.py b/tests/testflows/aes_encryption/requirements/requirements.py index 0fbbea7e85a..4523f2d820f 100644 --- a/tests/testflows/aes_encryption/requirements/requirements.py +++ b/tests/testflows/aes_encryption/requirements/requirements.py @@ -429,7 +429,7 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_ValuesFormat = Requirement( "of the `encrypt` function where\n" "the `key_length` SHALL specifies the length of the key and SHALL accept\n" "`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n" - "mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as\n" + "mode and SHALL accept [CBC], [CFB128], or [OFB] as well as\n" "[CTR] and [GCM] as the values. 
For example, `aes-256-ofb`.\n" "\n" ), @@ -467,9 +467,6 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Values = Requirement( "[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n" "of the `encrypt` function:\n" "\n" - "* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n" - "* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n" - "* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n" "* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n" @@ -642,9 +639,6 @@ RQ_SRS008_AES_Encrypt_Function_NonGCMMode_KeyAndInitializationVector_Length = Re "[ClickHouse] SHALL return an error when the `encrypt` function is called with the following parameter values\n" "when using non-GCM modes\n" "\n" - "* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified\n" - "* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified\n" - "* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified\n" "* `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" "* `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" "* `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" @@ -790,7 +784,7 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_ValuesFormat = Requirement( "of the `decrypt` function where\n" "the `key_length` SHALL specifies the length of the key and SHALL accept\n" "`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n" - "mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as\n" + "mode and SHALL accept [CBC], [CFB128], or [OFB] as well as\n" "[CTR] and [GCM] as the values. 
For example, `aes-256-ofb`.\n" "\n" ), @@ -828,9 +822,6 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Values = Requirement( "[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n" "of the `decrypt` function:\n" "\n" - "* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n" - "* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n" - "* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n" "* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n" @@ -1005,9 +996,6 @@ RQ_SRS008_AES_Decrypt_Function_NonGCMMode_KeyAndInitializationVector_Length = Re "[ClickHouse] SHALL return an error when the `decrypt` function is called with the following parameter values\n" "when using non-GCM modes\n" "\n" - "* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified\n" - "* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified\n" - "* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified\n" "* `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" "* `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" "* `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" @@ -1154,7 +1142,7 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_ValuesFormat = Requirement( "of the `aes_encrypt_mysql` function where\n" "the `key_length` SHALL specifies the length of the key and SHALL accept\n" "`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n" - "mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`.\n" + "mode and SHALL accept [CBC], [CFB128], or [OFB]. 
For example, `aes-256-ofb`.\n" "\n" ), link=None, @@ -1191,9 +1179,6 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Values = Requirement( "[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n" "of the `aes_encrypt_mysql` function:\n" "\n" - "* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n" - "* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n" - "* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n" "* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n" @@ -1392,9 +1377,6 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Mode_KeyAndInitializationVector_Length = Re description=( "[ClickHouse] SHALL return an error when the `aes_encrypt_mysql` function is called with the following parameter values\n" "\n" - "* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified\n" - "* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is specified\n" - "* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified\n" "* `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n" "* `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n" "* `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n" @@ -1516,7 +1498,7 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_ValuesFormat = Requirement( "of the `aes_decrypt_mysql` function where\n" "the `key_length` SHALL specifies the length of the key and SHALL accept\n" "`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n" - "mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`.\n" + "mode and SHALL accept [CBC], [CFB128], or [OFB]. 
For example, `aes-256-ofb`.\n" "\n" ), link=None, @@ -1553,9 +1535,6 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Values = Requirement( "[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n" "of the `aes_decrypt_mysql` function:\n" "\n" - "* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n" - "* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n" - "* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n" "* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n" "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n" @@ -1754,9 +1733,6 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Mode_KeyAndInitializationVector_Length = Re description=( "[ClickHouse] SHALL return an error when the `aes_decrypt_mysql` function is called with the following parameter values\n" "\n" - "* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified\n" - "* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is specified\n" - "* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified\n" "* `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n" "* `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n" "* `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n" @@ -2606,7 +2582,7 @@ version: 1.0 of the `encrypt` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as +mode and SHALL accept [CBC], [CFB128], or [OFB] as well as [CTR] and [GCM] as the values. For example, `aes-256-ofb`. 
#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.Invalid @@ -2622,9 +2598,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `encrypt` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -2698,9 +2671,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `encrypt` function is called with the following parameter values when using non-GCM modes -* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified -* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified -* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified * `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified @@ -2771,7 +2741,7 @@ version: 1.0 of the `decrypt` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as +mode and SHALL accept [CBC], [CFB128], or [OFB] as well as [CTR] and [GCM] as the values. For example, `aes-256-ofb`. #### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.Invalid @@ -2787,9 +2757,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `decrypt` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -2865,9 +2832,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `decrypt` function is called with the following parameter values when using non-GCM modes -* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified -* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified -* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified * `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified * `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified @@ -2939,7 +2903,7 @@ version: 1.0 of the `aes_encrypt_mysql` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. 
For example, `aes-256-ofb`. +mode and SHALL accept [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`. #### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.Invalid version: 1.0 @@ -2954,9 +2918,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `aes_encrypt_mysql` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -3045,9 +3006,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `aes_encrypt_mysql` function is called with the following parameter values -* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified -* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is specified -* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified * `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes * `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes * `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes @@ -3105,7 +3063,7 @@ version: 1.0 of the `aes_decrypt_mysql` function where the `key_length` SHALL specifies the length of the key and SHALL accept `128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption -mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`. +mode and SHALL accept [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`. 
#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.Invalid version: 1.0 @@ -3120,9 +3078,6 @@ version: 1.0 [ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter of the `aes_decrypt_mysql` function: -* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key -* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key -* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key * `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key * `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key @@ -3211,9 +3166,6 @@ version: 1.0 [ClickHouse] SHALL return an error when the `aes_decrypt_mysql` function is called with the following parameter values -* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified -* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is specified -* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified * `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes * `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes * `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes @@ -3249,7 +3201,6 @@ version: 1.0 [GCM]: https://en.wikipedia.org/wiki/Galois/Counter_Mode [CTR]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_(CTR) [CBC]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_block_chaining_(CBC) -[ECB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Electronic_codebook_(ECB) [CFB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_feedback_(CFB) [CFB128]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_feedback_(CFB) [OFB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Output_feedback_(OFB) diff --git a/tests/testflows/ldap/external_user_directory/tests/common.py b/tests/testflows/ldap/external_user_directory/tests/common.py index 871be815a35..c0b6e72cd8e 100644 --- a/tests/testflows/ldap/external_user_directory/tests/common.py +++ b/tests/testflows/ldap/external_user_directory/tests/common.py @@ -84,16 +84,6 @@ def rbac_roles(*roles, node=None): node.query(f"DROP ROLE IF EXISTS {role}") -def verify_ldap_user_exists(server, username, password): - """Check that LDAP user is defined on the LDAP server.""" - with By("searching LDAP database"): - ldap_node = current().context.cluster.node(server) - r = ldap_node.command( - f"ldapwhoami -H ldap://localhost -D 'cn={user_name},ou=users,dc=company,dc=com' -w {password}" - ) - assert r.exitcode == 0, error() - - def create_ldap_external_user_directory_config_content( server=None, roles=None, **kwargs ): diff --git a/utils/check-style/codespell-ignore-words.list b/utils/check-style/codespell-ignore-words.list index d3a7586647c..7aabaff17c5 100644 --- a/utils/check-style/codespell-ignore-words.list +++ b/utils/check-style/codespell-ignore-words.list @@ -10,3 +10,4 @@ ths offsett numer ue +alse diff --git a/utils/db-generator/query_db_generator.cpp b/utils/db-generator/query_db_generator.cpp index dec1f6fe60f..6455bc045d6 100644 --- a/utils/db-generator/query_db_generator.cpp +++ b/utils/db-generator/query_db_generator.cpp @@ -229,7 +229,7 @@ std::map func_to_return_type = { {"torelativeweeknum", FuncRet(Type::i, "")}, {"torelativedaynum", 
FuncRet(Type::i, "")}, {"torelativehournum", FuncRet(Type::i, "")}, {"torelativeminutenum", FuncRet(Type::i, "")}, {"torelativesecondsnum", FuncRet(Type::i, "")}, {"datediff", FuncRet(Type::d | Type::dt, "")}, {"formatdatetime", FuncRet(Type::s, "")}, {"now", FuncRet(Type::dt | Type::d, "now()")}, {"today", FuncRet(Type::d | Type::dt, "today()")}, - {"yesterday", FuncRet(Type::d | Type::dt, "yesterday()")} + {"yesterday", FuncRet(Type::d | Type::dt, "yesterday()")}, {"tolastdayofmonth", FuncRet(Type::dt | Type::d, "")} }; std::set func_args_same_types = { @@ -253,7 +253,7 @@ std::map func_to_param_type = { {"tostartofinterval", Type::d | Type::dt}, {"totime", Type::d | Type::dt}, {"torelativehonthnum", Type::d | Type::dt}, {"torelativeweeknum", Type::d | Type::dt}, {"torelativedaynum", Type::d | Type::dt}, {"torelativehournum", Type::d | Type::dt}, {"torelativeminutenum", Type::d | Type::dt}, {"torelativesecondnum", Type::d | Type::dt}, {"datediff", Type::d | Type::dt}, - {"formatdatetime", Type::dt} + {"formatdatetime", Type::dt}, {"tolastdayofmonth", Type::d | Type::dt} }; diff --git a/utils/keeper-data-dumper/main.cpp b/utils/keeper-data-dumper/main.cpp index 0f86d34d334..df6083e4bd7 100644 --- a/utils/keeper-data-dumper/main.cpp +++ b/utils/keeper-data-dumper/main.cpp @@ -32,9 +32,9 @@ void dumpMachine(std::shared_ptr machine) ", numChildren: " << value.stat.numChildren << ", dataLength: " << value.stat.dataLength << "}" << std::endl; - std::cout << "\tData: " << storage.container.getValue(key).data << std::endl; + std::cout << "\tData: " << storage.container.getValue(key).getData() << std::endl; - for (const auto & child : value.children) + for (const auto & child : value.getChildren()) { if (key == "/") keys.push(key + child.toString()); diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index e87c4ea2b46..6366aef19ce 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v22.3.3.44-lts 2022-04-06 v22.3.2.2-lts 2022-03-17 v22.2.3.5-stable 2022-02-25 v22.2.2.1-stable 2022-02-17 diff --git a/utils/tests-visualizer/index.html b/utils/tests-visualizer/index.html index 00076f683fa..15ee221aa8e 100644 --- a/utils/tests-visualizer/index.html +++ b/utils/tests-visualizer/index.html @@ -144,7 +144,7 @@ let test_names_query = ` async function loadDataByQuery(query) { const response = await fetch( - "https://play-ci.clickhouse.com?user=play&add_http_cors_header=1", + "https://play.clickhouse.com?user=play&add_http_cors_header=1", { method: "POST", body: query } ) if (!response.ok) throw new Error(`Data download failed\nHTTP status ${response.status}`); diff --git a/website/blog/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018.md b/website/blog/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018.md index a8c5c2a92dd..f94d2de411c 100644 --- a/website/blog/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018.md +++ b/website/blog/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018.md @@ -7,7 +7,7 @@ tags: ['meetup', 'Paris', 'France', 'events'] Agenda of Paris ClickHouse Meetup was full of use cases, mostly from France-based companies which are actively using ClickHouse. Slides for all talks are [available on the GitHub](https://github.com/clickhouse/clickhouse-presentations/tree/master/meetup18). 
-Christophe Kalenzaga and Vianney Foucault, engineers from ContentSquare, company that provided the meetup venue: +Christophe Kalenzaga and Vianney Foucault, engineers from Contentsquare, company that provided the meetup venue: ![Christophe Kalenzaga and Vianney Foucault](https://blog-images.clickhouse.com/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018/1.jpg) Matthieu Jacquet from Storetail (Criteo): diff --git a/website/js/base.js b/website/js/base.js index 9389028f1ef..1ab8f841dbe 100644 --- a/website/js/base.js +++ b/website/js/base.js @@ -67,22 +67,17 @@ }); } - (function (d, w, c) { - (w[c] = w[c] || []).push(function() { - var is_single_page = $('html').attr('data-single-page') === 'true'; - - if (!is_single_page) { - $('head').each(function(_, element) { - $(element).append( - '' - ); - $(element).append( - '' - ); - }); - } + var is_single_page = $('html').attr('data-single-page') === 'true'; + if (!is_single_page) { + $('head').each(function (_, element) { + $(element).append( + '' + ); + $(element).append( + '' + ); }); - })(document, window, ""); + } var beforePrint = function() { var details = document.getElementsByTagName("details");